/*
 * Copyright (C) 2008-2017 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
 *     its contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "CodeBlock.h"

#include "ArithProfile.h"
#include "BasicBlockLocation.h"
#include "BytecodeDumper.h"
#include "BytecodeGenerator.h"
#include "BytecodeLivenessAnalysis.h"
#include "BytecodeUseDef.h"
#include "CallLinkStatus.h"
#include "CodeBlockSet.h"
#include "DFGCapabilities.h"
#include "DFGCommon.h"
#include "DFGDriver.h"
#include "DFGJITCode.h"
#include "DFGWorklist.h"
#include "Debugger.h"
#include "EvalCodeBlock.h"
#include "FullCodeOrigin.h"
#include "FunctionCodeBlock.h"
#include "FunctionExecutableDump.h"
#include "GetPutInfo.h"
#include "InlineCallFrame.h"
#include "InterpreterInlines.h"
#include "JIT.h"
#include "JITMathIC.h"
#include "JSCInlines.h"
#include "JSCJSValue.h"
#include "JSFunction.h"
#include "JSLexicalEnvironment.h"
#include "JSModuleEnvironment.h"
#include "JSSet.h"
#include "JSString.h"
#include "JSTemplateRegistryKey.h"
#include "LLIntData.h"
#include "LLIntEntrypoint.h"
#include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
#include "LowLevelInterpreter.h"
#include "ModuleProgramCodeBlock.h"
#include "PCToCodeOriginMap.h"
#include "PolymorphicAccess.h"
#include "ProfilerDatabase.h"
#include "ProgramCodeBlock.h"
#include "ReduceWhitespace.h"
#include "Repatch.h"
#include "SlotVisitorInlines.h"
#include "StackVisitor.h"
#include "StructureStubInfo.h"
#include "TypeLocationCache.h"
#include "TypeProfiler.h"
#include "UnlinkedInstructionStream.h"
#include "VMInlines.h"
#include <wtf/BagToHashMap.h>
#include <wtf/CommaPrinter.h>
#include <wtf/SimpleStats.h>
#include <wtf/StringExtras.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/UniquedStringImpl.h>

#if ENABLE(JIT)
#include "RegisterAtOffsetList.h"
#endif

#if ENABLE(DFG_JIT)
#include "DFGOperations.h"
#endif

#if ENABLE(FTL_JIT)
#include "FTLJITCode.h"
#endif

namespace JSC {

const ClassInfo CodeBlock::s_info = {
    "CodeBlock", 0, 0,
    CREATE_METHOD_TABLE(CodeBlock)
};

CString CodeBlock::inferredName() const
{
    switch (codeType()) {
    case GlobalCode:
        return "<global>";
    case EvalCode:
        return "<eval>";
    case FunctionCode:
        return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
    case ModuleCode:
        return "<module>";
    default:
        CRASH();
        return CString("", 0);
    }
}

bool CodeBlock::hasHash() const
{
    return !!m_hash;
}

bool CodeBlock::isSafeToComputeHash() const
{
    return !isCompilationThread();
}

CodeBlockHash CodeBlock::hash() const
{
    if (!m_hash) {
        RELEASE_ASSERT(isSafeToComputeHash());
        m_hash = CodeBlockHash(ownerScriptExecutable()->source(), specializationKind());
    }
    return m_hash;
}

CString CodeBlock::sourceCodeForTools() const
{
    if (codeType() != FunctionCode)
        return ownerScriptExecutable()->source().toUTF8();

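    // The unlinked executable stores offsets relative to its own start, while the
    // linked source range is absolute within the provider; "delta" below converts
    // between the two. As a hypothetical example: if the linked source range
    // starts at provider offset 120 and the unlinked startOffset is 20, delta is
    // 100, so an unlinked name offset of 24 maps to provider offset 124.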
    SourceProvider* provider = source();
    FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
    UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
    unsigned unlinkedStartOffset = unlinked->startOffset();
    unsigned linkedStartOffset = executable->source().startOffset();
    int delta = linkedStartOffset - unlinkedStartOffset;
    unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
    unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
    return toCString(
        "function ",
        provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8());
}

CString CodeBlock::sourceCodeOnOneLine() const
{
    return reduceWhitespace(sourceCodeForTools());
}

CString CodeBlock::hashAsStringIfPossible() const
{
    if (hasHash() || isSafeToComputeHash())
        return toCString(hash());
    return "<no-hash>";
}

void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
{
    out.print(inferredName(), "#", hashAsStringIfPossible());
    out.print(":[", RawPointer(this), "->");
    if (!!m_alternative)
        out.print(RawPointer(alternative()), "->");
    out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());

    if (codeType() == FunctionCode)
        out.print(specializationKind());
    out.print(", ", instructionCount());
    if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
        out.print(" (ShouldAlwaysBeInlined)");
    if (ownerScriptExecutable()->neverInline())
        out.print(" (NeverInline)");
    if (ownerScriptExecutable()->neverOptimize())
        out.print(" (NeverOptimize)");
    else if (ownerScriptExecutable()->neverFTLOptimize())
        out.print(" (NeverFTLOptimize)");
    if (ownerScriptExecutable()->didTryToEnterInLoop())
        out.print(" (DidTryToEnterInLoop)");
    if (ownerScriptExecutable()->isStrictMode())
        out.print(" (StrictMode)");
    if (m_didFailJITCompilation)
        out.print(" (JITFail)");
    if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
        out.print(" (FTLFail)");
    if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
        out.print(" (HadFTLReplacement)");
    out.print("]");
}

void CodeBlock::dump(PrintStream& out) const
{
    dumpAssumingJITType(out, jitType());
}

void CodeBlock::dumpSource()
{
    dumpSource(WTF::dataFile());
}

void CodeBlock::dumpSource(PrintStream& out)
{
    ScriptExecutable* executable = ownerScriptExecutable();
    if (executable->isFunctionExecutable()) {
        FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
        StringView source = functionExecutable->source().provider()->getRange(
            functionExecutable->parametersStartOffset(),
            functionExecutable->typeProfilingEndOffset() + 1); // Type profiling end offset is the character before the '}'.

        out.print("function ", inferredName(), source);
        return;
    }
    out.print(executable->source().view());
}

void CodeBlock::dumpBytecode()
{
    dumpBytecode(WTF::dataFile());
}

void CodeBlock::dumpBytecode(PrintStream& out)
{
    StubInfoMap stubInfos;
    CallLinkInfoMap callLinkInfos;
    getStubInfoMap(stubInfos);
    getCallLinkInfoMap(callLinkInfos);
    BytecodeDumper<CodeBlock>::dumpBlock(this, instructions(), out, stubInfos, callLinkInfos);
}

void CodeBlock::dumpBytecode(PrintStream& out, const Instruction* begin, const Instruction*& it, const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
{
    BytecodeDumper<CodeBlock>::dumpBytecode(this, out, begin, it, stubInfos, callLinkInfos);
}

void CodeBlock::dumpBytecode(
    PrintStream& out, unsigned bytecodeOffset,
    const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
{
    const Instruction* it = instructions().begin() + bytecodeOffset;
    dumpBytecode(out, instructions().begin(), it, stubInfos, callLinkInfos);
}

#define FOR_EACH_MEMBER_VECTOR(macro) \
    macro(instructions) \
    macro(callLinkInfos) \
    macro(linkedCallerList) \
    macro(identifiers) \
    macro(functionExpressions) \
    macro(constantRegisters)

template<typename T>
static size_t sizeInBytes(const Vector<T>& vector)
{
    return vector.capacity() * sizeof(T);
}
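// Note that sizeInBytes() reports the vector's capacity, not its size, so it
// reflects what the allocation actually costs.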

namespace {

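// PutToScopeFireDetail is handed to WatchpointSet::invalidate() when linking
// op_put_to_scope below, so a fired watchpoint can report which put_to_scope
// was being linked and for which identifier.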
class PutToScopeFireDetail : public FireDetail {
public:
    PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
        : m_codeBlock(codeBlock)
        , m_ident(ident)
    {
    }

    void dump(PrintStream& out) const override
    {
        out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
    }

private:
    CodeBlock* m_codeBlock;
    const Identifier& m_ident;
};

} // anonymous namespace

CodeBlock::CodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other)
    : JSCell(*vm, structure)
    , m_globalObject(other.m_globalObject)
    , m_numCalleeLocals(other.m_numCalleeLocals)
    , m_numVars(other.m_numVars)
    , m_shouldAlwaysBeInlined(true)
#if ENABLE(JIT)
    , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
#endif
    , m_didFailJITCompilation(false)
    , m_didFailFTLCompilation(false)
    , m_hasBeenCompiledWithFTL(false)
    , m_isConstructor(other.m_isConstructor)
    , m_isStrictMode(other.m_isStrictMode)
    , m_codeType(other.m_codeType)
    , m_unlinkedCode(*other.m_vm, this, other.m_unlinkedCode.get())
    , m_numberOfArgumentsToSkip(other.m_numberOfArgumentsToSkip)
    , m_hasDebuggerStatement(false)
    , m_steppingMode(SteppingModeDisabled)
    , m_numBreakpoints(0)
    , m_ownerExecutable(*other.m_vm, this, other.m_ownerExecutable.get())
    , m_vm(other.m_vm)
    , m_instructions(other.m_instructions)
    , m_thisRegister(other.m_thisRegister)
    , m_scopeRegister(other.m_scopeRegister)
    , m_hash(other.m_hash)
    , m_source(other.m_source)
    , m_sourceOffset(other.m_sourceOffset)
    , m_firstLineColumnOffset(other.m_firstLineColumnOffset)
    , m_constantRegisters(other.m_constantRegisters)
    , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
    , m_functionDecls(other.m_functionDecls)
    , m_functionExprs(other.m_functionExprs)
    , m_osrExitCounter(0)
    , m_optimizationDelayCounter(0)
    , m_reoptimizationRetryCounter(0)
    , m_creationTime(std::chrono::steady_clock::now())
{
    m_visitWeaklyHasBeenCalled = false;

    ASSERT(heap()->isDeferred());
    ASSERT(m_scopeRegister.isLocal());

    setNumParameters(other.numParameters());
}

void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other)
{
    Base::finishCreation(vm);

    optimizeAfterWarmUp();
    jitAfterWarmUp();

    if (other.m_rareData) {
        createRareDataIfNecessary();

        m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
        m_rareData->m_constantBuffers = other.m_rareData->m_constantBuffers;
        m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
        m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
    }

    heap()->m_codeBlocks->add(this);
}

CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
    JSScope* scope, RefPtr<SourceProvider>&& sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
    : JSCell(*vm, structure)
    , m_globalObject(scope->globalObject()->vm(), this, scope->globalObject())
    , m_numCalleeLocals(unlinkedCodeBlock->m_numCalleeLocals)
    , m_numVars(unlinkedCodeBlock->m_numVars)
    , m_shouldAlwaysBeInlined(true)
#if ENABLE(JIT)
    , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
#endif
    , m_didFailJITCompilation(false)
    , m_didFailFTLCompilation(false)
    , m_hasBeenCompiledWithFTL(false)
    , m_isConstructor(unlinkedCodeBlock->isConstructor())
    , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
    , m_codeType(unlinkedCodeBlock->codeType())
    , m_unlinkedCode(m_globalObject->vm(), this, unlinkedCodeBlock)
    , m_hasDebuggerStatement(false)
    , m_steppingMode(SteppingModeDisabled)
    , m_numBreakpoints(0)
    , m_ownerExecutable(m_globalObject->vm(), this, ownerExecutable)
    , m_vm(unlinkedCodeBlock->vm())
    , m_thisRegister(unlinkedCodeBlock->thisRegister())
    , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
    , m_source(WTFMove(sourceProvider))
    , m_sourceOffset(sourceOffset)
    , m_firstLineColumnOffset(firstLineColumnOffset)
    , m_osrExitCounter(0)
    , m_optimizationDelayCounter(0)
    , m_reoptimizationRetryCounter(0)
    , m_creationTime(std::chrono::steady_clock::now())
{
    m_visitWeaklyHasBeenCalled = false;

    ASSERT(heap()->isDeferred());
    ASSERT(m_scopeRegister.isLocal());

    ASSERT(m_source);
    setNumParameters(unlinkedCodeBlock->numParameters());
}

bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
    JSScope* scope)
{
    Base::finishCreation(vm);

    if (vm.typeProfiler() || vm.controlFlowProfiler())
        vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(), ownerExecutable->typeProfilingEndOffset());

    if (!setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation()))
        return false;
    if (!setConstantIdentifierSetRegisters(vm, unlinkedCodeBlock->constantIdentifierSets()))
        return false;
    if (unlinkedCodeBlock->usesGlobalObject())
        m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(*m_vm, this, m_globalObject.get());

    for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
        LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
        if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
            m_constantRegisters[registerIndex].set(*m_vm, this, m_globalObject->jsCellForLinkTimeConstant(type));
    }

    // We already have the cloned symbol table for the module environment since we need to instantiate
    // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
    if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(vm, unlinkedCodeBlock)) {
        SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
        if (m_vm->typeProfiler()) {
            ConcurrentJSLocker locker(clonedSymbolTable->m_lock);
            clonedSymbolTable->prepareForTypeProfiling(locker);
        }
        replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
    }

    bool shouldUpdateFunctionHasExecutedCache = vm.typeProfiler() || vm.controlFlowProfiler();
    m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls());
    for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
        UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
        if (shouldUpdateFunctionHasExecutedCache)
            vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
        m_functionDecls[i].set(*m_vm, this, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
    }

    m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs());
    for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
        UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
        if (shouldUpdateFunctionHasExecutedCache)
            vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
        m_functionExprs[i].set(*m_vm, this, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
    }

    if (unlinkedCodeBlock->hasRareData()) {
        createRareDataIfNecessary();
        if (size_t count = unlinkedCodeBlock->constantBufferCount()) {
            m_rareData->m_constantBuffers.grow(count);
            for (size_t i = 0; i < count; i++) {
                const UnlinkedCodeBlock::ConstantBuffer& buffer = unlinkedCodeBlock->constantBuffer(i);
                m_rareData->m_constantBuffers[i] = buffer;
            }
        }
        if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
            m_rareData->m_exceptionHandlers.resizeToFit(count);
            for (size_t i = 0; i < count; i++) {
                const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
                HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
#if ENABLE(JIT)
                handler.initialize(unlinkedHandler, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(op_catch))));
#else
                handler.initialize(unlinkedHandler);
#endif
            }
        }

        if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
            m_rareData->m_stringSwitchJumpTables.grow(count);
            for (size_t i = 0; i < count; i++) {
                UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
                UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
                for (; ptr != end; ++ptr) {
                    OffsetLocation offset;
                    offset.branchOffset = ptr->value.branchOffset;
                    m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
                }
            }
        }

        if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
            m_rareData->m_switchJumpTables.grow(count);
            for (size_t i = 0; i < count; i++) {
                UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
                SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
                destTable.branchOffsets = sourceTable.branchOffsets;
                destTable.min = sourceTable.min;
            }
        }
    }

    // Allocate metadata buffers for the bytecode
    if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
        m_llintCallLinkInfos = RefCountedArray<LLIntCallLinkInfo>(size);
    if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
        m_arrayProfiles.grow(size);
    if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
        m_arrayAllocationProfiles = RefCountedArray<ArrayAllocationProfile>(size);
    if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
        m_valueProfiles = RefCountedArray<ValueProfile>(size);
    if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
        m_objectAllocationProfiles = RefCountedArray<ObjectAllocationProfile>(size);

#if ENABLE(JIT)
    setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters());
#endif

    // Copy and translate the UnlinkedInstructions
    unsigned instructionCount = unlinkedCodeBlock->instructions().count();
    UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions());

    // Bookkeep the strongly referenced module environments.
    HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;

    RefCountedArray<Instruction> instructions(instructionCount);

    unsigned valueProfileCount = 0;
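    // Profiled opcodes reserve their last operand slot for a ValueProfile*. The
    // helper below claims the next unclaimed profile, ties it to its bytecode
    // offset, and writes it into that slot.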
    auto linkValueProfile = [&](unsigned bytecodeOffset, unsigned opLength) {
        unsigned valueProfileIndex = valueProfileCount++;
        ValueProfile* profile = &m_valueProfiles[valueProfileIndex];
        ASSERT(profile->m_bytecodeOffset == -1);
        profile->m_bytecodeOffset = bytecodeOffset;
        instructions[bytecodeOffset + opLength - 1] = profile;
    };

    for (unsigned i = 0; !instructionReader.atEnd(); ) {
        const UnlinkedInstruction* pc = instructionReader.next();

        unsigned opLength = opcodeLength(pc[0].u.opcode);

        instructions[i] = vm.interpreter->getOpcode(pc[0].u.opcode);
        for (size_t j = 1; j < opLength; ++j) {
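            // Instruction slots are pointer-width, so on 64-bit targets zero the
            // whole word first; the 32-bit operand store below would otherwise
            // leave the upper bits uninitialized.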
            if (sizeof(int32_t) != sizeof(intptr_t))
                instructions[i + j].u.pointer = 0;
            instructions[i + j].u.operand = pc[j].u.operand;
        }
        switch (pc[0].u.opcode) {
        case op_has_indexed_property: {
            int arrayProfileIndex = pc[opLength - 1].u.operand;
            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);

            instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
            break;
        }
        case op_call_varargs:
        case op_tail_call_varargs:
        case op_tail_call_forward_arguments:
        case op_construct_varargs:
        case op_get_by_val: {
            int arrayProfileIndex = pc[opLength - 2].u.operand;
            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);

            instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
            FALLTHROUGH;
        }
        case op_get_direct_pname:
        case op_get_by_id:
        case op_get_by_id_with_this:
        case op_try_get_by_id:
        case op_get_by_val_with_this:
        case op_get_from_arguments:
        case op_to_number:
        case op_get_argument: {
            linkValueProfile(i, opLength);
            break;
        }

        case op_in:
        case op_put_by_val:
        case op_put_by_val_direct: {
            int arrayProfileIndex = pc[opLength - 1].u.operand;
            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
            instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
            break;
        }

        case op_new_array:
        case op_new_array_buffer:
        case op_new_array_with_size: {
            int arrayAllocationProfileIndex = pc[opLength - 1].u.operand;
            instructions[i + opLength - 1] = &m_arrayAllocationProfiles[arrayAllocationProfileIndex];
            break;
        }
        case op_new_object: {
            int objectAllocationProfileIndex = pc[opLength - 1].u.operand;
            ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex];
            int inferredInlineCapacity = pc[opLength - 2].u.operand;

            instructions[i + opLength - 1] = objectAllocationProfile;
            objectAllocationProfile->initialize(vm,
                m_globalObject.get(), this, m_globalObject->objectPrototype(), inferredInlineCapacity);
            break;
        }

        case op_call:
        case op_tail_call:
        case op_call_eval: {
            linkValueProfile(i, opLength);
            int arrayProfileIndex = pc[opLength - 2].u.operand;
            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
            instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
            instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
            break;
        }
        case op_construct: {
            instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
            linkValueProfile(i, opLength);
            break;
        }
        case op_get_array_length:
            CRASH();

        case op_resolve_scope: {
            const Identifier& ident = identifier(pc[3].u.operand);
            ResolveType type = static_cast<ResolveType>(pc[4].u.operand);
            RELEASE_ASSERT(type != LocalClosureVar);
            int localScopeDepth = pc[5].u.operand;

            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);
            instructions[i + 4].u.operand = op.type;
            instructions[i + 5].u.operand = op.depth;
            if (op.lexicalEnvironment) {
                if (op.type == ModuleVar) {
                    // Keep the linked module environment strongly referenced.
                    if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry)
                        addConstant(op.lexicalEnvironment);
                    instructions[i + 6].u.jsCell.set(vm, this, op.lexicalEnvironment);
                } else
                    instructions[i + 6].u.symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());
            } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this))
                instructions[i + 6].u.jsCell.set(vm, this, constantScope);
            else
                instructions[i + 6].u.pointer = nullptr;
            break;
        }

        case op_get_from_scope: {
            linkValueProfile(i, opLength);

            // get_from_scope dst, scope, id, GetPutInfo, Structure, Operand
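            // Judging from the loads below, the operand layout is: pc[1] = dst,
            // pc[2] = scope, pc[3] = identifier index, pc[4] = GetPutInfo,
            // pc[5] = Structure / WatchpointSet (scope depth before linking),
            // pc[6] = operand.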

            int localScopeDepth = pc[5].u.operand;
            instructions[i + 5].u.pointer = nullptr;

            GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
            ASSERT(!isInitialization(getPutInfo.initializationMode()));
            if (getPutInfo.resolveType() == LocalClosureVar) {
                instructions[i + 4] = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
                break;
            }

            const Identifier& ident = identifier(pc[3].u.operand);
            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, getPutInfo.resolveType(), InitializationMode::NotInitialization);

            instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
            if (op.type == ModuleVar)
                instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
            if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
                instructions[i + 5].u.watchpointSet = op.watchpointSet;
            else if (op.structure)
                instructions[i + 5].u.structure.set(vm, this, op.structure);
            instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
            break;
        }

        case op_put_to_scope: {
            // put_to_scope scope, id, value, GetPutInfo, Structure, Operand
            GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
            if (getPutInfo.resolveType() == LocalClosureVar) {
                // Only do watching if the property we're putting to is not anonymous.
                if (static_cast<unsigned>(pc[2].u.operand) != UINT_MAX) {
                    int symbolTableIndex = pc[5].u.operand;
                    SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
                    const Identifier& ident = identifier(pc[2].u.operand);
                    ConcurrentJSLocker locker(symbolTable->m_lock);
                    auto iter = symbolTable->find(locker, ident.impl());
                    ASSERT(iter != symbolTable->end(locker));
                    iter->value.prepareToWatch();
                    instructions[i + 5].u.watchpointSet = iter->value.watchpointSet();
                } else
                    instructions[i + 5].u.watchpointSet = nullptr;
                break;
            }

            const Identifier& ident = identifier(pc[2].u.operand);
            int localScopeDepth = pc[5].u.operand;
            instructions[i + 5].u.pointer = nullptr;
            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Put, getPutInfo.resolveType(), getPutInfo.initializationMode());

            instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
            if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
                instructions[i + 5].u.watchpointSet = op.watchpointSet;
            else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
                if (op.watchpointSet)
                    op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident));
            } else if (op.structure)
                instructions[i + 5].u.structure.set(vm, this, op.structure);
            instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);

            break;
        }

        case op_profile_type: {
            RELEASE_ASSERT(vm.typeProfiler());
            // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType?
            size_t instructionOffset = i + opLength - 1;
            unsigned divotStart, divotEnd;
            GlobalVariableID globalVariableID = 0;
            RefPtr<TypeSet> globalTypeSet;
            bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
            VirtualRegister profileRegister(pc[1].u.operand);
            ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand);
            SymbolTable* symbolTable = nullptr;

            switch (flag) {
            case ProfileTypeBytecodeClosureVar: {
                const Identifier& ident = identifier(pc[4].u.operand);
                int localScopeDepth = pc[2].u.operand;
                ResolveType type = static_cast<ResolveType>(pc[5].u.operand);
                // Even though type profiling may be profiling either a Get or a Put, we can
                // always claim a Get because we're abstractly "reading" from a JSScope.
                ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);

                if (op.type == ClosureVar || op.type == ModuleVar)
                    symbolTable = op.lexicalEnvironment->symbolTable();
                else if (op.type == GlobalVar)
                    symbolTable = m_globalObject.get()->symbolTable();

                UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl();
                if (symbolTable) {
                    ConcurrentJSLocker locker(symbolTable->m_lock);
                    // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
                    symbolTable->prepareForTypeProfiling(locker);
                    globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm);
                    globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm);
                } else
                    globalVariableID = TypeProfilerNoGlobalIDExists;

                break;
            }
            case ProfileTypeBytecodeLocallyResolved: {
                int symbolTableIndex = pc[2].u.operand;
                SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
                const Identifier& ident = identifier(pc[4].u.operand);
                ConcurrentJSLocker locker(symbolTable->m_lock);
                // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
                globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm);
                globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm);

                break;
            }
            case ProfileTypeBytecodeDoesNotHaveGlobalID:
            case ProfileTypeBytecodeFunctionArgument: {
                globalVariableID = TypeProfilerNoGlobalIDExists;
                break;
            }
            case ProfileTypeBytecodeFunctionReturnStatement: {
                RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
                globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
                globalVariableID = TypeProfilerReturnStatement;
                if (!shouldAnalyze) {
                    // Because a return statement can be added implicitly to return undefined at the end of a function,
                    // and these nodes don't emit expression ranges because they aren't in the actual source text of
                    // the user's program, give the type profiler some range to identify these return statements.
                    // Currently, the text offset that is used as identification is "f" in the function keyword
                    // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
                    divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset();
                    shouldAnalyze = true;
                }
                break;
            }
            }

            std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
                ownerExecutable->sourceID(), divotStart, divotEnd, WTFMove(globalTypeSet), &vm);
            TypeLocation* location = locationPair.first;
            bool isNewLocation = locationPair.second;

            if (flag == ProfileTypeBytecodeFunctionReturnStatement)
                location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset();

            if (shouldAnalyze && isNewLocation)
                vm.typeProfiler()->insertNewLocation(location);

            instructions[i + 2].u.location = location;
            break;
        }

        case op_debug: {
            if (pc[1].u.unsignedValue == DidReachBreakpoint)
                m_hasDebuggerStatement = true;
            break;
        }

        case op_create_rest: {
            int numberOfArgumentsToSkip = instructions[i + 3].u.operand;
            ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0);
            // This is used when rematerializing the rest parameter during OSR exit in the FTL JIT.
            m_numberOfArgumentsToSkip = numberOfArgumentsToSkip;
            break;
        }

        default:
            break;
        }
        i += opLength;
    }

    if (vm.controlFlowProfiler())
        insertBasicBlockBoundariesForControlFlowProfiler(instructions);

    m_instructions = WTFMove(instructions);

    // Set optimization thresholds only after m_instructions is initialized, since these
    // rely on the instruction count (and are in theory permitted to also inspect the
    // instruction stream to more accurately assess the cost of tier-up).
    optimizeAfterWarmUp();
    jitAfterWarmUp();

    // If the concurrent thread will want the code block's hash, then compute it here
    // synchronously.
    if (Options::alwaysComputeHash())
        hash();

    if (Options::dumpGeneratedBytecodes())
        dumpBytecode();

    heap()->m_codeBlocks->add(this);
    heap()->reportExtraMemoryAllocated(m_instructions.size() * sizeof(Instruction));

    return true;
}

CodeBlock::~CodeBlock()
{
    if (m_vm->m_perBytecodeProfiler)
        m_vm->m_perBytecodeProfiler->notifyDestruction(this);

    if (unlinkedCodeBlock()->didOptimize() == MixedTriState)
        unlinkedCodeBlock()->setDidOptimize(FalseTriState);

#if ENABLE(VERBOSE_VALUE_PROFILE)
    dumpValueProfiles();
#endif

    // We may be destroyed before any CodeBlocks that refer to us are destroyed.
    // Consider that two CodeBlocks become unreachable at the same time. There
    // is no guarantee about the order in which the CodeBlocks are destroyed.
    // So, if we don't remove incoming calls, and get destroyed before the
    // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
    // destructor will try to remove nodes from our (no longer valid) linked list.
    unlinkIncomingCalls();

    // Note that our outgoing calls will be removed from other CodeBlocks'
    // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
    // destructors.

#if ENABLE(JIT)
    for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
        StructureStubInfo* stub = *iter;
        stub->aboutToDie();
        stub->deref();
    }
#endif // ENABLE(JIT)
}

bool CodeBlock::setConstantIdentifierSetRegisters(VM& vm, const Vector<ConstantIndentifierSetEntry>& constants)
{
    auto scope = DECLARE_THROW_SCOPE(vm);
    JSGlobalObject* globalObject = m_globalObject.get();
    ExecState* exec = globalObject->globalExec();

    for (const auto& entry : constants) {
        Structure* setStructure = globalObject->setStructure();
        RETURN_IF_EXCEPTION(scope, false);
        JSSet* jsSet = JSSet::create(exec, vm, setStructure);
        RETURN_IF_EXCEPTION(scope, false);

        const IdentifierSet& set = entry.first;
        for (auto& setEntry : set) {
            JSString* jsString = jsOwnedString(&vm, setEntry.get());
            jsSet->add(exec, JSValue(jsString));
            RETURN_IF_EXCEPTION(scope, false);
        }
        m_constantRegisters[entry.second].set(vm, this, JSValue(jsSet));
    }
    return true;
}

bool CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation)
{
    auto scope = DECLARE_THROW_SCOPE(*m_vm);
    JSGlobalObject* globalObject = m_globalObject.get();
    ExecState* exec = globalObject->globalExec();

    ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
    size_t count = constants.size();
    m_constantRegisters.resizeToFit(count);
    bool hasTypeProfiler = !!m_vm->typeProfiler();
    for (size_t i = 0; i < count; i++) {
        JSValue constant = constants[i].get();

        if (!constant.isEmpty()) {
            if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(*m_vm, constant)) {
                if (hasTypeProfiler) {
                    ConcurrentJSLocker locker(symbolTable->m_lock);
                    symbolTable->prepareForTypeProfiling(locker);
                }

                SymbolTable* clone = symbolTable->cloneScopePart(*m_vm);
                if (wasCompiledWithDebuggingOpcodes())
                    clone->setRareDataCodeBlock(this);

                constant = clone;
            } else if (isTemplateRegistryKey(*m_vm, constant)) {
                auto* templateObject = globalObject->templateRegistry().getTemplateObject(exec, jsCast<JSTemplateRegistryKey*>(constant));
                RETURN_IF_EXCEPTION(scope, false);
                constant = templateObject;
            }
        }

        m_constantRegisters[i].set(*m_vm, this, constant);
    }

    m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;

    return true;
}

void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative)
{
    m_alternative.set(vm, this, alternative);
}

void CodeBlock::setNumParameters(int newValue)
{
    m_numParameters = newValue;

    m_argumentValueProfiles = RefCountedArray<ValueProfile>(newValue);
}

CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
{
#if ENABLE(FTL_JIT)
    if (jitType() != JITCode::DFGJIT)
        return 0;
    DFG::JITCode* jitCode = m_jitCode->dfg();
    return jitCode->osrEntryBlock();
#else // ENABLE(FTL_JIT)
    return 0;
#endif // ENABLE(FTL_JIT)
}

void CodeBlock::visitWeakly(SlotVisitor& visitor)
{
    ConcurrentJSLocker locker(m_lock);
    if (m_visitWeaklyHasBeenCalled)
        return;

    m_visitWeaklyHasBeenCalled = true;

    if (Heap::isMarkedConcurrently(this))
        return;

    if (shouldVisitStrongly(locker)) {
        visitor.appendUnbarriered(this);
        return;
    }

    // There are two things that may use unconditional finalizers: inline cache clearing
    // and jettisoning. The probability of us wanting to do at least one of those things
    // is probably quite close to 1. So we add one no matter what and when it runs, it
    // figures out whether it has any work to do.
    visitor.addUnconditionalFinalizer(&m_unconditionalFinalizer);

    if (!JITCode::isOptimizingJIT(jitType()))
        return;

    // If we jettison ourselves we'll install our alternative, so make sure that it
    // survives GC even if we don't.
    visitor.append(m_alternative);

    // There are two things that we use weak reference harvesters for: DFG fixpoint for
    // jettisoning, and trying to find structures that would be live based on some
    // inline cache. So it makes sense to register them regardless.
    visitor.addWeakReferenceHarvester(&m_weakReferenceHarvester);

#if ENABLE(DFG_JIT)
    // We get here if we're live in the sense that our owner executable is live,
    // but we're not yet live for sure in another sense: we may yet decide that this
    // code block should be jettisoned based on its outgoing weak references being
    // stale. Set a flag to indicate that we're still assuming that we're dead, and
    // perform one round of determining if we're live. The GC may determine, based on
    // either us marking additional objects, or by other objects being marked for
    // other reasons, that this iteration should run again; it will notify us of this
    // decision by calling harvestWeakReferences().

    m_allTransitionsHaveBeenMarked = false;
    propagateTransitions(locker, visitor);

    m_jitCode->dfgCommon()->livenessHasBeenProved = false;
    determineLiveness(locker, visitor);
#endif // ENABLE(DFG_JIT)
}

size_t CodeBlock::estimatedSize(JSCell* cell)
{
    CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
    size_t extraMemoryAllocated = thisObject->m_instructions.size() * sizeof(Instruction);
    if (thisObject->m_jitCode)
        extraMemoryAllocated += thisObject->m_jitCode->size();
    return Base::estimatedSize(cell) + extraMemoryAllocated;
}

void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
{
    CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
    ASSERT_GC_OBJECT_INHERITS(thisObject, info());
    JSCell::visitChildren(thisObject, visitor);
    thisObject->visitChildren(visitor);
}

void CodeBlock::visitChildren(SlotVisitor& visitor)
{
    ConcurrentJSLocker locker(m_lock);
    // There are two things that may use unconditional finalizers: inline cache clearing
    // and jettisoning. The probability of us wanting to do at least one of those things
    // is probably quite close to 1. So we add one no matter what and when it runs, it
    // figures out whether it has any work to do.
    visitor.addUnconditionalFinalizer(&m_unconditionalFinalizer);

    if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
        visitor.appendUnbarriered(otherBlock);

    if (m_jitCode)
        visitor.reportExtraMemoryVisited(m_jitCode->size());
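    // The instruction stream may be shared with other CodeBlocks (see the
    // CopyParsedBlockTag constructor above), so report only our share of its
    // memory: the total size divided by the stream's refCount.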
    if (m_instructions.size()) {
        unsigned refCount = m_instructions.refCount();
        if (!refCount) {
            dataLog("CodeBlock: ", RawPointer(this), "\n");
            dataLog("m_instructions.data(): ", RawPointer(m_instructions.data()), "\n");
            dataLog("refCount: ", refCount, "\n");
            RELEASE_ASSERT_NOT_REACHED();
        }
        visitor.reportExtraMemoryVisited(m_instructions.size() * sizeof(Instruction) / refCount);
    }

    stronglyVisitStrongReferences(locker, visitor);
    stronglyVisitWeakReferences(locker, visitor);

    m_allTransitionsHaveBeenMarked = false;
    propagateTransitions(locker, visitor);
}

bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker)
{
    if (Options::forceCodeBlockLiveness())
        return true;

    if (shouldJettisonDueToOldAge(locker))
        return false;

    // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
    // their weak references go stale. So if a baseline JIT CodeBlock gets
    // scanned, we can assume that it's live.
    if (!JITCode::isOptimizingJIT(jitType()))
        return true;

    return false;
}

bool CodeBlock::shouldJettisonDueToWeakReference()
{
    if (!JITCode::isOptimizingJIT(jitType()))
        return false;
    return !Heap::isMarked(this);
}

static std::chrono::milliseconds timeToLive(JITCode::JITType jitType)
{
    if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) {
        switch (jitType) {
        case JITCode::InterpreterThunk:
            return std::chrono::milliseconds(10);
        case JITCode::BaselineJIT:
            return std::chrono::milliseconds(10 + 20);
        case JITCode::DFGJIT:
            return std::chrono::milliseconds(40);
        case JITCode::FTLJIT:
            return std::chrono::milliseconds(120);
        default:
            return std::chrono::milliseconds::max();
        }
    }

    switch (jitType) {
    case JITCode::InterpreterThunk:
        return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(5));
    case JITCode::BaselineJIT:
        // Effectively 10 additional seconds, since BaselineJIT and
        // InterpreterThunk share a CodeBlock.
        return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(5 + 10));
    case JITCode::DFGJIT:
        return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(20));
    case JITCode::FTLJIT:
        return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(60));
    default:
        return std::chrono::milliseconds::max();
    }
}
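// shouldJettisonDueToOldAge() below compares timeSinceCreation() against this
// time-to-live to decide whether an unmarked CodeBlock is still worth keeping.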

bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
{
    if (Heap::isMarkedConcurrently(this))
        return false;

    if (UNLIKELY(Options::forceCodeBlockToJettisonDueToOldAge()))
        return true;

    if (timeSinceCreation() < timeToLive(jitType()))
        return false;

    return true;
}

#if ENABLE(DFG_JIT)
static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
{
    if (transition.m_codeOrigin && !Heap::isMarkedConcurrently(transition.m_codeOrigin.get()))
        return false;

    if (!Heap::isMarkedConcurrently(transition.m_from.get()))
        return false;

    return true;
}
#endif // ENABLE(DFG_JIT)

void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& visitor)
{
    UNUSED_PARAM(visitor);

    if (m_allTransitionsHaveBeenMarked)
        return;

    bool allAreMarkedSoFar = true;

    Interpreter* interpreter = m_vm->interpreter;
    if (jitType() == JITCode::InterpreterThunk) {
        const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
        for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
            Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
            switch (interpreter->getOpcodeID(instruction[0])) {
            case op_put_by_id: {
                StructureID oldStructureID = instruction[4].u.structureID;
                StructureID newStructureID = instruction[6].u.structureID;
                if (!oldStructureID || !newStructureID)
                    break;
                Structure* oldStructure =
                    m_vm->heap.structureIDTable().get(oldStructureID);
                Structure* newStructure =
                    m_vm->heap.structureIDTable().get(newStructureID);
                if (Heap::isMarkedConcurrently(oldStructure))
                    visitor.appendUnbarriered(newStructure);
                else
                    allAreMarkedSoFar = false;
                break;
            }
            default:
                break;
            }
        }
    }

#if ENABLE(JIT)
    if (JITCode::isJIT(jitType())) {
        for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter)
            allAreMarkedSoFar &= (*iter)->propagateTransitions(visitor);
    }
#endif // ENABLE(JIT)

#if ENABLE(DFG_JIT)
    if (JITCode::isOptimizingJIT(jitType())) {
        DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
        for (auto& weakReference : dfgCommon->weakStructureReferences)
            allAreMarkedSoFar &= weakReference->markIfCheap(visitor);

        for (auto& transition : dfgCommon->transitions) {
            if (shouldMarkTransition(transition)) {
                // If the following three things are live, then the target of the
                // transition is also live:
                //
                // - This code block. We know it's live already because otherwise
                //   we wouldn't be scanning ourselves.
                //
                // - The code origin of the transition. Transitions may arise from
                //   code that was inlined. They are not relevant if the user's
                //   object that is required for the inlinee to run is no longer
                //   live.
                //
                // - The source of the transition. The transition checks if some
                //   heap location holds the source, and if so, stores the target.
                //   Hence the source must be live for the transition to be live.
                //
                // We also short-circuit the liveness if the structure is harmless
                // to mark (i.e. its global object and prototype are both already
                // live).

                visitor.append(transition.m_to);
            } else
                allAreMarkedSoFar = false;
        }
    }
#endif // ENABLE(DFG_JIT)

    if (allAreMarkedSoFar)
        m_allTransitionsHaveBeenMarked = true;
}

void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visitor)
{
    UNUSED_PARAM(visitor);

#if ENABLE(DFG_JIT)
    // Check if we have any remaining work to do.
    DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
    if (dfgCommon->livenessHasBeenProved)
        return;

    // Now check all of our weak references. If all of them are live, then we
    // have proved liveness and so we scan our strong references. If at end of
    // GC we still have not proved liveness, then this code block is toast.
    bool allAreLiveSoFar = true;
    for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
        JSCell* reference = dfgCommon->weakReferences[i].get();
        ASSERT(!jsDynamicCast<CodeBlock*>(*reference->vm(), reference));
        if (!Heap::isMarkedConcurrently(reference)) {
            allAreLiveSoFar = false;
            break;
        }
    }
    if (allAreLiveSoFar) {
        for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
            if (!Heap::isMarkedConcurrently(dfgCommon->weakStructureReferences[i].get())) {
                allAreLiveSoFar = false;
                break;
            }
        }
    }

    // If some weak references are dead, then this fixpoint iteration was
    // unsuccessful.
    if (!allAreLiveSoFar)
        return;

    // All weak references are live. Record this information so we don't
    // come back here again, and scan the strong references.
    dfgCommon->livenessHasBeenProved = true;
    visitor.appendUnbarriered(this);
#endif // ENABLE(DFG_JIT)
}

void CodeBlock::WeakReferenceHarvester::visitWeakReferences(SlotVisitor& visitor)
{
1268     CodeBlock* codeBlock =
1269         bitwise_cast<CodeBlock*>(
1270             bitwise_cast<char*>(this) - OBJECT_OFFSETOF(CodeBlock, m_weakReferenceHarvester));
1271     
1272     codeBlock->propagateTransitions(NoLockingNecessary, visitor);
1273     codeBlock->determineLiveness(NoLockingNecessary, visitor);
1274 }
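
// The harvester above re-runs propagateTransitions() and determineLiveness()
// on each pass of the GC's marking fixpoint. m_allTransitionsHaveBeenMarked
// and dfgCommon->livenessHasBeenProved memoize finished work, so later passes
// terminate quickly once nothing new can be proven.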
1275
1276 void CodeBlock::clearLLIntGetByIdCache(Instruction* instruction)
1277 {
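    // Reset the instruction back to the generic op_get_by_id and drop the
    // cached lookup state. Operand 4 holds the cached StructureID (see
    // finalizeLLIntInlineCaches() below); operands 5 and 6 hold the rest of
    // the cached lookup, so all three are cleared wholesale.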
1278     instruction[0].u.opcode = LLInt::getOpcode(op_get_by_id);
1279     instruction[4].u.pointer = nullptr;
1280     instruction[5].u.pointer = nullptr;
1281     instruction[6].u.pointer = nullptr;
1282 }
1283
1284 void CodeBlock::finalizeLLIntInlineCaches()
1285 {
1286     Interpreter* interpreter = m_vm->interpreter;
1287     const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
1288     for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
1289         Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
1290         switch (interpreter->getOpcodeID(curInstruction[0])) {
1291         case op_get_by_id:
1292         case op_get_by_id_proto_load:
1293         case op_get_by_id_unset: {
1294             StructureID oldStructureID = curInstruction[4].u.structureID;
1295             if (!oldStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(oldStructureID)))
1296                 break;
1297             if (Options::verboseOSR())
1298                 dataLogF("Clearing LLInt property access.\n");
1299             clearLLIntGetByIdCache(curInstruction);
1300             break;
1301         }
1302         case op_put_by_id: {
1303             StructureID oldStructureID = curInstruction[4].u.structureID;
1304             StructureID newStructureID = curInstruction[6].u.structureID;
1305             StructureChain* chain = curInstruction[7].u.structureChain.get();
1306             if ((!oldStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(oldStructureID)))
1307                 && (!newStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(newStructureID)))
1308                 && (!chain || Heap::isMarked(chain)))
1309                 break;
1310             if (Options::verboseOSR())
1311                 dataLogF("Clearing LLInt put transition.\n");
1312             curInstruction[4].u.structureID = 0;
1313             curInstruction[5].u.operand = 0;
1314             curInstruction[6].u.structureID = 0;
1315             curInstruction[7].u.structureChain.clear();
1316             break;
1317         }
1318         case op_get_array_length:
1319             break;
1320         case op_to_this:
1321             if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
1322                 break;
1323             if (Options::verboseOSR())
1324                 dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
1325             curInstruction[2].u.structure.clear();
1326             curInstruction[3].u.toThisStatus = merge(
1327                 curInstruction[3].u.toThisStatus, ToThisClearedByGC);
1328             break;
1329         case op_create_this: {
1330             auto& cacheWriteBarrier = curInstruction[4].u.jsCell;
1331             if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
1332                 break;
1333             JSCell* cachedFunction = cacheWriteBarrier.get();
1334             if (Heap::isMarked(cachedFunction))
1335                 break;
1336             if (Options::verboseOSR())
1337                 dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
1338             cacheWriteBarrier.clear();
1339             break;
1340         }
1341         case op_resolve_scope: {
1342             // Right now this isn't strictly necessary. Any symbol tables that this will refer to
1343             // are for outer functions, and we refer to those functions strongly, and they refer
1344             // to the symbol table strongly. But it's nice to be on the safe side.
1345             WriteBarrierBase<SymbolTable>& symbolTable = curInstruction[6].u.symbolTable;
1346             if (!symbolTable || Heap::isMarked(symbolTable.get()))
1347                 break;
1348             if (Options::verboseOSR())
1349                 dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
1350             symbolTable.clear();
1351             break;
1352         }
1353         case op_get_from_scope:
1354         case op_put_to_scope: {
1355             GetPutInfo getPutInfo = GetPutInfo(curInstruction[4].u.operand);
1356             if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks 
1357                 || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
1358                 continue;
1359             WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
1360             if (!structure || Heap::isMarked(structure.get()))
1361                 break;
1362             if (Options::verboseOSR())
1363                 dataLogF("Clearing scope access with structure %p.\n", structure.get());
1364             structure.clear();
1365             break;
1366         }
1367         default:
1368             OpcodeID opcodeID = interpreter->getOpcodeID(curInstruction[0]);
1369             ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
1370         }
1371     }
1372
1373     // We can't just remove all the sets when we clear the caches since we might have created a watchpoint set
1374     // and then cleared the cache without GCing in between.
1375     m_llintGetByIdWatchpointMap.removeIf([](const StructureWatchpointMap::KeyValuePairType& pair) -> bool {
1376         return !Heap::isMarked(pair.key);
1377     });
1378
1379     for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
1380         if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
1381             if (Options::verboseOSR())
1382                 dataLog("Clearing LLInt call from ", *this, "\n");
1383             m_llintCallLinkInfos[i].unlink();
1384         }
1385         if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
1386             m_llintCallLinkInfos[i].lastSeenCallee.clear();
1387     }
1388 }
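
// Every case above follows the same shape: if a cached cell did not survive
// this GC cycle, drop the cache and let the LLInt repopulate it on the next
// access. A minimal sketch of that pattern (hypothetical helper, not part of
// this file, assuming only the WriteBarrierBase API already used above):
#if 0
template<typename CellType>
static void clearIfDead(WriteBarrierBase<CellType>& cache)
{
    // An empty cache needs no work; a populated cache pointing at a dead
    // cell must be cleared so the interpreter re-caches later.
    if (!!cache && !Heap::isMarked(cache.get()))
        cache.clear();
}
#endif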
1389
1390 void CodeBlock::finalizeBaselineJITInlineCaches()
1391 {
1392 #if ENABLE(JIT)
1393     for (auto iter = callLinkInfosBegin(); !!iter; ++iter)
1394         (*iter)->visitWeak(*vm());
1395
1396     for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
1397         StructureStubInfo& stubInfo = **iter;
1398         stubInfo.visitWeakReferences(this);
1399     }
1400 #endif
1401 }
1402
1403 void CodeBlock::UnconditionalFinalizer::finalizeUnconditionally()
1404 {
1405     CodeBlock* codeBlock = bitwise_cast<CodeBlock*>(
1406         bitwise_cast<char*>(this) - OBJECT_OFFSETOF(CodeBlock, m_unconditionalFinalizer));
1407     
1408     codeBlock->updateAllPredictions();
1409     
1410     if (!Heap::isMarked(codeBlock)) {
1411         if (codeBlock->shouldJettisonDueToWeakReference())
1412             codeBlock->jettison(Profiler::JettisonDueToWeakReference);
1413         else
1414             codeBlock->jettison(Profiler::JettisonDueToOldAge);
1415         return;
1416     }
1417
1418     if (JITCode::couldBeInterpreted(codeBlock->jitType()))
1419         codeBlock->finalizeLLIntInlineCaches();
1420
1421 #if ENABLE(JIT)
1422     if (!!codeBlock->jitCode())
1423         codeBlock->finalizeBaselineJITInlineCaches();
1424 #endif
1425 }
1426
1427 void CodeBlock::getStubInfoMap(const ConcurrentJSLocker&, StubInfoMap& result)
1428 {
1429 #if ENABLE(JIT)
1430     if (JITCode::isJIT(jitType()))
1431         toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
1432 #else
1433     UNUSED_PARAM(result);
1434 #endif
1435 }
1436
1437 void CodeBlock::getStubInfoMap(StubInfoMap& result)
1438 {
1439     ConcurrentJSLocker locker(m_lock);
1440     getStubInfoMap(locker, result);
1441 }
1442
1443 void CodeBlock::getCallLinkInfoMap(const ConcurrentJSLocker&, CallLinkInfoMap& result)
1444 {
1445 #if ENABLE(JIT)
1446     if (JITCode::isJIT(jitType()))
1447         toHashMap(m_callLinkInfos, getCallLinkInfoCodeOrigin, result);
1448 #else
1449     UNUSED_PARAM(result);
1450 #endif
1451 }
1452
1453 void CodeBlock::getCallLinkInfoMap(CallLinkInfoMap& result)
1454 {
1455     ConcurrentJSLocker locker(m_lock);
1456     getCallLinkInfoMap(locker, result);
1457 }
1458
1459 void CodeBlock::getByValInfoMap(const ConcurrentJSLocker&, ByValInfoMap& result)
1460 {
1461 #if ENABLE(JIT)
1462     if (JITCode::isJIT(jitType())) {
1463         for (auto* byValInfo : m_byValInfos)
1464             result.add(CodeOrigin(byValInfo->bytecodeIndex), byValInfo);
1465     }
1466 #else
1467     UNUSED_PARAM(result);
1468 #endif
1469 }
1470
1471 void CodeBlock::getByValInfoMap(ByValInfoMap& result)
1472 {
1473     ConcurrentJSLocker locker(m_lock);
1474     getByValInfoMap(locker, result);
1475 }
1476
1477 #if ENABLE(JIT)
1478 StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
1479 {
1480     ConcurrentJSLocker locker(m_lock);
1481     return m_stubInfos.add(accessType);
1482 }
1483
1484 JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile)
1485 {
1486     return m_addICs.add(arithProfile);
1487 }
1488
1489 JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile)
1490 {
1491     return m_mulICs.add(arithProfile);
1492 }
1493
1494 JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile)
1495 {
1496     return m_subICs.add(arithProfile);
1497 }
1498
1499 JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile)
1500 {
1501     return m_negICs.add(arithProfile);
1502 }
1503
1504 StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
1505 {
1506     for (StructureStubInfo* stubInfo : m_stubInfos) {
1507         if (stubInfo->codeOrigin == codeOrigin)
1508             return stubInfo;
1509     }
1510     return nullptr;
1511 }
1512
1513 ByValInfo* CodeBlock::addByValInfo()
1514 {
1515     ConcurrentJSLocker locker(m_lock);
1516     return m_byValInfos.add();
1517 }
1518
1519 CallLinkInfo* CodeBlock::addCallLinkInfo()
1520 {
1521     ConcurrentJSLocker locker(m_lock);
1522     return m_callLinkInfos.add();
1523 }
1524
1525 CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
1526 {
1527     for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
1528         if ((*iter)->codeOrigin() == CodeOrigin(index))
1529             return *iter;
1530     }
1531     return nullptr;
1532 }
1533
1534 void CodeBlock::resetJITData()
1535 {
1536     RELEASE_ASSERT(!JITCode::isJIT(jitType()));
1537     ConcurrentJSLocker locker(m_lock);
1538     
1539     // We can clear these because no other thread will have references to any stub infos, call
1540     // link infos, or by val infos if we don't have JIT code. Attempts to query these data
1541     // structures using the concurrent API (getStubInfoMap and friends) will return nothing if we
1542     // don't have JIT code.
1543     m_stubInfos.clear();
1544     m_callLinkInfos.clear();
1545     m_byValInfos.clear();
1546     
1547     // We can clear this because the DFG's queries to these data structures are guarded by whether
1548     // there is JIT code.
1549     m_rareCaseProfiles.clear();
1550 }
1551 #endif
1552
1553 void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor)
1554 {
1555     // We strongly visit OSR exit targets because we don't want to deal with
1556     // the complexity of generating an exit target CodeBlock on demand and
1557     // guaranteeing that it matches the details of the CodeBlock we compiled
1558     // the OSR exit against.
1559
1560     visitor.append(m_alternative);
1561
1562 #if ENABLE(DFG_JIT)
1563     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1564     if (dfgCommon->inlineCallFrames) {
1565         for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) {
1566             ASSERT(inlineCallFrame->baselineCodeBlock);
1567             visitor.append(inlineCallFrame->baselineCodeBlock);
1568         }
1569     }
1570 #endif
1571 }
1572
1573 void CodeBlock::stronglyVisitStrongReferences(const ConcurrentJSLocker& locker, SlotVisitor& visitor)
1574 {
1575     UNUSED_PARAM(locker);
1576     
1577     visitor.append(m_globalObject);
1578     visitor.append(m_ownerExecutable);
1579     visitor.append(m_unlinkedCode);
1580     if (m_rareData)
1581         m_rareData->m_directEvalCodeCache.visitAggregate(visitor);
1582     visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
1583     for (auto& functionExpr : m_functionExprs)
1584         visitor.append(functionExpr);
1585     for (auto& functionDecl : m_functionDecls)
1586         visitor.append(functionDecl);
1587     for (auto& objectAllocationProfile : m_objectAllocationProfiles)
1588         objectAllocationProfile.visitAggregate(visitor);
1589
1590 #if ENABLE(JIT)
1591     for (ByValInfo* byValInfo : m_byValInfos)
1592         visitor.append(byValInfo->cachedSymbol);
1593 #endif
1594
1595 #if ENABLE(DFG_JIT)
1596     if (JITCode::isOptimizingJIT(jitType()))
1597         visitOSRExitTargets(locker, visitor);
1598 #endif
1599 }
1600
1601 void CodeBlock::stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor& visitor)
1602 {
1603     UNUSED_PARAM(visitor);
1604
1605 #if ENABLE(DFG_JIT)
1606     if (!JITCode::isOptimizingJIT(jitType()))
1607         return;
1608     
1609     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1610
1611     for (auto& transition : dfgCommon->transitions) {
1612         if (!!transition.m_codeOrigin)
1613             visitor.append(transition.m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
1614         visitor.append(transition.m_from);
1615         visitor.append(transition.m_to);
1616     }
1617
1618     for (auto& weakReference : dfgCommon->weakReferences)
1619         visitor.append(weakReference);
1620
1621     for (auto& weakStructureReference : dfgCommon->weakStructureReferences)
1622         visitor.append(weakStructureReference);
1623
1624     dfgCommon->livenessHasBeenProved = true;
1625 #endif    
1626 }
1627
1628 CodeBlock* CodeBlock::baselineAlternative()
1629 {
1630 #if ENABLE(JIT)
1631     CodeBlock* result = this;
1632     while (result->alternative())
1633         result = result->alternative();
1634     RELEASE_ASSERT(result);
1635     RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
1636     return result;
1637 #else
1638     return this;
1639 #endif
1640 }
1641
1642 CodeBlock* CodeBlock::baselineVersion()
1643 {
1644 #if ENABLE(JIT)
1645     if (JITCode::isBaselineCode(jitType()))
1646         return this;
1647     CodeBlock* result = replacement();
1648     if (!result) {
1649         // This can happen if we're creating the original CodeBlock for an executable.
1650         // Assume that we're the baseline CodeBlock.
1651         RELEASE_ASSERT(jitType() == JITCode::None);
1652         return this;
1653     }
1654     result = result->baselineAlternative();
1655     return result;
1656 #else
1657     return this;
1658 #endif
1659 }
1660
1661 #if ENABLE(JIT)
1662 bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
1663 {
1664     return JITCode::isHigherTier(replacement()->jitType(), typeToReplace);
1665 }
1666
1667 bool CodeBlock::hasOptimizedReplacement()
1668 {
1669     return hasOptimizedReplacement(jitType());
1670 }
1671 #endif
1672
1673 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
1674 {
1675     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1676     return handlerForIndex(bytecodeOffset, requiredHandler);
1677 }
1678
1679 HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
1680 {
1681     if (!m_rareData)
1682         return nullptr;
1683     return HandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler);
1684 }
1685
1686 CallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
1687 {
1688 #if ENABLE(DFG_JIT)
1689     RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
1690     RELEASE_ASSERT(canGetCodeOrigin(originalCallSite));
1691     ASSERT(!!handlerForIndex(originalCallSite.bits()));
1692     CodeOrigin originalOrigin = codeOrigin(originalCallSite);
1693     return m_jitCode->dfgCommon()->addUniqueCallSiteIndex(originalOrigin);
1694 #else
1695     // We never create new on-the-fly exception handling
1696     // call sites outside the DFG/FTL inline caches.
1697     UNUSED_PARAM(originalCallSite);
1698     RELEASE_ASSERT_NOT_REACHED();
1699     return CallSiteIndex(0u);
1700 #endif
1701 }
1702
1703 void CodeBlock::removeExceptionHandlerForCallSite(CallSiteIndex callSiteIndex)
1704 {
1705     RELEASE_ASSERT(m_rareData);
1706     Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
1707     unsigned index = callSiteIndex.bits();
1708     for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
1709         HandlerInfo& handler = exceptionHandlers[i];
1710         if (handler.start <= index && handler.end > index) {
1711             exceptionHandlers.remove(i);
1712             return;
1713         }
1714     }
1715
1716     RELEASE_ASSERT_NOT_REACHED();
1717 }
1718
1719 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
1720 {
1721     RELEASE_ASSERT(bytecodeOffset < instructions().size());
1722     return ownerScriptExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
1723 }
1724
1725 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
1726 {
1727     int divot;
1728     int startOffset;
1729     int endOffset;
1730     unsigned line;
1731     unsigned column;
1732     expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1733     return column;
1734 }
1735
1736 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
1737 {
1738     m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1739     divot += m_sourceOffset;
1740     column += line ? 1 : firstLineColumnOffset();
1741     line += ownerScriptExecutable()->firstLine();
1742 }
1743
1744 bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
1745 {
1746     Interpreter* interpreter = vm()->interpreter;
1747     const Instruction* begin = instructions().begin();
1748     const Instruction* end = instructions().end();
1749     for (const Instruction* it = begin; it != end;) {
1750         OpcodeID opcodeID = interpreter->getOpcodeID(*it);
1751         if (opcodeID == op_debug) {
1752             unsigned bytecodeOffset = it - begin;
1753             int unused;
1754             unsigned opDebugLine;
1755             unsigned opDebugColumn;
1756             expressionRangeForBytecodeOffset(bytecodeOffset, unused, unused, unused, opDebugLine, opDebugColumn);
1757             if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
1758                 return true;
1759         }
1760         it += opcodeLengths[opcodeID];
1761     }
1762     return false;
1763 }
1764
1765 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
1766 {
1767     ConcurrentJSLocker locker(m_lock);
1768
1769     m_rareCaseProfiles.shrinkToFit();
1770     
1771     if (shrinkMode == EarlyShrink) {
1772         m_constantRegisters.shrinkToFit();
1773         m_constantsSourceCodeRepresentation.shrinkToFit();
1774         
1775         if (m_rareData) {
1776             m_rareData->m_switchJumpTables.shrinkToFit();
1777             m_rareData->m_stringSwitchJumpTables.shrinkToFit();
1778         }
1779     } // else don't shrink these, because we may already have handed out pointers into these tables.
1780 }
1781
1782 #if ENABLE(JIT)
1783 void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
1784 {
1785     noticeIncomingCall(callerFrame);
1786     m_incomingCalls.push(incoming);
1787 }
1788
1789 void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
1790 {
1791     noticeIncomingCall(callerFrame);
1792     m_incomingPolymorphicCalls.push(incoming);
1793 }
1794 #endif // ENABLE(JIT)
1795
1796 void CodeBlock::unlinkIncomingCalls()
1797 {
1798     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
1799         m_incomingLLIntCalls.begin()->unlink();
1800 #if ENABLE(JIT)
1801     while (m_incomingCalls.begin() != m_incomingCalls.end())
1802         m_incomingCalls.begin()->unlink(*vm());
1803     while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
1804         m_incomingPolymorphicCalls.begin()->unlink(*vm());
1805 #endif // ENABLE(JIT)
1806 }
1807
1808 void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
1809 {
1810     noticeIncomingCall(callerFrame);
1811     m_incomingLLIntCalls.push(incoming);
1812 }
1813
1814 CodeBlock* CodeBlock::newReplacement()
1815 {
1816     return ownerScriptExecutable()->newReplacementCodeBlockFor(specializationKind());
1817 }
1818
1819 #if ENABLE(JIT)
1820 CodeBlock* CodeBlock::replacement()
1821 {
1822     const ClassInfo* classInfo = this->classInfo(*vm());
1823
1824     if (classInfo == FunctionCodeBlock::info())
1825         return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
1826
1827     if (classInfo == EvalCodeBlock::info())
1828         return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
1829
1830     if (classInfo == ProgramCodeBlock::info())
1831         return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
1832
1833     if (classInfo == ModuleProgramCodeBlock::info())
1834         return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock();
1835
1836     RELEASE_ASSERT_NOT_REACHED();
1837     return nullptr;
1838 }
1839
1840 DFG::CapabilityLevel CodeBlock::computeCapabilityLevel()
1841 {
1842     const ClassInfo* classInfo = this->classInfo(*vm());
1843
1844     if (classInfo == FunctionCodeBlock::info()) {
1845         if (m_isConstructor)
1846             return DFG::functionForConstructCapabilityLevel(this);
1847         return DFG::functionForCallCapabilityLevel(this);
1848     }
1849
1850     if (classInfo == EvalCodeBlock::info())
1851         return DFG::evalCapabilityLevel(this);
1852
1853     if (classInfo == ProgramCodeBlock::info())
1854         return DFG::programCapabilityLevel(this);
1855
1856     if (classInfo == ModuleProgramCodeBlock::info())
1857         return DFG::programCapabilityLevel(this);
1858
1859     RELEASE_ASSERT_NOT_REACHED();
1860     return DFG::CannotCompile;
1861 }
1862
1863 #endif // ENABLE(JIT)
1864
1865 void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
1866 {
1867 #if !ENABLE(DFG_JIT)
1868     UNUSED_PARAM(mode);
1869     UNUSED_PARAM(detail);
1870 #endif
1871     
1872     CODEBLOCK_LOG_EVENT(this, "jettison", ("due to ", reason, ", counting = ", mode == CountReoptimization, ", detail = ", pointerDump(detail)));
1873
1874     RELEASE_ASSERT(reason != Profiler::NotJettisoned);
1875     
1876 #if ENABLE(DFG_JIT)
1877     if (DFG::shouldDumpDisassembly()) {
1878         dataLog("Jettisoning ", *this);
1879         if (mode == CountReoptimization)
1880             dataLog(" and counting reoptimization");
1881         dataLog(" due to ", reason);
1882         if (detail)
1883             dataLog(", ", *detail);
1884         dataLog(".\n");
1885     }
1886     
1887     if (reason == Profiler::JettisonDueToWeakReference) {
1888         if (DFG::shouldDumpDisassembly()) {
1889             dataLog(*this, " will be jettisoned because of the following dead references:\n");
1890             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1891             for (auto& transition : dfgCommon->transitions) {
1892                 JSCell* origin = transition.m_codeOrigin.get();
1893                 JSCell* from = transition.m_from.get();
1894                 JSCell* to = transition.m_to.get();
1895                 if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
1896                     continue;
1897                 dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
1898             }
1899             for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
1900                 JSCell* weak = dfgCommon->weakReferences[i].get();
1901                 if (Heap::isMarked(weak))
1902                     continue;
1903                 dataLog("    Weak reference ", RawPointer(weak), ".\n");
1904             }
1905         }
1906     }
1907 #endif // ENABLE(DFG_JIT)
1908
1909     DeferGCForAWhile deferGC(*heap());
1910     
1911     // We want to accomplish two things here:
1912     // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
1913     //    we should OSR exit at the top of the next bytecode instruction after the return.
1914     // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
1915
1916 #if ENABLE(DFG_JIT)
1917     if (reason != Profiler::JettisonDueToOldAge) {
1918         if (Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get())
1919             compilation->setJettisonReason(reason, detail);
1920         
1921         // This accomplishes (1), and does its own book-keeping about whether it has already happened.
1922         if (!jitCode()->dfgCommon()->invalidate()) {
1923             // We've already been invalidated.
1924             RELEASE_ASSERT(this != replacement() || (m_vm->heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable())));
1925             return;
1926         }
1927     }
1928     
1929     if (DFG::shouldDumpDisassembly())
1930         dataLog("    Did invalidate ", *this, "\n");
1931     
1932     // Count the reoptimization if that's what the user wanted.
1933     if (mode == CountReoptimization) {
1934         // FIXME: Maybe this should call alternative().
1935         // https://bugs.webkit.org/show_bug.cgi?id=123677
1936         baselineAlternative()->countReoptimization();
1937         if (DFG::shouldDumpDisassembly())
1938             dataLog("    Did count reoptimization for ", *this, "\n");
1939     }
1940     
1941     if (this != replacement()) {
1942         // This means that we were never the entrypoint. This can happen for OSR entry code
1943         // blocks.
1944         return;
1945     }
1946
1947     if (alternative())
1948         alternative()->optimizeAfterWarmUp();
1949
1950     if (reason != Profiler::JettisonDueToOldAge && reason != Profiler::JettisonDueToVMTraps)
1951         tallyFrequentExitSites();
1952 #endif // ENABLE(DFG_JIT)
1953
1954     // Jettison can happen during GC. We don't want to install code to a dead executable
1955     // because that would add a dead object to the remembered set.
1956     if (m_vm->heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable()))
1957         return;
1958
1959     // This accomplishes (2).
1960     ownerScriptExecutable()->installCode(
1961         m_globalObject->vm(), alternative(), codeType(), specializationKind());
1962
1963 #if ENABLE(DFG_JIT)
1964     if (DFG::shouldDumpDisassembly())
1965         dataLog("    Did install baseline version of ", *this, "\n");
1966 #endif // ENABLE(DFG_JIT)
1967 }
1968
1969 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
1970 {
1971     if (!codeOrigin.inlineCallFrame)
1972         return globalObject();
1973     return codeOrigin.inlineCallFrame->baselineCodeBlock->globalObject();
1974 }
1975
1976 class RecursionCheckFunctor {
1977 public:
1978     RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
1979         : m_startCallFrame(startCallFrame)
1980         , m_codeBlock(codeBlock)
1981         , m_depthToCheck(depthToCheck)
1982         , m_foundStartCallFrame(false)
1983         , m_didRecurse(false)
1984     { }
1985
1986     StackVisitor::Status operator()(StackVisitor& visitor) const
1987     {
1988         CallFrame* currentCallFrame = visitor->callFrame();
1989
1990         if (currentCallFrame == m_startCallFrame)
1991             m_foundStartCallFrame = true;
1992
1993         if (m_foundStartCallFrame) {
1994             if (visitor->callFrame()->codeBlock() == m_codeBlock) {
1995                 m_didRecurse = true;
1996                 return StackVisitor::Done;
1997             }
1998
1999             if (!m_depthToCheck--)
2000                 return StackVisitor::Done;
2001         }
2002
2003         return StackVisitor::Continue;
2004     }
2005
2006     bool didRecurse() const { return m_didRecurse; }
2007
2008 private:
2009     CallFrame* m_startCallFrame;
2010     CodeBlock* m_codeBlock;
2011     mutable unsigned m_depthToCheck;
2012     mutable bool m_foundStartCallFrame;
2013     mutable bool m_didRecurse;
2014 };
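
// noticeIncomingCall() below walks the stack with the functor above to detect
// recursion within the inlining depth budget; a recursive call won't be
// inlined, so "should always be inlined" (SABI) is cleared in that case.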
2015
2016 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
2017 {
2018     CodeBlock* callerCodeBlock = callerFrame->codeBlock();
2019     
2020     if (Options::verboseCallLink())
2021         dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
2022     
2023 #if ENABLE(DFG_JIT)
2024     if (!m_shouldAlwaysBeInlined)
2025         return;
2026     
2027     if (!callerCodeBlock) {
2028         m_shouldAlwaysBeInlined = false;
2029         if (Options::verboseCallLink())
2030             dataLog("    Clearing SABI because caller is native.\n");
2031         return;
2032     }
2033
2034     if (!hasBaselineJITProfiling())
2035         return;
2036
2037     if (!DFG::mightInlineFunction(this))
2038         return;
2039
2040     if (!canInline(capabilityLevelState()))
2041         return;
2042     
2043     if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
2044         m_shouldAlwaysBeInlined = false;
2045         if (Options::verboseCallLink())
2046             dataLog("    Clearing SABI because caller is too large.\n");
2047         return;
2048     }
2049
2050     if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
2051         // If the caller is still in the interpreter, then we can't expect inlining to
2052         // happen anytime soon. Assume it's profitable to optimize it separately. This
2053         // ensures that a function is SABI only if it is called no more frequently than
2054         // any of its callers.
2055         m_shouldAlwaysBeInlined = false;
2056         if (Options::verboseCallLink())
2057             dataLog("    Clearing SABI because caller is in LLInt.\n");
2058         return;
2059     }
2060     
2061     if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
2062         m_shouldAlwaysBeInlined = false;
2063         if (Options::verboseCallLink())
2064             dataLog("    Clearing SABI because caller was already optimized.\n");
2065         return;
2066     }
2067     
2068     if (callerCodeBlock->codeType() != FunctionCode) {
2069         // If the caller is either eval or global code, assume that that won't be
2070         // optimized anytime soon. For eval code this is particularly true since we
2071         // delay eval optimization by a *lot*.
2072         m_shouldAlwaysBeInlined = false;
2073         if (Options::verboseCallLink())
2074             dataLog("    Clearing SABI because caller is not a function.\n");
2075         return;
2076     }
2077
2078     // Recursive calls won't be inlined.
2079     RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
2080     vm()->topCallFrame->iterate(functor);
2081
2082     if (functor.didRecurse()) {
2083         if (Options::verboseCallLink())
2084             dataLog("    Clearing SABI because recursion was detected.\n");
2085         m_shouldAlwaysBeInlined = false;
2086         return;
2087     }
2088     
2089     if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) {
2090         dataLog("In call from ", FullCodeOrigin(callerCodeBlock, callerFrame->codeOrigin()), " to ", *this, ": caller's DFG capability level is not set.\n");
2091         CRASH();
2092     }
2093     
2094     if (canCompile(callerCodeBlock->capabilityLevelState()))
2095         return;
2096     
2097     if (Options::verboseCallLink())
2098         dataLog("    Clearing SABI because the caller is not a DFG candidate.\n");
2099     
2100     m_shouldAlwaysBeInlined = false;
2101 #endif
2102 }
2103
2104 unsigned CodeBlock::reoptimizationRetryCounter() const
2105 {
2106 #if ENABLE(JIT)
2107     ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
2108     return m_reoptimizationRetryCounter;
2109 #else
2110     return 0;
2111 #endif // ENABLE(JIT)
2112 }
2113
2114 #if ENABLE(JIT)
2115 void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters)
2116 {
2117     m_calleeSaveRegisters = std::make_unique<RegisterAtOffsetList>(calleeSaveRegisters);
2118 }
2119
2120 void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList)
2121 {
2122     m_calleeSaveRegisters = WTFMove(registerAtOffsetList);
2123 }
2124     
2125 static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters)
2126 {
2127     static const unsigned cpuRegisterSize = sizeof(void*);
2128     return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * cpuRegisterSize) / sizeof(Register));
2130 }
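
// For example, on a 64-bit target cpuRegisterSize == sizeof(Register) == 8,
// so the rounding is the identity; on a 32-bit target two 4-byte CPU
// registers pack into each 8-byte Register slot, so 5 callee saves round up
// to 3 virtual registers. (This assumes sizeof(Register) == 8, i.e. one
// JSValue-sized slot.)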
2131
2132 size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()
2133 {
2134     return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters());
2135 }
2136
2137 size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters()
2138 {
2139     return roundCalleeSaveSpaceAsVirtualRegisters(m_calleeSaveRegisters->size());
2140 }
2141
2142 void CodeBlock::countReoptimization()
2143 {
2144     m_reoptimizationRetryCounter++;
2145     if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
2146         m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
2147 }
2148
2149 unsigned CodeBlock::numberOfDFGCompiles()
2150 {
2151     ASSERT(JITCode::isBaselineCode(jitType()));
2152     if (Options::testTheFTL()) {
2153         if (m_didFailFTLCompilation)
2154             return 1000000;
2155         return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
2156     }
2157     return (JITCode::isOptimizingJIT(replacement()->jitType()) ? 1 : 0) + m_reoptimizationRetryCounter;
2158 }
2159
2160 int32_t CodeBlock::codeTypeThresholdMultiplier() const
2161 {
2162     if (codeType() == EvalCode)
2163         return Options::evalThresholdMultiplier();
2164     
2165     return 1;
2166 }
2167
2168 double CodeBlock::optimizationThresholdScalingFactor()
2169 {
2170     // This expression arises from doing a least-squares fit of
2171     //
2172     // F[x_] := a * Sqrt[x + b] + Abs[c * x] + d
2173     //
2174     // against the data points:
2175     //
2176     //    x       F[x_]
2177     //    10       0.9          (smallest reasonable code block)
2178     //   200       1.0          (typical small-ish code block)
2179     //   320       1.2          (something I saw in 3d-cube that I wanted to optimize)
2180     //  1268       5.0          (something I saw in 3d-cube that I didn't want to optimize)
2181     //  4000       5.5          (random large size, used to cause the function to converge to a shallow curve of some sort)
2182     // 10000       6.0          (similar to above)
2183     //
2184     // I achieve the minimization using the following Mathematica code:
2185     //
2186     // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
2187     //
2188     // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
2189     //
2190     // solution = 
2191     //     Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
2192     //         {a, b, c, d}][[2]]
2193     //
2194     // And the code below (to initialize a, b, c, d) is generated by:
2195     //
2196     // Print["const double " <> ToString[#[[1]]] <> " = " <>
2197     //     If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
2198     //
2199     // We've long known the following to be true:
2200     // - Small code blocks are cheap to optimize and so we should do it sooner rather
2201     //   than later.
2202     // - Large code blocks are expensive to optimize and so we should postpone doing so,
2203     //   and sometimes have a large enough threshold that we never optimize them.
2204     // - The difference in cost is not totally linear because (a) just invoking the
2205     //   DFG incurs some base cost and (b) for large code blocks there is enough slop
2206     //   in the correlation between instruction count and the actual compilation cost
2207     //   that for those large blocks, the instruction count should not have a strong
2208     //   influence on our threshold.
2209     //
2210     // I knew the goals but I didn't know how to achieve them; so I picked an interesting
2211     // example where the heuristics were right (code block in 3d-cube with instruction
2212     // count 320, which got compiled early as it should have been) and one where they were
2213     // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
2214     // to compile and didn't run often enough to warrant compilation in my opinion), and
2215     // then threw in additional data points that represented my own guess of what our
2216     // heuristics should do for some round-numbered examples.
2217     //
2218     // The expression to which I decided to fit the data arose because I started with an
2219     // affine function, and then did two things: put the linear part in an Abs to ensure
2220     // that the fit didn't end up choosing a negative value of c (which would result in
2221     // the function turning over and going negative for large x) and I threw in a Sqrt
2222     // term because Sqrt represents my intuition that the function should be more sensitive
2223     // to small changes in small values of x, but less sensitive when x gets large.
2224     
2225     // Note that the current fit essentially eliminates the linear portion of the
2226     // expression (c == 0.0).
2227     const double a = 0.061504;
2228     const double b = 1.02406;
2229     const double c = 0.0;
2230     const double d = 0.825914;
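    //
    // Illustrative spot checks of the fitted curve with these constants
    // (c == 0, so the linear term vanishes):
    //
    //     F(10)    = 0.825914 + 0.061504 * Sqrt[11.02406]    ~= 1.03
    //     F(1268)  = 0.825914 + 0.061504 * Sqrt[1269.02406]  ~= 3.02
    //     F(10000) = 0.825914 + 0.061504 * Sqrt[10001.02406] ~= 6.98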
2231     
2232     double instructionCount = this->instructionCount();
2233     
2234     ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return a near-constant value (roughly d), which makes no sense.
2235     
2236     double result = d + a * sqrt(instructionCount + b) + c * instructionCount;
2237     
2238     result *= codeTypeThresholdMultiplier();
2239     
2240     if (Options::verboseOSR()) {
2241         dataLog(
2242             *this, ": instruction count is ", instructionCount,
2243             ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
2244             "\n");
2245     }
2246     return result;
2247 }
2248
2249 static int32_t clipThreshold(double threshold)
2250 {
2251     if (threshold < 1.0)
2252         return 1;
2253     
2254     if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
2255         return std::numeric_limits<int32_t>::max();
2256     
2257     return static_cast<int32_t>(threshold);
2258 }
2259
2260 int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
2261 {
2262     return clipThreshold(
2263         static_cast<double>(desiredThreshold) *
2264         optimizationThresholdScalingFactor() *
2265         (1 << reoptimizationRetryCounter()));
2266 }
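
// For example (hypothetical numbers): adjustedCounterValue(1000) with a
// scaling factor of ~3.0 and two prior reoptimizations yields
// clipThreshold(1000 * 3.0 * (1 << 2)) == 12000.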
2267
2268 bool CodeBlock::checkIfOptimizationThresholdReached()
2269 {
2270 #if ENABLE(DFG_JIT)
2271     if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
2272         if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
2273             == DFG::Worklist::Compiled) {
2274             optimizeNextInvocation();
2275             return true;
2276         }
2277     }
2278 #endif
2279     
2280     return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
2281 }
2282
2283 void CodeBlock::optimizeNextInvocation()
2284 {
2285     if (Options::verboseOSR())
2286         dataLog(*this, ": Optimizing next invocation.\n");
2287     m_jitExecuteCounter.setNewThreshold(0, this);
2288 }
2289
2290 void CodeBlock::dontOptimizeAnytimeSoon()
2291 {
2292     if (Options::verboseOSR())
2293         dataLog(*this, ": Not optimizing anytime soon.\n");
2294     m_jitExecuteCounter.deferIndefinitely();
2295 }
2296
2297 void CodeBlock::optimizeAfterWarmUp()
2298 {
2299     if (Options::verboseOSR())
2300         dataLog(*this, ": Optimizing after warm-up.\n");
2301 #if ENABLE(DFG_JIT)
2302     m_jitExecuteCounter.setNewThreshold(
2303         adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
2304 #endif
2305 }
2306
2307 void CodeBlock::optimizeAfterLongWarmUp()
2308 {
2309     if (Options::verboseOSR())
2310         dataLog(*this, ": Optimizing after long warm-up.\n");
2311 #if ENABLE(DFG_JIT)
2312     m_jitExecuteCounter.setNewThreshold(
2313         adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
2314 #endif
2315 }
2316
2317 void CodeBlock::optimizeSoon()
2318 {
2319     if (Options::verboseOSR())
2320         dataLog(*this, ": Optimizing soon.\n");
2321 #if ENABLE(DFG_JIT)
2322     m_jitExecuteCounter.setNewThreshold(
2323         adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
2324 #endif
2325 }
2326
2327 void CodeBlock::forceOptimizationSlowPathConcurrently()
2328 {
2329     if (Options::verboseOSR())
2330         dataLog(*this, ": Forcing slow path concurrently.\n");
2331     m_jitExecuteCounter.forceSlowPathConcurrently();
2332 }
2333
2334 #if ENABLE(DFG_JIT)
2335 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
2336 {
2337     JITCode::JITType type = jitType();
2338     if (type != JITCode::BaselineJIT) {
2339         dataLog(*this, ": expected to have baseline code but have ", type, "\n");
2340         RELEASE_ASSERT_NOT_REACHED();
2341     }
2342     
2343     CodeBlock* theReplacement = replacement();
2344     if ((result == CompilationSuccessful) != (theReplacement != this)) {
2345         dataLog(*this, ": we have result = ", result, " but ");
2346         if (theReplacement == this)
2347             dataLog("we are our own replacement.\n");
2348         else
2349             dataLog("our replacement is ", pointerDump(theReplacement), "\n");
2350         RELEASE_ASSERT_NOT_REACHED();
2351     }
2352     
2353     switch (result) {
2354     case CompilationSuccessful:
2355         RELEASE_ASSERT(JITCode::isOptimizingJIT(replacement()->jitType()));
2356         optimizeNextInvocation();
2357         return;
2358     case CompilationFailed:
2359         dontOptimizeAnytimeSoon();
2360         return;
2361     case CompilationDeferred:
2362         // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
2363         // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
2364         // necessarily guarantee anything. So, we make sure that even if that
2365         // function ends up being a no-op, we still eventually retry and realize
2366         // that we have optimized code ready.
2367         optimizeAfterWarmUp();
2368         return;
2369     case CompilationInvalidated:
2370         // Retry with exponential backoff.
2371         countReoptimization();
2372         optimizeAfterWarmUp();
2373         return;
2374     }
2375     
2376     dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
2377     RELEASE_ASSERT_NOT_REACHED();
2378 }
2379
2380 #endif
2381     
2382 uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
2383 {
2384     ASSERT(JITCode::isOptimizingJIT(jitType()));
2385     // Compute this the lame way so we don't saturate. This is called infrequently
2386     // enough that this loop won't hurt us.
2387     unsigned result = desiredThreshold;
2388     for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
2389         unsigned newResult = result << 1;
2390         if (newResult < result)
2391             return std::numeric_limits<uint32_t>::max();
2392         result = newResult;
2393     }
2394     return result;
2395 }
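
// For example, adjustedExitCountThreshold(100) with a baseline retry counter
// of 3 yields 100 << 3 == 800; if a doubling would wrap past 2^32, the
// threshold saturates to std::numeric_limits<uint32_t>::max() instead.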
2396
2397 uint32_t CodeBlock::exitCountThresholdForReoptimization()
2398 {
2399     return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
2400 }
2401
2402 uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
2403 {
2404     return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
2405 }
2406
2407 bool CodeBlock::shouldReoptimizeNow()
2408 {
2409     return osrExitCounter() >= exitCountThresholdForReoptimization();
2410 }
2411
2412 bool CodeBlock::shouldReoptimizeFromLoopNow()
2413 {
2414     return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
2415 }
2416 #endif
2417
2418 ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
2419 {
2420     for (auto& arrayProfile : m_arrayProfiles) {
2421         if (arrayProfile.bytecodeOffset() == bytecodeOffset)
2422             return &arrayProfile;
2423     }
2424     return nullptr;
2425 }
2426
2427 ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
2428 {
2429     ConcurrentJSLocker locker(m_lock);
2430     return getArrayProfile(locker, bytecodeOffset);
2431 }
2432
2433 ArrayProfile* CodeBlock::addArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
2434 {
2435     m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
2436     return &m_arrayProfiles.last();
2437 }
2438
2439 ArrayProfile* CodeBlock::addArrayProfile(unsigned bytecodeOffset)
2440 {
2441     ConcurrentJSLocker locker(m_lock);
2442     return addArrayProfile(locker, bytecodeOffset);
2443 }
2444
2445 ArrayProfile* CodeBlock::getOrAddArrayProfile(const ConcurrentJSLocker& locker, unsigned bytecodeOffset)
2446 {
2447     ArrayProfile* result = getArrayProfile(locker, bytecodeOffset);
2448     if (result)
2449         return result;
2450     return addArrayProfile(locker, bytecodeOffset);
2451 }
2452
2453 ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
2454 {
2455     ConcurrentJSLocker locker(m_lock);
2456     return getOrAddArrayProfile(locker, bytecodeOffset);
2457 }
2458
2459 #if ENABLE(DFG_JIT)
2460 Vector<CodeOrigin, 0, UnsafeVectorOverflow>& CodeBlock::codeOrigins()
2461 {
2462     return m_jitCode->dfgCommon()->codeOrigins;
2463 }
2464
2465 size_t CodeBlock::numberOfDFGIdentifiers() const
2466 {
2467     if (!JITCode::isOptimizingJIT(jitType()))
2468         return 0;
2469     
2470     return m_jitCode->dfgCommon()->dfgIdentifiers.size();
2471 }
2472
2473 const Identifier& CodeBlock::identifier(int index) const
2474 {
2475     size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
2476     if (static_cast<unsigned>(index) < unlinkedIdentifiers)
2477         return m_unlinkedCode->identifier(index);
2478     ASSERT(JITCode::isOptimizingJIT(jitType()));
2479     return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
2480 }
2481 #endif // ENABLE(DFG_JIT)
2482
2483 void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
2484 {
2485     ConcurrentJSLocker locker(m_lock);
2486     
2487     numberOfLiveNonArgumentValueProfiles = 0;
2488     numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
2489     for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
2490         ValueProfile* profile = getFromAllValueProfiles(i);
2491         unsigned numSamples = profile->totalNumberOfSamples();
2492         if (numSamples > ValueProfile::numberOfBuckets)
2493             numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
2494         numberOfSamplesInProfiles += numSamples;
2495         if (profile->m_bytecodeOffset < 0) {
2496             profile->computeUpdatedPrediction(locker);
2497             continue;
2498         }
2499         if (profile->numberOfSamples() || profile->m_prediction != SpecNone)
2500             numberOfLiveNonArgumentValueProfiles++;
2501         profile->computeUpdatedPrediction(locker);
2502     }
2503     
2504 #if ENABLE(DFG_JIT)
2505     m_lazyOperandValueProfiles.computeUpdatedPredictions(locker);
2506 #endif
2507 }
2508
2509 void CodeBlock::updateAllValueProfilePredictions()
2510 {
2511     unsigned ignoredValue1, ignoredValue2;
2512     updateAllPredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
2513 }
2514
2515 void CodeBlock::updateAllArrayPredictions()
2516 {
2517     ConcurrentJSLocker locker(m_lock);
2518     
2519     for (unsigned i = m_arrayProfiles.size(); i--;)
2520         m_arrayProfiles[i].computeUpdatedPrediction(locker, this);
2521     
2522     // Don't count these either, for similar reasons.
2523     for (unsigned i = m_arrayAllocationProfiles.size(); i--;)
2524         m_arrayAllocationProfiles[i].updateIndexingType();
2525 }
2526
2527 void CodeBlock::updateAllPredictions()
2528 {
2529     updateAllValueProfilePredictions();
2530     updateAllArrayPredictions();
2531 }
2532
2533 bool CodeBlock::shouldOptimizeNow()
2534 {
2535     if (Options::verboseOSR())
2536         dataLog("Considering optimizing ", *this, "...\n");
2537
2538     if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
2539         return true;
2540     
2541     updateAllArrayPredictions();
2542     
2543     unsigned numberOfLiveNonArgumentValueProfiles;
2544     unsigned numberOfSamplesInProfiles;
2545     updateAllPredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
2546
2547     if (Options::verboseOSR()) {
2548         dataLogF(
2549             "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
2550             (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles(),
2551             numberOfLiveNonArgumentValueProfiles, numberOfValueProfiles(),
2552             (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles(),
2553             numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfValueProfiles());
2554     }
2555
2556     if ((!numberOfValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles() >= Options::desiredProfileLivenessRate())
2557         && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
2558         && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
2559         return true;
2560     
2561     ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
2562     m_optimizationDelayCounter++;
2563     optimizeAfterWarmUp();
2564     return false;
2565 }
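
// Worked example with hypothetical numbers: 45 of 50 live non-argument
// profiles gives 90% liveness; assuming, say, 8 buckets per profile, 360
// counted samples across 60 total profiles gives 360 / (8 * 60) == 0.75
// fullness. If both rates clear their Options thresholds and the delay
// counter is satisfied, we optimize now; otherwise we bump
// m_optimizationDelayCounter and retry after another warm-up period.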
2566
2567 #if ENABLE(DFG_JIT)
2568 void CodeBlock::tallyFrequentExitSites()
2569 {
2570     ASSERT(JITCode::isOptimizingJIT(jitType()));
2571     ASSERT(alternative()->jitType() == JITCode::BaselineJIT);
2572     
2573     CodeBlock* profiledBlock = alternative();
2574     
2575     switch (jitType()) {
2576     case JITCode::DFGJIT: {
2577         DFG::JITCode* jitCode = m_jitCode->dfg();
2578         for (auto& exit : jitCode->osrExit)
2579             exit.considerAddingAsFrequentExitSite(profiledBlock);
2580         break;
2581     }
2582
2583 #if ENABLE(FTL_JIT)
2584     case JITCode::FTLJIT: {
2585         // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
2586         // vector contains a totally different type that just so happens to behave like
2587         // DFG::JITCode::osrExit.
2588         FTL::JITCode* jitCode = m_jitCode->ftl();
2589         for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
2590             FTL::OSRExit& exit = jitCode->osrExit[i];
2591             exit.considerAddingAsFrequentExitSite(profiledBlock);
2592         }
2593         break;
2594     }
2595 #endif
2596         
2597     default:
2598         RELEASE_ASSERT_NOT_REACHED();
2599         break;
2600     }
2601 }
2602 #endif // ENABLE(DFG_JIT)
2603
2604 #if ENABLE(VERBOSE_VALUE_PROFILE)
2605 void CodeBlock::dumpValueProfiles()
2606 {
2607     dataLog("ValueProfile for ", *this, ":\n");
2608     for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
2609         ValueProfile* profile = getFromAllValueProfiles(i);
2610         if (profile->m_bytecodeOffset < 0) {
2611             ASSERT(profile->m_bytecodeOffset == -1);
2612             dataLogF("   arg = %u: ", i);
2613         } else
2614             dataLogF("   bc = %d: ", profile->m_bytecodeOffset);
2615         if (!profile->numberOfSamples() && profile->m_prediction == SpecNone) {
2616             dataLogF("<empty>\n");
2617             continue;
2618         }
2619         profile->dump(WTF::dataFile());
2620         dataLogF("\n");
2621     }
2622     dataLog("RareCaseProfile for ", *this, ":\n");
2623     for (unsigned i = 0; i < numberOfRareCaseProfiles(); ++i) {
2624         RareCaseProfile* profile = rareCaseProfile(i);
2625         dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
2626     }
2627 }
2628 #endif // ENABLE(VERBOSE_VALUE_PROFILE)
2629
2630 unsigned CodeBlock::frameRegisterCount()
2631 {
2632     switch (jitType()) {
2633     case JITCode::InterpreterThunk:
2634         return LLInt::frameRegisterCountFor(this);
2635
2636 #if ENABLE(JIT)
2637     case JITCode::BaselineJIT:
2638         return JIT::frameRegisterCountFor(this);
2639 #endif // ENABLE(JIT)
2640
2641 #if ENABLE(DFG_JIT)
2642     case JITCode::DFGJIT:
2643     case JITCode::FTLJIT:
2644         return jitCode()->dfgCommon()->frameRegisterCount;
2645 #endif // ENABLE(DFG_JIT)
2646         
2647     default:
2648         RELEASE_ASSERT_NOT_REACHED();
2649         return 0;
2650     }
2651 }
2652
2653 int CodeBlock::stackPointerOffset()
2654 {
2655     return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
2656 }
2657
2658 size_t CodeBlock::predictedMachineCodeSize()
2659 {
2660     // This will be called from CodeBlock::CodeBlock before either m_vm or the
2661     // instructions have been initialized. It's OK to return 0 because what will really
2662     // matter is the recomputation of this value when the slow path is triggered.
2663     if (!m_vm)
2664         return 0;
2665     
2666     if (!*m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
2667         return 0; // It's as good a prediction as we'll get.
2668     
2669     // Be conservative: return a size that will be an overestimation 84% of the time.
2670     double multiplier = m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->mean() +
2671         m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->standardDeviation();
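    // (For normally distributed data, the mean plus one standard deviation
    // sits near the 84th percentile; that is where the 84% figure above
    // comes from.)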
2672     
2673     // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
2674     // here is OK, since this whole method is just a heuristic.
2675     if (multiplier < 0 || multiplier > 1000)
2676         return 0;
2677     
2678     double doubleResult = multiplier * m_instructions.size();
2679     
2680     // Be even more paranoid: silently reject values that won't fit into a size_t. If
2681     // the function is so huge that we can't even fit it into virtual memory then we
2682     // should probably have some other guards in place to prevent us from even getting
2683     // to this point.
2684     if (doubleResult > std::numeric_limits<size_t>::max())
2685         return 0;
2686     
2687     return static_cast<size_t>(doubleResult);
2688 }
2689
2690 bool CodeBlock::usesOpcode(OpcodeID opcodeID)
2691 {
2692     Interpreter* interpreter = vm()->interpreter;
2693     Instruction* instructionsBegin = instructions().begin();
2694     unsigned instructionCount = instructions().size();
2695     
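    // Walk the instruction stream one opcode at a time. There is no per-instruction
    // length field in the stream itself, so we advance by each opcode's statically
    // known length, which the FOR_EACH_OPCODE_ID expansion below supplies per case.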
2696     for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount; ) {
2697         switch (interpreter->getOpcodeID(instructionsBegin[bytecodeOffset])) {
2698 #define DEFINE_OP(curOpcode, length)        \
2699         case curOpcode:                     \
2700             if (curOpcode == opcodeID)      \
2701                 return true;                \
2702             bytecodeOffset += length;       \
2703             break;
2704             FOR_EACH_OPCODE_ID(DEFINE_OP)
2705 #undef DEFINE_OP
2706         default:
2707             RELEASE_ASSERT_NOT_REACHED();
2708             break;
2709         }
2710     }
2711     
2712     return false;
2713 }
2714
2715 String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
2716 {
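    // Look for a SymbolTable among the constant registers and search it for an entry
    // whose VarOffset matches this register; that recovers the source-level variable
    // name when the register backs a named variable.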
2717     for (auto& constantRegister : m_constantRegisters) {
2718         if (constantRegister.get().isEmpty())
2719             continue;
2720         if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(*vm(), constantRegister.get())) {
2721             ConcurrentJSLocker locker(symbolTable->m_lock);
2722             auto end = symbolTable->end(locker);
2723             for (auto ptr = symbolTable->begin(locker); ptr != end; ++ptr) {
2724                 if (ptr->value.varOffset() == VarOffset(virtualRegister)) {
2725                     // FIXME: This won't work from the compilation thread.
2726                     // https://bugs.webkit.org/show_bug.cgi?id=115300
2727                     return ptr->key.get();
2728                 }
2729             }
2730         }
2731     }
2732     if (virtualRegister == thisRegister())
2733         return ASCIILiteral("this");
2734     if (virtualRegister.isArgument())
2735         return String::format("arguments[%3d]", virtualRegister.toArgument());
2736
2737     return "";
2738 }
2739
2740 ValueProfile* CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset)
2741 {
2742     OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(instructions()[bytecodeOffset]);
2743     unsigned length = opcodeLength(opcodeID);
2744     return instructions()[bytecodeOffset + length - 1].u.profile;
2745 }
2746
2747 void CodeBlock::validate()
2748 {
2749     BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect CodeBlock footprint.
2750     
2751     FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(0);
2752     
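    // At bytecode offset 0 no local has been stored to yet, so every callee local
    // should be dead. Any live bit here therefore indicates a bug in the liveness
    // analysis, which is what the two checks below are asserting.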
2753     if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeLocals)) {
2754         beginValidationDidFail();
2755         dataLog("    Wrong number of bits in result!\n");
2756         dataLog("    Result: ", liveAtHead, "\n");
2757         dataLog("    Bit count: ", liveAtHead.numBits(), "\n");
2758         endValidationDidFail();
2759     }
2760     
2761     for (unsigned i = m_numCalleeLocals; i--;) {
2762         VirtualRegister reg = virtualRegisterForLocal(i);
2763         
2764         if (liveAtHead[i]) {
2765             beginValidationDidFail();
2766             dataLog("    Variable ", reg, " is expected to be dead.\n");
2767             dataLog("    Result: ", liveAtHead, "\n");
2768             endValidationDidFail();
2769         }
2770     }
2771 }
2772
2773 void CodeBlock::beginValidationDidFail()
2774 {
2775     dataLog("Validation failure in ", *this, ":\n");
2776     dataLog("\n");
2777 }
2778
2779 void CodeBlock::endValidationDidFail()
2780 {
2781     dataLog("\n");
2782     dumpBytecode();
2783     dataLog("\n");
2784     dataLog("Validation failure.\n");
2785     RELEASE_ASSERT_NOT_REACHED();
2786 }
2787
2788 void CodeBlock::addBreakpoint(unsigned numBreakpoints)
2789 {
2790     m_numBreakpoints += numBreakpoints;
2791     ASSERT(m_numBreakpoints);
2792     if (JITCode::isOptimizingJIT(jitType()))
2793         jettison(Profiler::JettisonDueToDebuggerBreakpoint);
2794 }
2795
2796 void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
2797 {
2798     m_steppingMode = mode;
2799     if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType()))
2800         jettison(Profiler::JettisonDueToDebuggerStepping);
2801 }
2802
2803 RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset)
2804 {
2805     m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
2806     return &m_rareCaseProfiles.last();
2807 }
2808
2809 RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(int bytecodeOffset)
2810 {
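    // This assumes m_rareCaseProfiles is sorted by bytecode offset, which holds as
    // long as addRareCaseProfile() is called in ascending bytecode order during
    // compilation; otherwise the binary search could miss an existing profile.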
2811     return tryBinarySearch<RareCaseProfile, int>(
2812         m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
2813         getRareCaseProfileBytecodeOffset);
2814 }
2815
2816 unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(int bytecodeOffset)
2817 {
2818     RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(bytecodeOffset);
2819     if (profile)
2820         return profile->m_counter;
2821     return 0;
2822 }
2823
2824 ArithProfile* CodeBlock::arithProfileForBytecodeOffset(int bytecodeOffset)
2825 {
2826     return arithProfileForPC(instructions().begin() + bytecodeOffset);
2827 }
2828
2829 ArithProfile* CodeBlock::arithProfileForPC(Instruction* pc)
2830 {
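    // The ArithProfile occupies the last operand slot of these opcodes. Assuming the
    // usual operand layouts here: op_negate is { opcode, dst, src, profile }, so the
    // profile is at pc[3]; the binary ops are { opcode, dst, lhs, rhs, profile }, so
    // it is at pc[4].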
2831     auto opcodeID = vm()->interpreter->getOpcodeID(pc[0]);
2832     switch (opcodeID) {
2833     case op_negate:
2834         return bitwise_cast<ArithProfile*>(&pc[3].u.operand);
2835     case op_bitor:
2836     case op_bitand:
2837     case op_bitxor:
2838     case op_add:
2839     case op_mul:
2840     case op_sub:
2841     case op_div:
2842         return bitwise_cast<ArithProfile*>(&pc[4].u.operand);
2843     default:
2844         break;
2845     }
2846
2847     return nullptr;
2848 }
2849
2850 bool CodeBlock::couldTakeSpecialFastCase(int bytecodeOffset)
2851 {
2852     if (!hasBaselineJITProfiling())
2853         return false;
2854     ArithProfile* profile = arithProfileForBytecodeOffset(bytecodeOffset);
2855     if (!profile)
2856         return false;
2857     return profile->tookSpecialFastPath();
2858 }
2859
2860 #if ENABLE(JIT)
2861 DFG::CapabilityLevel CodeBlock::capabilityLevel()
2862 {
2863     DFG::CapabilityLevel result = computeCapabilityLevel();
2864     m_capabilityLevelState = result;
2865     return result;
2866 }
2867 #endif
2868
2869 void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray<Instruction>& instructions)
2870 {
2871     if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets())
2872         return;
2873     const Vector<size_t>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();
2874     for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) {
2875         // Because op_profile_control_flow is emitted at the beginning of every basic block, finding 
2876         // the next op_profile_control_flow will give us the text range of a single basic block.
2877         size_t startIdx = bytecodeOffsets[i];
2878         RELEASE_ASSERT(vm()->interpreter->getOpcodeID(instructions[startIdx]) == op_profile_control_flow);
2879         int basicBlockStartOffset = instructions[startIdx + 1].u.operand;
2880         int basicBlockEndOffset;
2881         if (i + 1 < offsetsLength) {
2882             size_t endIdx = bytecodeOffsets[i + 1];
2883             RELEASE_ASSERT(vm()->interpreter->getOpcodeID(instructions[endIdx]) == op_profile_control_flow);
2884             basicBlockEndOffset = instructions[endIdx + 1].u.operand - 1;
2885         } else {
2886             basicBlockEndOffset = m_sourceOffset + ownerScriptExecutable()->source().length() - 1; // Offset before the closing brace.
2887             basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace, so clamp them to the offset just before it.
2888         }
2889
2890         // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more
2891         // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than 
2892         // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node 
2893         // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different 
2894         // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript 
2895         // program. The condition: 
2896         // (basicBlockEndOffset < basicBlockStartOffset) 
2897         // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic 
2898         // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These 
2899         // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same 
2900         // internal data structure, so if any of them execute, it will record the same textual basic block in the 
2901         // JavaScript program as executing.
2902         // At the bytecode level, this situation looks like:
2903         // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset)
2904         // ...
2905         // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m).
2906         // ...
2907         // m: op_profile_control_flow
2908         if (basicBlockEndOffset < basicBlockStartOffset) {
2909             RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock.
2910             instructions[startIdx + 1].u.basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock();
2911             continue;
2912         }
2913
2914         BasicBlockLocation* basicBlockLocation = vm()->controlFlowProfiler()->getBasicBlockLocation(ownerScriptExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset);
2915
2916         // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset]
2917         // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation.
2918         // This is necessary because in the original source text of a JavaScript program, 
2919         // function literals form new basic block boundaries, but they aren't represented 
2920         // inside the CodeBlock's instruction stream.
2921         auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) {
2922             const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable();
2923             int functionStart = executable->typeProfilingStartOffset();
2924             int functionEnd = executable->typeProfilingEndOffset();
2925             if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset)
2926                 basicBlockLocation->insertGap(functionStart, functionEnd);
2927         };
2928
2929         for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls)
2930             insertFunctionGaps(executable);
2931         for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs)
2932             insertFunctionGaps(executable);
2933
2934         instructions[startIdx + 1].u.basicBlockLocation = basicBlockLocation;
2935     }
2936 }
2937
2938 #if ENABLE(JIT)
2939 void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map) 
2940 {
2941     m_pcToCodeOriginMap = WTFMove(map);
2942 }
2943
2944 std::optional<CodeOrigin> CodeBlock::findPC(void* pc)
2945 {
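    // Resolve the PC against each source of mappings in turn: the PC-to-CodeOrigin
    // map recorded at compile time, then any inline cache stub whose generated code
    // contains the PC, and finally whatever the tier-specific JITCode can recover.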
2946     if (m_pcToCodeOriginMap) {
2947         if (std::optional<CodeOrigin> codeOrigin = m_pcToCodeOriginMap->findPC(pc))
2948             return codeOrigin;
2949     }
2950
2951     for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
2952         StructureStubInfo* stub = *iter;
2953         if (stub->containsPC(pc))
2954             return std::optional<CodeOrigin>(stub->codeOrigin);
2955     }
2956
2957     if (std::optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc))
2958         return codeOrigin;
2959
2960     return std::nullopt;
2961 }
2962 #endif // ENABLE(JIT)
2963
2964 std::optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex)
2965 {
2966     std::optional<unsigned> bytecodeOffset;
2967     JITCode::JITType jitType = this->jitType();
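    // LLInt and Baseline encode call sites differently per value representation: on
    // 64-bit the CallSiteIndex bits are the bytecode offset itself, while on 32-bit
    // they hold the Instruction pointer, so the offset is recovered by pointer
    // arithmetic against the start of the instruction stream.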
2968     if (jitType == JITCode::InterpreterThunk || jitType == JITCode::BaselineJIT) {
2969 #if USE(JSVALUE64)
2970         bytecodeOffset = callSiteIndex.bits();
2971 #else
2972         Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits());
2973         bytecodeOffset = instruction - instructions().begin();
2974 #endif
2975     } else if (jitType == JITCode::DFGJIT || jitType == JITCode::FTLJIT) {
2976 #if ENABLE(DFG_JIT)
2977         RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex));
2978         CodeOrigin origin = codeOrigin(callSiteIndex);
2979         bytecodeOffset = origin.bytecodeIndex;
2980 #else
2981         RELEASE_ASSERT_NOT_REACHED();
2982 #endif
2983     }
2984
2985     return bytecodeOffset;
2986 }
2987
2988 int32_t CodeBlock::thresholdForJIT(int32_t threshold)
2989 {
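    // Scale the base threshold by what previous runs of this unlinked code block
    // taught us: for example, a base threshold of 100 stays 100 when the history is
    // mixed, grows to 400 when we never ended up optimizing (so we are lazier about
    // trying), and shrinks to 50 when we did (so we tier up sooner).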
2990     switch (unlinkedCodeBlock()->didOptimize()) {
2991     case MixedTriState:
2992         return threshold;
2993     case FalseTriState:
2994         return threshold * 4;
2995     case TrueTriState:
2996         return threshold / 2;
2997     }
2998     ASSERT_NOT_REACHED();
2999     return threshold;
3000 }
3001
3002 void CodeBlock::jitAfterWarmUp()
3003 {
3004     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITAfterWarmUp()), this);
3005 }
3006
3007 void CodeBlock::jitSoon()
3008 {
3009     m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITSoon()), this);
3010 }
3011
3012 bool CodeBlock::hasInstalledVMTrapBreakpoints() const
3013 {
3014 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3016     // This function may be called from a signal handler. We need to be
3017     // careful to not call anything that is not signal handler safe, e.g.
3018     // we should not perturb the refCount of m_jitCode.
3019     if (!JITCode::isOptimizingJIT(jitType()))
3020         return false;
3021     return m_jitCode->dfgCommon()->hasInstalledVMTrapsBreakpoints();
3022 #else
3023     return false;
3024 #endif
3025 }
3026
3027 bool CodeBlock::installVMTrapBreakpoints()
3028 {
3029 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3030     // This function may be called from a signal handler. We need to be
3031     // careful to not call anything that is not signal handler safe, e.g.
3032     // we should not perturb the refCount of m_jitCode.
3033     if (!JITCode::isOptimizingJIT(jitType()))
3034         return false;
3035     m_jitCode->dfgCommon()->installVMTrapBreakpoints();
3036     return true;
3037 #else
3038     return false;
3039 #endif
3040 }
3041
3042 void CodeBlock::dumpMathICStats()
3043 {
3044 #if ENABLE(MATH_IC_STATS)
3045     double numAdds = 0.0;
3046     double totalAddSize = 0.0;
3047     double numMuls = 0.0;
3048     double totalMulSize = 0.0;
3049     double numNegs = 0.0;
3050     double totalNegSize = 0.0;
3051     double numSubs = 0.0;
3052     double totalSubSize = 0.0;
3053
3054     auto countICs = [&] (CodeBlock* codeBlock) {
3055         for (JITAddIC* addIC : codeBlock->m_addICs) {
3056             numAdds++;
3057             totalAddSize += addIC->codeSize();
3058         }
3059
3060         for (JITMulIC* mulIC : codeBlock->m_mulICs) {
3061             numMuls++;
3062             totalMulSize += mulIC->codeSize();
3063         }
3064
3065         for (JITNegIC* negIC : codeBlock->m_negICs) {
3066             numNegs++;
3067             totalNegSize += negIC->codeSize();
3068         }
3069
3070         for (JITSubIC* subIC : codeBlock->m_subICs) {
3071             numSubs++;
3072             totalSubSize += subIC->codeSize();
3073         }
3074
3075         return false;
3076     };
3077     heap()->forEachCodeBlock(countICs);
3078
3079     dataLog("Num Adds: ", numAdds, "\n");
3080     dataLog("Total Add size in bytes: ", totalAddSize, "\n");
3081     dataLog("Average Add size: ", totalAddSize / numAdds, "\n");
3082     dataLog("\n");
3083     dataLog("Num Muls: ", numMuls, "\n");
3084     dataLog("Total Mul size in bytes: ", totalMulSize, "\n");
3085     dataLog("Average Mul size: ", totalMulSize / numMuls, "\n");
3086     dataLog("\n");
3087     dataLog("Num Negs: ", numNegs, "\n");
3088     dataLog("Total Neg size in bytes: ", totalNegSize, "\n");
3089     dataLog("Average Neg size: ", totalNegSize / numNegs, "\n");
3090     dataLog("\n");
3091     dataLog("Num Subs: ", numSubs, "\n");
3092     dataLog("Total Sub size in bytes: ", totalSubSize, "\n");
3093     dataLog("Average Sub size: ", totalSubSize / numSubs, "\n");
3094
3095     dataLog("-----------------------\n");
3096 #endif
3097 }
3098
3099 BytecodeLivenessAnalysis& CodeBlock::livenessAnalysisSlow()
3100 {
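    // Build the analysis without holding the lock, since it can be expensive, and
    // only install it under m_lock if no other thread beat us to it. If we lose
    // that race, the freshly built analysis is discarded and the winner's is
    // returned instead.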
3101     std::unique_ptr<BytecodeLivenessAnalysis> analysis = std::make_unique<BytecodeLivenessAnalysis>(this);
3102     {
3103         ConcurrentJSLocker locker(m_lock);
3104         if (!m_livenessAnalysis)
3105             m_livenessAnalysis = WTFMove(analysis);
3106         return *m_livenessAnalysis;
3107     }
3108 }
3109
3110 void setPrinter(Printer::PrintRecord& record, CodeBlock* codeBlock)
3111 {
3112     Printer::setPrinter(record, toCString(codeBlock));
3113 }
3114
3115 } // namespace JSC
3116
3117 namespace WTF {
3118     
3119 void printInternal(PrintStream& out, JSC::CodeBlock* codeBlock)
3120 {
3121     if (UNLIKELY(!codeBlock)) {
3122         out.print("<null codeBlock>");
3123         return;
3124     }
3125     out.print(*codeBlock);
3126 }
3127     
3128 } // namespace WTF