/*
 * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "CodeBlock.h"

#include "ArithProfile.h"
#include "BasicBlockLocation.h"
#include "BytecodeDumper.h"
#include "BytecodeGenerator.h"
#include "BytecodeLivenessAnalysis.h"
#include "BytecodeStructs.h"
#include "BytecodeUseDef.h"
#include "CallLinkStatus.h"
#include "CodeBlockInlines.h"
#include "CodeBlockSet.h"
#include "DFGCapabilities.h"
#include "DFGCommon.h"
#include "DFGDriver.h"
#include "DFGJITCode.h"
#include "DFGWorklist.h"
#include "EvalCodeBlock.h"
#include "FullCodeOrigin.h"
#include "FunctionCodeBlock.h"
#include "FunctionExecutableDump.h"
#include "GetPutInfo.h"
#include "InlineCallFrame.h"
#include "Instruction.h"
#include "InstructionStream.h"
#include "InterpreterInlines.h"
#include "IsoCellSetInlines.h"
#include "JITMathIC.h"
#include "JSCInlines.h"
#include "JSCJSValue.h"
#include "JSFunction.h"
#include "JSLexicalEnvironment.h"
#include "JSModuleEnvironment.h"
#include "JSTemplateObjectDescriptor.h"
#include "LLIntData.h"
#include "LLIntEntrypoint.h"
#include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
#include "LowLevelInterpreter.h"
#include "MetadataTable.h"
#include "ModuleProgramCodeBlock.h"
#include "ObjectAllocationProfileInlines.h"
#include "OpcodeInlines.h"
#include "PCToCodeOriginMap.h"
#include "PolymorphicAccess.h"
#include "ProfilerDatabase.h"
#include "ProgramCodeBlock.h"
#include "ReduceWhitespace.h"
#include "SlotVisitorInlines.h"
#include "StackVisitor.h"
#include "StructureStubInfo.h"
#include "TypeLocationCache.h"
#include "TypeProfiler.h"
#include "VMInlines.h"
#include <wtf/BagToHashMap.h>
#include <wtf/CommaPrinter.h>
#include <wtf/Forward.h>
#include <wtf/SimpleStats.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/StringConcatenateNumbers.h>
#include <wtf/text/UniquedStringImpl.h>

#include "RegisterAtOffsetList.h"

#include "DFGOperations.h"

#include "FTLJITCode.h"
const ClassInfo CodeBlock::s_info = {
    "CodeBlock", nullptr, nullptr, nullptr,
    CREATE_METHOD_TABLE(CodeBlock)
CString CodeBlock::inferredName() const
    switch (codeType()) {
        return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
    return CString("", 0);

bool CodeBlock::hasHash() const

bool CodeBlock::isSafeToComputeHash() const
    return !isCompilationThread();

CodeBlockHash CodeBlock::hash() const
    RELEASE_ASSERT(isSafeToComputeHash());
    m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind());

CString CodeBlock::sourceCodeForTools() const
    if (codeType() != FunctionCode)
        return ownerExecutable()->source().toUTF8();

    SourceProvider* provider = source().provider();
    FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
    UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
    unsigned unlinkedStartOffset = unlinked->startOffset();
    unsigned linkedStartOffset = executable->source().startOffset();
    int delta = linkedStartOffset - unlinkedStartOffset;
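    // Note: delta translates offsets recorded on the unlinked executable into this linked
    // executable's position within the provider's source, so the range computed below is
    // expressed in provider coordinates.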
    unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
    unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
        provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8());

CString CodeBlock::sourceCodeOnOneLine() const
    return reduceWhitespace(sourceCodeForTools());

CString CodeBlock::hashAsStringIfPossible() const
    if (hasHash() || isSafeToComputeHash())
        return toCString(hash());
void CodeBlock::dumpAssumingJITType(PrintStream& out, JITType jitType) const
    out.print(inferredName(), "#", hashAsStringIfPossible());
    out.print(":[", RawPointer(this), "->");
        out.print(RawPointer(alternative()), "->");
    out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());

    if (codeType() == FunctionCode)
        out.print(specializationKind());
    out.print(", ", instructionsSize());
    if (this->jitType() == JITType::BaselineJIT && m_shouldAlwaysBeInlined)
        out.print(" (ShouldAlwaysBeInlined)");
    if (ownerExecutable()->neverInline())
        out.print(" (NeverInline)");
    if (ownerExecutable()->neverOptimize())
        out.print(" (NeverOptimize)");
    else if (ownerExecutable()->neverFTLOptimize())
        out.print(" (NeverFTLOptimize)");
    if (ownerExecutable()->didTryToEnterInLoop())
        out.print(" (DidTryToEnterInLoop)");
    if (ownerExecutable()->isStrictMode())
        out.print(" (StrictMode)");
    if (m_didFailJITCompilation)
        out.print(" (JITFail)");
    if (this->jitType() == JITType::BaselineJIT && m_didFailFTLCompilation)
        out.print(" (FTLFail)");
    if (this->jitType() == JITType::BaselineJIT && m_hasBeenCompiledWithFTL)
        out.print(" (HadFTLReplacement)");

void CodeBlock::dump(PrintStream& out) const
    dumpAssumingJITType(out, jitType());
void CodeBlock::dumpSource()
    dumpSource(WTF::dataFile());

void CodeBlock::dumpSource(PrintStream& out)
    ScriptExecutable* executable = ownerExecutable();
    if (executable->isFunctionExecutable()) {
        FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
        StringView source = functionExecutable->source().provider()->getRange(
            functionExecutable->parametersStartOffset(),
            functionExecutable->typeProfilingEndOffset(*vm()) + 1); // Type profiling end offset is the character before the '}'.

        out.print("function ", inferredName(), source);
    out.print(executable->source().view());

void CodeBlock::dumpBytecode()
    dumpBytecode(WTF::dataFile());

void CodeBlock::dumpBytecode(PrintStream& out)
    ICStatusMap statusMap;
    getICStatusMap(statusMap);
    BytecodeDumper<CodeBlock>::dumpBlock(this, instructions(), out, statusMap);

void CodeBlock::dumpBytecode(PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& statusMap)
    BytecodeDumper<CodeBlock>::dumpBytecode(this, out, it, statusMap);

void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset, const ICStatusMap& statusMap)
    const auto it = instructions().at(bytecodeOffset);
    dumpBytecode(out, it, statusMap);
class PutToScopeFireDetail : public FireDetail {
    PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
        : m_codeBlock(codeBlock)

    void dump(PrintStream& out) const override
        out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);

    CodeBlock* m_codeBlock;
    const Identifier& m_ident;

} // anonymous namespace
CodeBlock::CodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other)
    : JSCell(*vm, structure)
    , m_globalObject(other.m_globalObject)
    , m_shouldAlwaysBeInlined(true)
    , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
    , m_didFailJITCompilation(false)
    , m_didFailFTLCompilation(false)
    , m_hasBeenCompiledWithFTL(false)
    , m_numCalleeLocals(other.m_numCalleeLocals)
    , m_numVars(other.m_numVars)
    , m_numberOfArgumentsToSkip(other.m_numberOfArgumentsToSkip)
    , m_hasDebuggerStatement(false)
    , m_steppingMode(SteppingModeDisabled)
    , m_numBreakpoints(0)
    , m_bytecodeCost(other.m_bytecodeCost)
    , m_scopeRegister(other.m_scopeRegister)
    , m_hash(other.m_hash)
    , m_unlinkedCode(*other.vm(), this, other.m_unlinkedCode.get())
    , m_ownerExecutable(*other.vm(), this, other.m_ownerExecutable.get())
    , m_instructionsRawPointer(other.m_instructionsRawPointer)
    , m_constantRegisters(other.m_constantRegisters)
    , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
    , m_functionDecls(other.m_functionDecls)
    , m_functionExprs(other.m_functionExprs)
    , m_osrExitCounter(0)
    , m_optimizationDelayCounter(0)
    , m_reoptimizationRetryCounter(0)
    , m_metadata(other.m_metadata)
    , m_creationTime(MonotonicTime::now())
    ASSERT(heap()->isDeferred());
    ASSERT(m_scopeRegister.isLocal());

    ASSERT(source().provider());
    setNumParameters(other.numParameters());

    vm->heap.codeBlockSet().add(this);

void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other)
    Base::finishCreation(vm);
    finishCreationCommon(vm);

    optimizeAfterWarmUp();

    if (other.m_rareData) {
        createRareDataIfNecessary();

        m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
        m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
        m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope)
    : JSCell(*vm, structure)
    , m_globalObject(*vm, this, scope->globalObject(*vm))
    , m_shouldAlwaysBeInlined(true)
    , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
    , m_didFailJITCompilation(false)
    , m_didFailFTLCompilation(false)
    , m_hasBeenCompiledWithFTL(false)
    , m_numCalleeLocals(unlinkedCodeBlock->numCalleeLocals())
    , m_numVars(unlinkedCodeBlock->numVars())
    , m_hasDebuggerStatement(false)
    , m_steppingMode(SteppingModeDisabled)
    , m_numBreakpoints(0)
    , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
    , m_unlinkedCode(*vm, this, unlinkedCodeBlock)
    , m_ownerExecutable(*vm, this, ownerExecutable)
    , m_instructionsRawPointer(unlinkedCodeBlock->instructions().rawPointer())
    , m_osrExitCounter(0)
    , m_optimizationDelayCounter(0)
    , m_reoptimizationRetryCounter(0)
    , m_metadata(unlinkedCodeBlock->metadata().link())
    , m_creationTime(MonotonicTime::now())
    ASSERT(heap()->isDeferred());
    ASSERT(m_scopeRegister.isLocal());

    ASSERT(source().provider());
    setNumParameters(unlinkedCodeBlock->numParameters());

    vm->heap.codeBlockSet().add(this);
// The main purpose of this function is to generate linked bytecode from unlinked bytecode. Linking
// takes an abstract representation of bytecode and ties it to a GlobalObject and scope chain. For
// example, this process allows us to cache the depth of lexical environment reads that reach
// outside of this CodeBlock's compilation unit. It also allows us to generate particular constants
// that we can't generate during unlinked bytecode generation. This process is not allowed to
// generate control flow or introduce new locals, because we rely on the liveness analysis being
// the same for all the CodeBlocks of an UnlinkedCodeBlock; we exploit that fact by caching the
// liveness analysis inside UnlinkedCodeBlock.
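// As a concrete illustration, the op_resolve_scope case below resolves the variable against the
// actual scope chain at link time and caches the resolved type and scope depth in the
// instruction's metadata.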
bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
    Base::finishCreation(vm);
    finishCreationCommon(vm);

    auto throwScope = DECLARE_THROW_SCOPE(vm);

    if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes() || m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes())
        vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(vm), ownerExecutable->typeProfilingEndOffset(vm));

    setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation());
    RETURN_IF_EXCEPTION(throwScope, false);

    for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
        LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
        if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
            m_constantRegisters[registerIndex].set(vm, this, m_globalObject->jsCellForLinkTimeConstant(type));

    // We already have the cloned symbol table for the module environment since we need to instantiate
    // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
    if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(vm, unlinkedCodeBlock)) {
        SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
        if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes()) {
            ConcurrentJSLocker locker(clonedSymbolTable->m_lock);
            clonedSymbolTable->prepareForTypeProfiling(locker);
        replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);

    bool shouldUpdateFunctionHasExecutedCache = m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes() || m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes();
    m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls());
    for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
        UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
        if (shouldUpdateFunctionHasExecutedCache)
            vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
        m_functionDecls[i].set(vm, this, unlinkedExecutable->link(vm, ownerExecutable->source()));

    m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs());
    for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
        UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
        if (shouldUpdateFunctionHasExecutedCache)
            vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
        m_functionExprs[i].set(vm, this, unlinkedExecutable->link(vm, ownerExecutable->source()));

    if (unlinkedCodeBlock->hasRareData()) {
        createRareDataIfNecessary();

        setConstantIdentifierSetRegisters(vm, unlinkedCodeBlock->constantIdentifierSets());
        RETURN_IF_EXCEPTION(throwScope, false);

        if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
            m_rareData->m_exceptionHandlers.resizeToFit(count);
            for (size_t i = 0; i < count; i++) {
                const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
                HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
                MacroAssemblerCodePtr<BytecodePtrTag> codePtr = instructions().at(unlinkedHandler.target)->isWide()
                    ? LLInt::getWideCodePtr<BytecodePtrTag>(op_catch)
                    : LLInt::getCodePtr<BytecodePtrTag>(op_catch);
                handler.initialize(unlinkedHandler, CodeLocationLabel<ExceptionHandlerPtrTag>(codePtr.retagged<ExceptionHandlerPtrTag>()));
                handler.initialize(unlinkedHandler);

        if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
            m_rareData->m_stringSwitchJumpTables.grow(count);
            for (size_t i = 0; i < count; i++) {
                UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
                UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
                for (; ptr != end; ++ptr) {
                    OffsetLocation offset;
                    offset.branchOffset = ptr->value.branchOffset;
                    m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);

        if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
            m_rareData->m_switchJumpTables.grow(count);
            for (size_t i = 0; i < count; i++) {
                UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
                SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
                destTable.branchOffsets = sourceTable.branchOffsets;
                destTable.min = sourceTable.min;
    // Bookkeep the strongly referenced module environments.
    HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;

    auto link_profile = [&](const auto& instruction, auto /*bytecode*/, auto& metadata) {
        m_numberOfNonArgumentValueProfiles++;
        metadata.m_profile.m_bytecodeOffset = instruction.offset();

    auto link_arrayProfile = [&](const auto& instruction, auto /*bytecode*/, auto& metadata) {
        metadata.m_arrayProfile.m_bytecodeOffset = instruction.offset();

    auto link_objectAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
        metadata.m_objectAllocationProfile.initializeProfile(vm, m_globalObject.get(), this, m_globalObject->objectPrototype(), bytecode.m_inlineCapacity);

    auto link_arrayAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
        metadata.m_arrayAllocationProfile.initializeIndexingMode(bytecode.m_recommendedIndexingType);

    auto link_hitCountForLLIntCaching = [&](const auto& /*instruction*/, auto /*bytecode*/, auto& metadata) {
        metadata.m_hitCountForLLIntCaching = Options::prototypeHitCountForLLIntCaching();
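    // Roughly speaking, LINK(OpFoo, field1, field2) below expands to a "case OpFoo::opcodeID"
    // that casts the instruction to OpFoo, placement-constructs its Metadata, and then runs the
    // matching link_field1/link_field2 helpers above on that metadata.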
#define LINK_FIELD(__field) \
    WTF_LAZY_JOIN(link_, __field)(instruction, bytecode, metadata);

#define INITIALIZE_METADATA(__op) \
    auto bytecode = instruction->as<__op>(); \
    auto& metadata = bytecode.metadata(this); \
    new (&metadata) __op::Metadata { bytecode }; \

#define CASE(__op) case __op::opcodeID

    CASE(WTF_LAZY_FIRST(__VA_ARGS__)): { \
        INITIALIZE_METADATA(WTF_LAZY_FIRST(__VA_ARGS__)) \
        WTF_LAZY_HAS_REST(__VA_ARGS__)({ \
            WTF_LAZY_FOR_EACH_TERM(LINK_FIELD, WTF_LAZY_REST_(__VA_ARGS__)) \
    const InstructionStream& instructionStream = instructions();
    for (const auto& instruction : instructionStream) {
        OpcodeID opcodeID = instruction->opcodeID();
        m_bytecodeCost += opcodeLengths[opcodeID];
        LINK(OpHasIndexedProperty, arrayProfile)

        LINK(OpCallVarargs, arrayProfile, profile)
        LINK(OpTailCallVarargs, arrayProfile, profile)
        LINK(OpTailCallForwardArguments, arrayProfile, profile)
        LINK(OpConstructVarargs, arrayProfile, profile)
        LINK(OpGetByVal, arrayProfile, profile)

        LINK(OpGetDirectPname, profile)
        LINK(OpGetByIdWithThis, profile)
        LINK(OpTryGetById, profile)
        LINK(OpGetByIdDirect, profile)
        LINK(OpGetByValWithThis, profile)
        LINK(OpGetFromArguments, profile)
        LINK(OpToNumber, profile)
        LINK(OpToObject, profile)
        LINK(OpGetArgument, profile)
        LINK(OpToThis, profile)
        LINK(OpBitand, profile)
        LINK(OpBitor, profile)
        LINK(OpBitnot, profile)
        LINK(OpBitxor, profile)

        LINK(OpGetById, profile, hitCountForLLIntCaching)

        LINK(OpCall, profile, arrayProfile)
        LINK(OpTailCall, profile, arrayProfile)
        LINK(OpCallEval, profile, arrayProfile)
        LINK(OpConstruct, profile, arrayProfile)

        LINK(OpInByVal, arrayProfile)
        LINK(OpPutByVal, arrayProfile)
        LINK(OpPutByValDirect, arrayProfile)

        LINK(OpNewArrayWithSize)
        LINK(OpNewArrayBuffer, arrayAllocationProfile)

        LINK(OpNewObject, objectAllocationProfile)

        LINK(OpProfileControlFlow)
        case op_resolve_scope: {
            INITIALIZE_METADATA(OpResolveScope)

            const Identifier& ident = identifier(bytecode.m_var);
            RELEASE_ASSERT(bytecode.m_resolveType != LocalClosureVar);

            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization);
            RETURN_IF_EXCEPTION(throwScope, false);

            metadata.m_resolveType = op.type;
            metadata.m_localScopeDepth = op.depth;
            if (op.lexicalEnvironment) {
                if (op.type == ModuleVar) {
                    // Keep the linked module environment strongly referenced.
                    if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry)
                        addConstant(op.lexicalEnvironment);
                    metadata.m_lexicalEnvironment.set(vm, this, op.lexicalEnvironment);
                    metadata.m_symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());
            } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this)) {
                metadata.m_constantScope.set(vm, this, constantScope);
                if (op.type == GlobalProperty || op.type == GlobalPropertyWithVarInjectionChecks)
                    metadata.m_globalLexicalBindingEpoch = m_globalObject->globalLexicalBindingEpoch();
                metadata.m_globalObject = nullptr;
        case op_get_from_scope: {
            INITIALIZE_METADATA(OpGetFromScope)

            link_profile(instruction, bytecode, metadata);
            metadata.m_watchpointSet = nullptr;

            ASSERT(!isInitialization(bytecode.m_getPutInfo.initializationMode()));
            if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) {
                metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode());

            const Identifier& ident = identifier(bytecode.m_var);
            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_getPutInfo.resolveType(), InitializationMode::NotInitialization);
            RETURN_IF_EXCEPTION(throwScope, false);

            metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode());
            if (op.type == ModuleVar)
                metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode());
            if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
                metadata.m_watchpointSet = op.watchpointSet;
            else if (op.structure)
                metadata.m_structure.set(vm, this, op.structure);
            metadata.m_operand = op.operand;
        case op_put_to_scope: {
            INITIALIZE_METADATA(OpPutToScope)

            if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) {
                // Only do watching if the property we're putting to is not anonymous.
                if (bytecode.m_var != UINT_MAX) {
                    SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(bytecode.m_symbolTableOrScopeDepth));
                    const Identifier& ident = identifier(bytecode.m_var);
                    ConcurrentJSLocker locker(symbolTable->m_lock);
                    auto iter = symbolTable->find(locker, ident.impl());
                    ASSERT(iter != symbolTable->end(locker));
                    iter->value.prepareToWatch();
                    metadata.m_watchpointSet = iter->value.watchpointSet();
                    metadata.m_watchpointSet = nullptr;

            const Identifier& ident = identifier(bytecode.m_var);
            metadata.m_watchpointSet = nullptr;
            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_symbolTableOrScopeDepth, scope, ident, Put, bytecode.m_getPutInfo.resolveType(), bytecode.m_getPutInfo.initializationMode());
            RETURN_IF_EXCEPTION(throwScope, false);

            metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode());
            if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
                metadata.m_watchpointSet = op.watchpointSet;
            else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
                if (op.watchpointSet)
                    op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident));
            } else if (op.structure)
                metadata.m_structure.set(vm, this, op.structure);
            metadata.m_operand = op.operand;
        case op_profile_type: {
            RELEASE_ASSERT(m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes());

            INITIALIZE_METADATA(OpProfileType)

            size_t instructionOffset = instruction.offset() + instruction->size() - 1;
            unsigned divotStart, divotEnd;
            GlobalVariableID globalVariableID = 0;
            RefPtr<TypeSet> globalTypeSet;
            bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
            SymbolTable* symbolTable = nullptr;

            switch (bytecode.m_flag) {
            case ProfileTypeBytecodeClosureVar: {
                const Identifier& ident = identifier(bytecode.m_identifier);
                unsigned localScopeDepth = bytecode.m_symbolTableOrScopeDepth;
                // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
                // we're abstractly "read"ing from a JSScope.
                ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization);
                RETURN_IF_EXCEPTION(throwScope, false);

                if (op.type == ClosureVar || op.type == ModuleVar)
                    symbolTable = op.lexicalEnvironment->symbolTable();
                else if (op.type == GlobalVar)
                    symbolTable = m_globalObject.get()->symbolTable();

                UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl();
                    ConcurrentJSLocker locker(symbolTable->m_lock);
                    // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
                    symbolTable->prepareForTypeProfiling(locker);
                    globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm);
                    globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm);
                    globalVariableID = TypeProfilerNoGlobalIDExists;

            case ProfileTypeBytecodeLocallyResolved: {
                int symbolTableIndex = bytecode.m_symbolTableOrScopeDepth;
                SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
                const Identifier& ident = identifier(bytecode.m_identifier);
                ConcurrentJSLocker locker(symbolTable->m_lock);
                // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
                globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm);
                globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm);

            case ProfileTypeBytecodeDoesNotHaveGlobalID:
            case ProfileTypeBytecodeFunctionArgument: {
                globalVariableID = TypeProfilerNoGlobalIDExists;

            case ProfileTypeBytecodeFunctionReturnStatement: {
                RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
                globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
                globalVariableID = TypeProfilerReturnStatement;
                if (!shouldAnalyze) {
                    // Because a return statement can be added implicitly to return undefined at the end of a function,
                    // and these nodes don't emit expression ranges because they aren't in the actual source text of
                    // the user's program, give the type profiler some range to identify these return statements.
                    // Currently, the text offset that is used as identification is "f" in the function keyword
                    // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
                    divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset(vm);
                    shouldAnalyze = true;

            std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
                ownerExecutable->sourceID(), divotStart, divotEnd, WTFMove(globalTypeSet), &vm);
            TypeLocation* location = locationPair.first;
            bool isNewLocation = locationPair.second;

            if (bytecode.m_flag == ProfileTypeBytecodeFunctionReturnStatement)
                location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset(vm);

            if (shouldAnalyze && isNewLocation)
                vm.typeProfiler()->insertNewLocation(location);

            metadata.m_typeLocation = location;

            if (instruction->as<OpDebug>().m_debugHookType == DidReachBreakpoint)
                m_hasDebuggerStatement = true;
        case op_create_rest: {
            int numberOfArgumentsToSkip = instruction->as<OpCreateRest>().m_numParametersToSkip;
            ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0);
            // This is used when rematerializing the rest parameter during OSR exit in the FTL JIT.
            m_numberOfArgumentsToSkip = numberOfArgumentsToSkip;
#undef INITIALIZE_METADATA

    if (m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes())
        insertBasicBlockBoundariesForControlFlowProfiler();

    // Set optimization thresholds only after the instruction stream is initialized, since these
    // rely on the instruction count (and are in theory permitted to also inspect the
    // instruction stream to more accurately assess the cost of tier-up).
    optimizeAfterWarmUp();

    // If the concurrent thread will want the code block's hash, then compute it here
    if (Options::alwaysComputeHash())

    if (Options::dumpGeneratedBytecodes())

    vm.heap.reportExtraMemoryAllocated(m_metadata->sizeInBytes());
void CodeBlock::finishCreationCommon(VM& vm)
    m_ownerEdge.set(vm, this, ExecutableToCodeBlockEdge::create(vm, this));

CodeBlock::~CodeBlock()
    vm.heap.codeBlockSet().remove(this);

    if (UNLIKELY(vm.m_perBytecodeProfiler))
        vm.m_perBytecodeProfiler->notifyDestruction(this);

    if (!vm.heap.isShuttingDown() && unlinkedCodeBlock()->didOptimize() == MixedTriState)
        unlinkedCodeBlock()->setDidOptimize(FalseTriState);

#if ENABLE(VERBOSE_VALUE_PROFILE)

    // We may be destroyed before any CodeBlocks that refer to us are destroyed.
    // Consider that two CodeBlocks become unreachable at the same time. There
    // is no guarantee about the order in which the CodeBlocks are destroyed.
    // So, if we don't remove incoming calls, and get destroyed before the
    // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
    // destructor will try to remove nodes from our (no longer valid) linked list.
    unlinkIncomingCalls();

    // Note that our outgoing calls will be removed from other CodeBlocks'
    // m_incomingCalls linked lists through the execution of the ~CallLinkInfo

    if (auto* jitData = m_jitData.get()) {
        for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
            stubInfo->aboutToDie();
#endif // ENABLE(JIT)
void CodeBlock::setConstantIdentifierSetRegisters(VM& vm, const Vector<ConstantIdentifierSetEntry>& constants)
    auto scope = DECLARE_THROW_SCOPE(vm);
    JSGlobalObject* globalObject = m_globalObject.get();
    ExecState* exec = globalObject->globalExec();

    for (const auto& entry : constants) {
        const IdentifierSet& set = entry.first;

        Structure* setStructure = globalObject->setStructure();
        RETURN_IF_EXCEPTION(scope, void());
        JSSet* jsSet = JSSet::create(exec, vm, setStructure, set.size());
        RETURN_IF_EXCEPTION(scope, void());

        for (auto setEntry : set) {
            JSString* jsString = jsOwnedString(&vm, setEntry.get());
            jsSet->add(exec, jsString);
            RETURN_IF_EXCEPTION(scope, void());

        m_constantRegisters[entry.second].set(vm, this, jsSet);
void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation)
    auto scope = DECLARE_THROW_SCOPE(vm);
    JSGlobalObject* globalObject = m_globalObject.get();
    ExecState* exec = globalObject->globalExec();

    ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
    size_t count = constants.size();
    m_constantRegisters.resizeToFit(count);
    for (size_t i = 0; i < count; i++) {
        JSValue constant = constants[i].get();

        if (!constant.isEmpty()) {
            if (constant.isCell()) {
                JSCell* cell = constant.asCell();
                if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(vm, cell)) {
                    if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes()) {
                        ConcurrentJSLocker locker(symbolTable->m_lock);
                        symbolTable->prepareForTypeProfiling(locker);

                    SymbolTable* clone = symbolTable->cloneScopePart(vm);
                    if (wasCompiledWithDebuggingOpcodes())
                        clone->setRareDataCodeBlock(this);

                } else if (auto* descriptor = jsDynamicCast<JSTemplateObjectDescriptor*>(vm, cell)) {
                    auto* templateObject = descriptor->createTemplateObject(exec);
                    RETURN_IF_EXCEPTION(scope, void());
                    constant = templateObject;

        m_constantRegisters[i].set(vm, this, constant);

    m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative)
    RELEASE_ASSERT(alternative);
    RELEASE_ASSERT(alternative->jitCode());
    m_alternative.set(vm, this, alternative);

void CodeBlock::setNumParameters(int newValue)
    m_numParameters = newValue;

    m_argumentValueProfiles = RefCountedArray<ValueProfile>(vm()->canUseJIT() ? newValue : 0);

CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
    if (jitType() != JITType::DFGJIT)
    DFG::JITCode* jitCode = m_jitCode->dfg();
    return jitCode->osrEntryBlock();
#else // ENABLE(FTL_JIT)
#endif // ENABLE(FTL_JIT)
size_t CodeBlock::estimatedSize(JSCell* cell, VM& vm)
    CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
    size_t extraMemoryAllocated = 0;
    if (thisObject->m_metadata)
        extraMemoryAllocated += thisObject->m_metadata->sizeInBytes();
    RefPtr<JITCode> jitCode = thisObject->m_jitCode;
    if (jitCode && !jitCode->isShared())
        extraMemoryAllocated += jitCode->size();
    return Base::estimatedSize(cell, vm) + extraMemoryAllocated;

void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
    CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
    ASSERT_GC_OBJECT_INHERITS(thisObject, info());
    Base::visitChildren(cell, visitor);
    visitor.append(thisObject->m_ownerEdge);
    thisObject->visitChildren(visitor);

void CodeBlock::visitChildren(SlotVisitor& visitor)
    ConcurrentJSLocker locker(m_lock);
    if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
        visitor.appendUnbarriered(otherBlock);

    size_t extraMemory = 0;
        extraMemory += m_metadata->sizeInBytes();
    if (m_jitCode && !m_jitCode->isShared())
        extraMemory += m_jitCode->size();
    visitor.reportExtraMemoryVisited(extraMemory);

    stronglyVisitStrongReferences(locker, visitor);
    stronglyVisitWeakReferences(locker, visitor);

    VM::SpaceAndSet::setFor(*subspace()).add(this);
bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker)
    if (Options::forceCodeBlockLiveness())

    if (shouldJettisonDueToOldAge(locker))

    // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
    // their weak references go stale. So if a baseline JIT CodeBlock gets
    // scanned, we can assume that it is live.
    if (!JITCode::isOptimizingJIT(jitType()))

bool CodeBlock::shouldJettisonDueToWeakReference(VM& vm)
    if (!JITCode::isOptimizingJIT(jitType()))
    return !vm.heap.isMarked(this);
static Seconds timeToLive(JITType jitType)
    if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) {
        case JITType::InterpreterThunk:
        case JITType::BaselineJIT:
        case JITType::DFGJIT:
        case JITType::FTLJIT:
            return Seconds::infinity();

    case JITType::InterpreterThunk:
    case JITType::BaselineJIT:
        // Effectively 10 additional seconds, since BaselineJIT and
        // InterpreterThunk share a CodeBlock.
    case JITType::DFGJIT:
    case JITType::FTLJIT:
        return Seconds::infinity();

bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
    if (m_vm->heap.isMarked(this))

    if (UNLIKELY(Options::forceCodeBlockToJettisonDueToOldAge()))

    if (timeSinceCreation() < timeToLive(jitType()))

static bool shouldMarkTransition(VM& vm, DFG::WeakReferenceTransition& transition)
    if (transition.m_codeOrigin && !vm.heap.isMarked(transition.m_codeOrigin.get()))

    if (!vm.heap.isMarked(transition.m_from.get()))

#endif // ENABLE(DFG_JIT)
void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& visitor)
    UNUSED_PARAM(visitor);

    if (jitType() == JITType::InterpreterThunk) {
        const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
        const InstructionStream& instructionStream = instructions();
        for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
            auto instruction = instructionStream.at(propertyAccessInstructions[i]);
            if (instruction->is<OpPutById>()) {
                auto& metadata = instruction->as<OpPutById>().metadata(this);
                StructureID oldStructureID = metadata.m_oldStructureID;
                StructureID newStructureID = metadata.m_newStructureID;
                if (!oldStructureID || !newStructureID)
                Structure* oldStructure =
                    vm.heap.structureIDTable().get(oldStructureID);
                Structure* newStructure =
                    vm.heap.structureIDTable().get(newStructureID);
                if (vm.heap.isMarked(oldStructure))
                    visitor.appendUnbarriered(newStructure);

    if (JITCode::isJIT(jitType())) {
        if (auto* jitData = m_jitData.get()) {
            for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
                stubInfo->propagateTransitions(visitor);
#endif // ENABLE(JIT)
    if (JITCode::isOptimizingJIT(jitType())) {
        DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();

        dfgCommon->recordedStatuses.markIfCheap(visitor);

        for (auto& weakReference : dfgCommon->weakStructureReferences)
            weakReference->markIfCheap(visitor);

        for (auto& transition : dfgCommon->transitions) {
            if (shouldMarkTransition(vm, transition)) {
                // If the following three things are live, then the target of the
                // transition is also live:
                //
                // - This code block. We know it's live already because otherwise
                //   we wouldn't be scanning ourselves.
                //
                // - The code origin of the transition. Transitions may arise from
                //   code that was inlined. They are not relevant if the user's
                //   object that is required for the inlinee to run is no longer
                //   live.
                //
                // - The source of the transition. The transition checks if some
                //   heap location holds the source, and if so, stores the target.
                //   Hence the source must be live for the transition to be live.
                //
                // We also short-circuit the liveness if the structure is harmless
                // to mark (i.e. its global object and prototype are both already
                // marked).
                visitor.append(transition.m_to);
#endif // ENABLE(DFG_JIT)
void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visitor)
    UNUSED_PARAM(visitor);

    if (vm.heap.isMarked(this))

    // In rare and weird cases, this could be called on a baseline CodeBlock. One that I found was
    // that we might decide that the CodeBlock should be jettisoned due to old age, so the
    // isMarked check doesn't protect us.
    if (!JITCode::isOptimizingJIT(jitType()))

    DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
    // Now check all of our weak references. If all of them are live, then we
    // have proved liveness and so we scan our strong references. If at end of
    // GC we still have not proved liveness, then this code block is toast.
    bool allAreLiveSoFar = true;
    for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
        JSCell* reference = dfgCommon->weakReferences[i].get();
        ASSERT(!jsDynamicCast<CodeBlock*>(vm, reference));
        if (!vm.heap.isMarked(reference)) {
            allAreLiveSoFar = false;

    if (allAreLiveSoFar) {
        for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
            if (!vm.heap.isMarked(dfgCommon->weakStructureReferences[i].get())) {
                allAreLiveSoFar = false;

    // If some weak references are dead, then this fixpoint iteration was
    if (!allAreLiveSoFar)

    // All weak references are live. Record this information so we don't
    // come back here again, and scan the strong references.
    visitor.appendUnbarriered(this);
#endif // ENABLE(DFG_JIT)
void CodeBlock::finalizeLLIntInlineCaches()
    const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();

    auto handleGetPutFromScope = [&] (auto& metadata) {
        GetPutInfo getPutInfo = metadata.m_getPutInfo;
        if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks
            || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
        WriteBarrierBase<Structure>& structure = metadata.m_structure;
        if (!structure || vm.heap.isMarked(structure.get()))
        if (Options::verboseOSR())
            dataLogF("Clearing scope access with structure %p.\n", structure.get());
1205 for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
1206 const auto curInstruction = instructionStream.at(propertyAccessInstructions[i]);
1207 switch (curInstruction->opcodeID()) {
1208 case op_get_by_id: {
1209 auto& metadata = curInstruction->as<OpGetById>().metadata(this);
1210 if (metadata.m_mode != GetByIdMode::Default)
1212 StructureID oldStructureID = metadata.m_modeMetadata.defaultMode.structureID;
1213 if (!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1215 if (Options::verboseOSR())
1216 dataLogF("Clearing LLInt property access.\n");
1217 LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(metadata);
1220 case op_get_by_id_direct: {
1221 auto& metadata = curInstruction->as<OpGetByIdDirect>().metadata(this);
1222 StructureID oldStructureID = metadata.m_structureID;
1223 if (!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1225 if (Options::verboseOSR())
1226 dataLogF("Clearing LLInt property access.\n");
1227 metadata.m_structureID = 0;
1228 metadata.m_offset = 0;
1231 case op_put_by_id: {
1232 auto& metadata = curInstruction->as<OpPutById>().metadata(this);
1233 StructureID oldStructureID = metadata.m_oldStructureID;
1234 StructureID newStructureID = metadata.m_newStructureID;
1235 StructureChain* chain = metadata.m_structureChain.get();
1236 if ((!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
1237 && (!newStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(newStructureID)))
1238 && (!chain || vm.heap.isMarked(chain)))
1240 if (Options::verboseOSR())
1241 dataLogF("Clearing LLInt put transition.\n");
1242 metadata.m_oldStructureID = 0;
1243 metadata.m_offset = 0;
1244 metadata.m_newStructureID = 0;
1245 metadata.m_structureChain.clear();
1248 // FIXME: https://bugs.webkit.org/show_bug.cgi?id=166418
1249 // We need to add optimizations for op_resolve_scope_for_hoisting_func_decl_in_eval to do link time scope resolution.
1250 case op_resolve_scope_for_hoisting_func_decl_in_eval:
1253 auto& metadata = curInstruction->as<OpToThis>().metadata(this);
1254 if (!metadata.m_cachedStructure || vm.heap.isMarked(metadata.m_cachedStructure.get()))
1256 if (Options::verboseOSR())
1257 dataLogF("Clearing LLInt to_this with structure %p.\n", metadata.m_cachedStructure.get());
1258 metadata.m_cachedStructure.clear();
1259 metadata.m_toThisStatus = merge(metadata.m_toThisStatus, ToThisClearedByGC);
1262 case op_create_this: {
1263 auto& metadata = curInstruction->as<OpCreateThis>().metadata(this);
1264 auto& cacheWriteBarrier = metadata.m_cachedCallee;
1265 if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
1267 JSCell* cachedFunction = cacheWriteBarrier.get();
1268 if (vm.heap.isMarked(cachedFunction))
1270 if (Options::verboseOSR())
1271 dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
1272 cacheWriteBarrier.clear();
1275 case op_resolve_scope: {
1276 // Right now this isn't strictly necessary. Any symbol tables that this will refer to
1277 // are for outer functions, and we refer to those functions strongly, and they refer
1278 // to the symbol table strongly. But it's nice to be on the safe side.
1279 auto& metadata = curInstruction->as<OpResolveScope>().metadata(this);
1280 WriteBarrierBase<SymbolTable>& symbolTable = metadata.m_symbolTable;
1281 if (!symbolTable || vm.heap.isMarked(symbolTable.get()))
1283 if (Options::verboseOSR())
1284 dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
1285 symbolTable.clear();
1288 case op_get_from_scope:
1289 handleGetPutFromScope(curInstruction->as<OpGetFromScope>().metadata(this));
1291 case op_put_to_scope:
1292 handleGetPutFromScope(curInstruction->as<OpPutToScope>().metadata(this));
1295 OpcodeID opcodeID = curInstruction->opcodeID();
1296 ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
1300 // We can't just remove all the sets when we clear the caches since we might have created a watchpoint set
1301 // then cleared the cache without GCing in between.
1302 m_llintGetByIdWatchpointMap.removeIf([&] (const StructureWatchpointMap::KeyValuePairType& pair) -> bool {
1303 auto clear = [&] () {
1304 const Instruction* instruction = std::get<1>(pair.key);
1305 OpcodeID opcode = instruction->opcodeID();
1306 if (opcode == op_get_by_id) {
1307 if (Options::verboseOSR())
1308 dataLogF("Clearing LLInt property access.\n");
1309 LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(instruction->as<OpGetById>().metadata(this));
1314 if (!vm.heap.isMarked(std::get<0>(pair.key)))
1317 for (const LLIntPrototypeLoadAdaptiveStructureWatchpoint* watchpoint : pair.value) {
1318 if (!watchpoint->key().isStillLive(vm))
1325 forEachLLIntCallLinkInfo([&](LLIntCallLinkInfo& callLinkInfo) {
1326 if (callLinkInfo.isLinked() && !vm.heap.isMarked(callLinkInfo.callee.get())) {
1327 if (Options::verboseOSR())
1328 dataLog("Clearing LLInt call from ", *this, "\n");
1329 callLinkInfo.unlink();
1331 if (!!callLinkInfo.lastSeenCallee && !vm.heap.isMarked(callLinkInfo.lastSeenCallee.get()))
1332 callLinkInfo.lastSeenCallee.clear();
CodeBlock::JITData& CodeBlock::ensureJITDataSlow(const ConcurrentJSLocker&)
    m_jitData = std::make_unique<JITData>();

void CodeBlock::finalizeBaselineJITInlineCaches()
    if (auto* jitData = m_jitData.get()) {
        for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
            callLinkInfo->visitWeak(*vm());

        for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
            stubInfo->visitWeakReferences(this);
void CodeBlock::finalizeUnconditionally(VM& vm)
    updateAllPredictions();

    if (JITCode::couldBeInterpreted(jitType()))
        finalizeLLIntInlineCaches();

        finalizeBaselineJITInlineCaches();

    if (JITCode::isOptimizingJIT(jitType())) {
        DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
        dfgCommon->recordedStatuses.finalize(vm);
#endif // ENABLE(DFG_JIT)

    VM::SpaceAndSet::setFor(*subspace()).remove(this);

void CodeBlock::destroy(JSCell* cell)
    static_cast<CodeBlock*>(cell)->~CodeBlock();
void CodeBlock::getICStatusMap(const ConcurrentJSLocker&, ICStatusMap& result)
    if (JITCode::isJIT(jitType())) {
        if (auto* jitData = m_jitData.get()) {
            for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
                result.add(stubInfo->codeOrigin, ICStatus()).iterator->value.stubInfo = stubInfo;
            for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
                result.add(callLinkInfo->codeOrigin(), ICStatus()).iterator->value.callLinkInfo = callLinkInfo;
            for (ByValInfo* byValInfo : jitData->m_byValInfos)
                result.add(CodeOrigin(byValInfo->bytecodeIndex), ICStatus()).iterator->value.byValInfo = byValInfo;

        if (JITCode::isOptimizingJIT(jitType())) {
            DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
            for (auto& pair : dfgCommon->recordedStatuses.calls)
                result.add(pair.first, ICStatus()).iterator->value.callStatus = pair.second.get();
            for (auto& pair : dfgCommon->recordedStatuses.gets)
                result.add(pair.first, ICStatus()).iterator->value.getStatus = pair.second.get();
            for (auto& pair : dfgCommon->recordedStatuses.puts)
                result.add(pair.first, ICStatus()).iterator->value.putStatus = pair.second.get();
            for (auto& pair : dfgCommon->recordedStatuses.ins)
                result.add(pair.first, ICStatus()).iterator->value.inStatus = pair.second.get();

    UNUSED_PARAM(result);

void CodeBlock::getICStatusMap(ICStatusMap& result)
    ConcurrentJSLocker locker(m_lock);
    getICStatusMap(locker, result);
StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
    ConcurrentJSLocker locker(m_lock);
    return ensureJITData(locker).m_stubInfos.add(accessType);

JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile, const Instruction* instruction)
    ConcurrentJSLocker locker(m_lock);
    return ensureJITData(locker).m_addICs.add(arithProfile, instruction);

JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile, const Instruction* instruction)
    ConcurrentJSLocker locker(m_lock);
    return ensureJITData(locker).m_mulICs.add(arithProfile, instruction);

JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile, const Instruction* instruction)
    ConcurrentJSLocker locker(m_lock);
    return ensureJITData(locker).m_subICs.add(arithProfile, instruction);

JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile, const Instruction* instruction)
    ConcurrentJSLocker locker(m_lock);
    return ensureJITData(locker).m_negICs.add(arithProfile, instruction);

StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
    ConcurrentJSLocker locker(m_lock);
    if (auto* jitData = m_jitData.get()) {
        for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
            if (stubInfo->codeOrigin == codeOrigin)

ByValInfo* CodeBlock::addByValInfo()
    ConcurrentJSLocker locker(m_lock);
    return ensureJITData(locker).m_byValInfos.add();

CallLinkInfo* CodeBlock::addCallLinkInfo()
    ConcurrentJSLocker locker(m_lock);
    return ensureJITData(locker).m_callLinkInfos.add();

CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
    ConcurrentJSLocker locker(m_lock);
    if (auto* jitData = m_jitData.get()) {
        for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos) {
            if (callLinkInfo->codeOrigin() == CodeOrigin(index))
                return callLinkInfo;

RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset)
    ConcurrentJSLocker locker(m_lock);
    auto& jitData = ensureJITData(locker);
    jitData.m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
    return &jitData.m_rareCaseProfiles.last();
RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset)
    if (auto* jitData = m_jitData.get()) {
        return tryBinarySearch<RareCaseProfile, int>(
            jitData->m_rareCaseProfiles, jitData->m_rareCaseProfiles.size(), bytecodeOffset,
            getRareCaseProfileBytecodeOffset);

unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
    RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(locker, bytecodeOffset);
        return profile->m_counter;

void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters)
    ConcurrentJSLocker locker(m_lock);
    ensureJITData(locker).m_calleeSaveRegisters = std::make_unique<RegisterAtOffsetList>(calleeSaveRegisters);

void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList)
    ConcurrentJSLocker locker(m_lock);
    ensureJITData(locker).m_calleeSaveRegisters = WTFMove(registerAtOffsetList);

void CodeBlock::resetJITData()
    RELEASE_ASSERT(!JITCode::isJIT(jitType()));
    ConcurrentJSLocker locker(m_lock);

    if (auto* jitData = m_jitData.get()) {
        // We can clear these because no other thread will have references to any stub infos, call
        // link infos, or by val infos if we don't have JIT code. Attempts to query these data
        // structures using the concurrent API (getICStatusMap and friends) will return nothing if we
        // don't have JIT code.
        jitData->m_stubInfos.clear();
        jitData->m_callLinkInfos.clear();
        jitData->m_byValInfos.clear();
        // We can clear this because the DFG's queries to these data structures are guarded by whether
        // there is JIT code.
        jitData->m_rareCaseProfiles.clear();
void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor)
    // We strongly visit OSR exit targets because we don't want to deal with
    // the complexity of generating an exit target CodeBlock on demand and
    // guaranteeing that it matches the details of the CodeBlock we compiled
    // the OSR exit against.

    visitor.append(m_alternative);

    DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
    if (dfgCommon->inlineCallFrames) {
        for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) {
            ASSERT(inlineCallFrame->baselineCodeBlock);
            visitor.append(inlineCallFrame->baselineCodeBlock);

void CodeBlock::stronglyVisitStrongReferences(const ConcurrentJSLocker& locker, SlotVisitor& visitor)
    UNUSED_PARAM(locker);

    visitor.append(m_globalObject);
    visitor.append(m_ownerExecutable); // This is extra important since it causes the ExecutableToCodeBlockEdge to be marked.
    visitor.append(m_unlinkedCode);
        m_rareData->m_directEvalCodeCache.visitAggregate(visitor);
    visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
    for (auto& functionExpr : m_functionExprs)
        visitor.append(functionExpr);
    for (auto& functionDecl : m_functionDecls)
        visitor.append(functionDecl);
    forEachObjectAllocationProfile([&](ObjectAllocationProfile& objectAllocationProfile) {
        objectAllocationProfile.visitAggregate(visitor);

    if (auto* jitData = m_jitData.get()) {
        for (ByValInfo* byValInfo : jitData->m_byValInfos)
            visitor.append(byValInfo->cachedSymbol);

    if (JITCode::isOptimizingJIT(jitType()))
        visitOSRExitTargets(locker, visitor);
void CodeBlock::stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor& visitor)
    UNUSED_PARAM(visitor);

    if (!JITCode::isOptimizingJIT(jitType()))

    DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();

    for (auto& transition : dfgCommon->transitions) {
        if (!!transition.m_codeOrigin)
            visitor.append(transition.m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
        visitor.append(transition.m_from);
        visitor.append(transition.m_to);

    for (auto& weakReference : dfgCommon->weakReferences)
        visitor.append(weakReference);

    for (auto& weakStructureReference : dfgCommon->weakStructureReferences)
        visitor.append(weakStructureReference);

    dfgCommon->livenessHasBeenProved = true;

CodeBlock* CodeBlock::baselineAlternative()
    CodeBlock* result = this;
    while (result->alternative())
        result = result->alternative();
    RELEASE_ASSERT(result);
    RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITType::None);
CodeBlock* CodeBlock::baselineVersion()
    JITType selfJITType = jitType();
    if (JITCode::isBaselineCode(selfJITType))
    CodeBlock* result = replacement();
        if (JITCode::isOptimizingJIT(selfJITType)) {
            // The replacement can be null if we've had a memory clean up and the executable
            // has been purged of its codeBlocks (see ExecutableBase::clearCode()). Regardless,
            // the current codeBlock is still live on the stack, and as an optimizing JIT
            // codeBlock, it will keep its baselineAlternative() alive for us to fetch below.
            // This can happen if we're creating the original CodeBlock for an executable.
            // Assume that we're the baseline CodeBlock.
            RELEASE_ASSERT(selfJITType == JITType::None);
    result = result->baselineAlternative();

bool CodeBlock::hasOptimizedReplacement(JITType typeToReplace)
    CodeBlock* replacement = this->replacement();
    return replacement && JITCode::isHigherTier(replacement->jitType(), typeToReplace);

bool CodeBlock::hasOptimizedReplacement()
    return hasOptimizedReplacement(jitType());
1681 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
1683 RELEASE_ASSERT(bytecodeOffset < instructions().size());
1684 return handlerForIndex(bytecodeOffset, requiredHandler);
1687 HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
1691 return HandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler);
1694 CallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
1697 RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
1698 RELEASE_ASSERT(canGetCodeOrigin(originalCallSite));
1699 ASSERT(!!handlerForIndex(originalCallSite.bits()));
1700 CodeOrigin originalOrigin = codeOrigin(originalCallSite);
1701 return m_jitCode->dfgCommon()->addUniqueCallSiteIndex(originalOrigin);
1703 // We never create new on-the-fly exception handling
1704 // call sites outside the DFG/FTL inline caches.
1705 UNUSED_PARAM(originalCallSite);
1706 RELEASE_ASSERT_NOT_REACHED();
1707 return CallSiteIndex(0u);
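// Fast path for catch liveness: if the op_catch metadata already points at a profile buffer, we
// just assert (in debug builds) that the buffer is one of ours and return; otherwise we fall
// through to the slow path below, which builds the buffer.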
1713 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
1715 auto& instruction = instructions().at(bytecodeOffset);
1716 OpCatch op = instruction->as<OpCatch>();
1717 auto& metadata = op.metadata(this);
1718 if (!!metadata.m_buffer) {
1719 #if !ASSERT_DISABLED
1720 ConcurrentJSLocker locker(m_lock);
1722 auto* rareData = m_rareData.get();
1724 for (auto& profile : rareData->m_catchProfiles) {
1725 if (profile.get() == metadata.m_buffer) {
1735 ensureCatchLivenessIsComputedForBytecodeOffsetSlow(op, bytecodeOffset);
1738 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(const OpCatch& op, InstructionStream::Offset bytecodeOffset)
1740 BytecodeLivenessAnalysis& bytecodeLiveness = livenessAnalysis();
1742 // We get the live-out set of variables at op_catch, not the live-in. This
1743 // is because the variables that the op_catch defines might be dead, and
2744 // we can avoid profiling them and extracting them when doing OSR entry into the DFG.
1747 auto nextOffset = instructions().at(bytecodeOffset).next().offset();
1748 FastBitVector liveLocals = bytecodeLiveness.getLivenessInfoAtBytecodeOffset(this, nextOffset);
1749 Vector<VirtualRegister> liveOperands;
1750 liveOperands.reserveInitialCapacity(liveLocals.bitCount());
1751 liveLocals.forEachSetBit([&] (unsigned liveLocal) {
1752 liveOperands.append(virtualRegisterForLocal(liveLocal));
1755 for (int i = 0; i < numParameters(); ++i)
1756 liveOperands.append(virtualRegisterForArgument(i));
1758 auto profiles = std::make_unique<ValueProfileAndOperandBuffer>(liveOperands.size());
1759 RELEASE_ASSERT(profiles->m_size == liveOperands.size());
1760 for (unsigned i = 0; i < profiles->m_size; ++i)
1761 profiles->m_buffer.get()[i].m_operand = liveOperands[i].offset();
1763 createRareDataIfNecessary();
1765 // The compiler thread will read this pointer value and then proceed to dereference it
1766 // if it is not null. We need to make sure all above stores happen before this store so
1767 // the compiler thread reads fully initialized data.
1768 WTF::storeStoreFence();
1770 op.metadata(this).m_buffer = profiles.get();
1772 ConcurrentJSLocker locker(m_lock);
1773 m_rareData->m_catchProfiles.append(WTFMove(profiles));
1777 void CodeBlock::removeExceptionHandlerForCallSite(CallSiteIndex callSiteIndex)
1779 RELEASE_ASSERT(m_rareData);
1780 Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
1781 unsigned index = callSiteIndex.bits();
1782 for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
1783 HandlerInfo& handler = exceptionHandlers[i];
1784 if (handler.start <= index && handler.end > index) {
1785 exceptionHandlers.remove(i);
1790 RELEASE_ASSERT_NOT_REACHED();
1793 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
1795 RELEASE_ASSERT(bytecodeOffset < instructions().size());
1796 return ownerExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
1799 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
1806 expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1810 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
1812 m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
1813 divot += sourceOffset();
1814 column += line ? 1 : firstLineColumnOffset();
1815 line += ownerExecutable()->firstLine();
1818 bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
1820 const InstructionStream& instructionStream = instructions();
1821 for (const auto& it : instructionStream) {
1822 if (it->is<OpDebug>()) {
1824 unsigned opDebugLine;
1825 unsigned opDebugColumn;
1826 expressionRangeForBytecodeOffset(it.offset(), unused, unused, unused, opDebugLine, opDebugColumn);
1827 if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
1834 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
1836 ConcurrentJSLocker locker(m_lock);
1839 if (auto* jitData = m_jitData.get())
1840 jitData->m_rareCaseProfiles.shrinkToFit();
1843 if (shrinkMode == EarlyShrink) {
1844 m_constantRegisters.shrinkToFit();
1845 m_constantsSourceCodeRepresentation.shrinkToFit();
1848 m_rareData->m_switchJumpTables.shrinkToFit();
1849 m_rareData->m_stringSwitchJumpTables.shrinkToFit();
1851 } // else don't shrink these, because we would have already pointed pointers into these tables.
1855 void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
1857 noticeIncomingCall(callerFrame);
1858 ConcurrentJSLocker locker(m_lock);
1859 ensureJITData(locker).m_incomingCalls.push(incoming);
1862 void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
1864 noticeIncomingCall(callerFrame);
1866 ConcurrentJSLocker locker(m_lock);
1867 ensureJITData(locker).m_incomingPolymorphicCalls.push(incoming);
1870 #endif // ENABLE(JIT)
1872 void CodeBlock::unlinkIncomingCalls()
1874 while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
1875 m_incomingLLIntCalls.begin()->unlink();
1877 JITData* jitData = nullptr;
1879 ConcurrentJSLocker locker(m_lock);
1880 jitData = m_jitData.get();
1883 while (jitData->m_incomingCalls.begin() != jitData->m_incomingCalls.end())
1884 jitData->m_incomingCalls.begin()->unlink(*vm());
1885 while (jitData->m_incomingPolymorphicCalls.begin() != jitData->m_incomingPolymorphicCalls.end())
1886 jitData->m_incomingPolymorphicCalls.begin()->unlink(*vm());
1888 #endif // ENABLE(JIT)
1891 void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
1893 noticeIncomingCall(callerFrame);
1894 m_incomingLLIntCalls.push(incoming);
1897 CodeBlock* CodeBlock::newReplacement()
1899 return ownerExecutable()->newReplacementCodeBlockFor(specializationKind());
1903 CodeBlock* CodeBlock::replacement()
1905 const ClassInfo* classInfo = this->classInfo(*vm());
1907 if (classInfo == FunctionCodeBlock::info())
1908 return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(isConstructor() ? CodeForConstruct : CodeForCall);
1910 if (classInfo == EvalCodeBlock::info())
1911 return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
1913 if (classInfo == ProgramCodeBlock::info())
1914 return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
1916 if (classInfo == ModuleProgramCodeBlock::info())
1917 return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock();
1919 RELEASE_ASSERT_NOT_REACHED();
1923 DFG::CapabilityLevel CodeBlock::computeCapabilityLevel()
1925 const ClassInfo* classInfo = this->classInfo(*vm());
1927 if (classInfo == FunctionCodeBlock::info()) {
1928 if (isConstructor())
1929 return DFG::functionForConstructCapabilityLevel(this);
1930 return DFG::functionForCallCapabilityLevel(this);
1933 if (classInfo == EvalCodeBlock::info())
1934 return DFG::evalCapabilityLevel(this);
1936 if (classInfo == ProgramCodeBlock::info())
1937 return DFG::programCapabilityLevel(this);
1939 if (classInfo == ModuleProgramCodeBlock::info())
1940 return DFG::programCapabilityLevel(this);
1942 RELEASE_ASSERT_NOT_REACHED();
1943 return DFG::CannotCompile;
1946 #endif // ENABLE(JIT)
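// jettison() permanently retires this CodeBlock: it invalidates the machine code if we are an
// optimizing-JIT block, optionally counts a reoptimization for backoff purposes, and reinstalls
// the alternative so that future calls to the owner executable no longer land here.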
1948 void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
1950 #if !ENABLE(DFG_JIT)
1952 UNUSED_PARAM(detail);
1957 CODEBLOCK_LOG_EVENT(this, "jettison", ("due to ", reason, ", counting = ", mode == CountReoptimization, ", detail = ", pointerDump(detail)));
1959 RELEASE_ASSERT(reason != Profiler::NotJettisoned);
1962 if (DFG::shouldDumpDisassembly()) {
1963 dataLog("Jettisoning ", *this);
1964 if (mode == CountReoptimization)
1965 dataLog(" and counting reoptimization");
1966 dataLog(" due to ", reason);
1968 dataLog(", ", *detail);
1972 if (reason == Profiler::JettisonDueToWeakReference) {
1973 if (DFG::shouldDumpDisassembly()) {
1974 dataLog(*this, " will be jettisoned because of the following dead references:\n");
1975 DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1976 for (auto& transition : dfgCommon->transitions) {
1977 JSCell* origin = transition.m_codeOrigin.get();
1978 JSCell* from = transition.m_from.get();
1979 JSCell* to = transition.m_to.get();
1980 if ((!origin || vm.heap.isMarked(origin)) && vm.heap.isMarked(from))
1982 dataLog(" Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
1984 for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
1985 JSCell* weak = dfgCommon->weakReferences[i].get();
1986 if (vm.heap.isMarked(weak))
1988 dataLog(" Weak reference ", RawPointer(weak), ".\n");
1992 #endif // ENABLE(DFG_JIT)
1994 DeferGCForAWhile deferGC(*heap());
1996 // We want to accomplish two things here:
1997 // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
1998 // we should OSR exit at the top of the next bytecode instruction after the return.
1999 // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
2002 if (JITCode::isOptimizingJIT(jitType()))
2003 jitCode()->dfgCommon()->clearWatchpoints();
2005 if (reason != Profiler::JettisonDueToOldAge) {
2006 Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get();
2007 if (UNLIKELY(compilation))
2008 compilation->setJettisonReason(reason, detail);
2010 // This accomplishes (1), and does its own book-keeping about whether it has already happened.
2011 if (!jitCode()->dfgCommon()->invalidate()) {
2012 // We've already been invalidated.
2013 RELEASE_ASSERT(this != replacement() || (vm.heap.isCurrentThreadBusy() && !vm.heap.isMarked(ownerExecutable())));
2018 if (DFG::shouldDumpDisassembly())
2019 dataLog(" Did invalidate ", *this, "\n");
2021 // Count the reoptimization if that's what the user wanted.
2022 if (mode == CountReoptimization) {
2023 // FIXME: Maybe this should call alternative().
2024 // https://bugs.webkit.org/show_bug.cgi?id=123677
2025 baselineAlternative()->countReoptimization();
2026 if (DFG::shouldDumpDisassembly())
2027 dataLog(" Did count reoptimization for ", *this, "\n");
2030 if (this != replacement()) {
2031 // This means that we were never the entrypoint. This can happen for OSR entry code blocks.
2037 alternative()->optimizeAfterWarmUp();
2039 if (reason != Profiler::JettisonDueToOldAge && reason != Profiler::JettisonDueToVMTraps)
2040 tallyFrequentExitSites();
2041 #endif // ENABLE(DFG_JIT)
2043 // Jettison can happen during GC. We don't want to install code to a dead executable
2044 // because that would add a dead object to the remembered set.
2045 if (vm.heap.isCurrentThreadBusy() && !vm.heap.isMarked(ownerExecutable()))
2050 ConcurrentJSLocker locker(m_lock);
2051 if (JITData* jitData = m_jitData.get()) {
2052 for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
2053 callLinkInfo->setClearedByJettison();
2058 // This accomplishes (2).
2059 ownerExecutable()->installCode(vm, alternative(), codeType(), specializationKind());
2062 if (DFG::shouldDumpDisassembly())
2063 dataLog(" Did install baseline version of ", *this, "\n");
2064 #endif // ENABLE(DFG_JIT)
2067 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
2069 auto* inlineCallFrame = codeOrigin.inlineCallFrame();
2070 if (!inlineCallFrame)
2071 return globalObject();
2072 return inlineCallFrame->baselineCodeBlock->globalObject();
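// Helper for noticeIncomingCall(): walks the stack starting at a given call frame and reports
// whether the same CodeBlock appears again within a bounded number of frames, i.e. whether the
// call we just noticed is (close to) recursive.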
2075 class RecursionCheckFunctor {
2077 RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
2078 : m_startCallFrame(startCallFrame)
2079 , m_codeBlock(codeBlock)
2080 , m_depthToCheck(depthToCheck)
2081 , m_foundStartCallFrame(false)
2082 , m_didRecurse(false)
2085 StackVisitor::Status operator()(StackVisitor& visitor) const
2087 CallFrame* currentCallFrame = visitor->callFrame();
2089 if (currentCallFrame == m_startCallFrame)
2090 m_foundStartCallFrame = true;
2092 if (m_foundStartCallFrame) {
2093 if (visitor->callFrame()->codeBlock() == m_codeBlock) {
2094 m_didRecurse = true;
2095 return StackVisitor::Done;
2098 if (!m_depthToCheck--)
2099 return StackVisitor::Done;
2102 return StackVisitor::Continue;
2105 bool didRecurse() const { return m_didRecurse; }
2108 CallFrame* m_startCallFrame;
2109 CodeBlock* m_codeBlock;
2110 mutable unsigned m_depthToCheck;
2111 mutable bool m_foundStartCallFrame;
2112 mutable bool m_didRecurse;
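// noticeIncomingCall() maintains m_shouldAlwaysBeInlined ("SABI"). Each early return below clears
// the bit as soon as we see evidence that inlining this block into its caller is unlikely to pay
// off: a native caller, a caller that is too large, a caller still in the LLInt, a caller that is
// already optimized, non-function caller code, or recursion.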
2115 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
2117 CodeBlock* callerCodeBlock = callerFrame->codeBlock();
2119 if (Options::verboseCallLink())
2120 dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
2123 if (!m_shouldAlwaysBeInlined)
2126 if (!callerCodeBlock) {
2127 m_shouldAlwaysBeInlined = false;
2128 if (Options::verboseCallLink())
2129 dataLog(" Clearing SABI because caller is native.\n");
2133 if (!hasBaselineJITProfiling())
2136 if (!DFG::mightInlineFunction(this))
2139 if (!canInline(capabilityLevelState()))
2142 if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
2143 m_shouldAlwaysBeInlined = false;
2144 if (Options::verboseCallLink())
2145 dataLog(" Clearing SABI because caller is too large.\n");
2149 if (callerCodeBlock->jitType() == JITType::InterpreterThunk) {
2150 // If the caller is still in the interpreter, then we can't expect inlining to
2151 // happen anytime soon. Assume it's profitable to optimize it separately. This
2152 // ensures that a function is SABI only if it is called no more frequently than
2153 // any of its callers.
2154 m_shouldAlwaysBeInlined = false;
2155 if (Options::verboseCallLink())
2156 dataLog(" Clearing SABI because caller is in LLInt.\n");
2160 if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
2161 m_shouldAlwaysBeInlined = false;
2162 if (Options::verboseCallLink())
2163 dataLog(" Clearing SABI because caller was already optimized.\n");
2167 if (callerCodeBlock->codeType() != FunctionCode) {
2168 // If the caller is either eval or global code, assume that it won't be
2169 // optimized anytime soon. For eval code this is particularly true since we
2170 // delay eval optimization by a *lot*.
2171 m_shouldAlwaysBeInlined = false;
2172 if (Options::verboseCallLink())
2173 dataLog(" Clearing SABI because caller is not a function.\n");
2177 // Recursive calls won't be inlined.
2178 RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
2179 vm()->topCallFrame->iterate(functor);
2181 if (functor.didRecurse()) {
2182 if (Options::verboseCallLink())
2183 dataLog(" Clearing SABI because recursion was detected.\n");
2184 m_shouldAlwaysBeInlined = false;
2188 if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) {
2189 dataLog("In call from ", FullCodeOrigin(callerCodeBlock, callerFrame->codeOrigin()), " to ", *this, ": caller's DFG capability level is not set.\n");
2193 if (canCompile(callerCodeBlock->capabilityLevelState()))
2196 if (Options::verboseCallLink())
2197 dataLog(" Clearing SABI because the caller is not a DFG candidate.\n");
2199 m_shouldAlwaysBeInlined = false;
2203 unsigned CodeBlock::reoptimizationRetryCounter() const
2206 ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
2207 return m_reoptimizationRetryCounter;
2210 #endif // ENABLE(JIT)
2214 const RegisterAtOffsetList* CodeBlock::calleeSaveRegisters() const
2217 if (auto* jitData = m_jitData.get()) {
2218 if (const RegisterAtOffsetList* registers = jitData->m_calleeSaveRegisters.get())
2222 return &RegisterAtOffsetList::llintBaselineCalleeSaveRegisters();
2226 static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters)
2229 return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * sizeof(CPURegister)) / sizeof(Register));
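// Illustrative arithmetic (assuming typical sizes, not normative): with 4-byte CPURegisters and
// 8-byte Registers, 5 callee saves occupy 20 bytes, which rounds up to 24 bytes, i.e. 3 virtual
// registers; when both types are 8 bytes wide the function is the identity.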
2233 size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()
2235 return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters());
2238 size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters()
2240 return roundCalleeSaveSpaceAsVirtualRegisters(calleeSaveRegisters()->size());
2246 void CodeBlock::countReoptimization()
2248 m_reoptimizationRetryCounter++;
2249 if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
2250 m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
2253 unsigned CodeBlock::numberOfDFGCompiles()
2255 ASSERT(JITCode::isBaselineCode(jitType()));
2256 if (Options::testTheFTL()) {
2257 if (m_didFailFTLCompilation)
2259 return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
2261 CodeBlock* replacement = this->replacement();
2262 return ((replacement && JITCode::isOptimizingJIT(replacement->jitType())) ? 1 : 0) + m_reoptimizationRetryCounter;
2265 int32_t CodeBlock::codeTypeThresholdMultiplier() const
2267 if (codeType() == EvalCode)
2268 return Options::evalThresholdMultiplier();
2273 double CodeBlock::optimizationThresholdScalingFactor()
2275 // This expression arises from doing a least-squares fit of
2277 // F[x_] := a * Sqrt[x + b] + Abs[c * x] + d
2279 // against the data points:
2282 // 10 0.9 (smallest reasonable code block)
2283 // 200 1.0 (typical small-ish code block)
2284 // 320 1.2 (something I saw in 3d-cube that I wanted to optimize)
2285 // 1268 5.0 (something I saw in 3d-cube that I didn't want to optimize)
2286 // 4000 5.5 (random large size, used to cause the function to converge to a shallow curve of some sort)
2287 // 10000 6.0 (similar to above)
2289 // I achieve the minimization using the following Mathematica code:
2291 // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
2293 // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
2296 // Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
2297 // {a, b, c, d}][[2]]
2299 // And the code below (to initialize a, b, c, d) is generated by:
2301 // Print["const double " <> ToString[#[[1]]] <> " = " <>
2302 // If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
2304 // We've long known the following to be true:
2305 // - Small code blocks are cheap to optimize and so we should do it sooner rather
2307 // - Large code blocks are expensive to optimize and so we should postpone doing so,
2308 // and sometimes have a large enough threshold that we never optimize them.
2309 // - The difference in cost is not totally linear because (a) just invoking the
2310 // DFG incurs some base cost and (b) for large code blocks there is enough slop
2311 // in the correlation between instruction count and the actual compilation cost
2312 // that for those large blocks, the instruction count should not have a strong
2313 // influence on our threshold.
2315 // I knew the goals but I didn't know how to achieve them; so I picked an interesting
2316 // example where the heuristics were right (code block in 3d-cube with instruction
2317 // count 320, which got compiled early as it should have been) and one where they were
2318 // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
2319 // to compile and didn't run often enough to warrant compilation in my opinion), and
2320 // then threw in additional data points that represented my own guess of what our
2321 // heuristics should do for some round-numbered examples.
2323 // The expression to which I decided to fit the data arose because I started with an
2324 // affine function, and then did two things: put the linear part in an Abs to ensure
2325 // that the fit didn't end up choosing a negative value of c (which would result in
2326 // the function turning over and going negative for large x) and I threw in a Sqrt
2327 // term because Sqrt represents my intuition that the function should be more sensitive
2328 // to small changes in small values of x, but less sensitive when x gets large.
2330 // Note that the current fit essentially eliminates the linear portion of the
2331 // expression (c == 0.0).
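// Rough sanity check (illustrative, not part of the fit): with c == 0 the factor is
// d + a * Sqrt[bytecodeCost + b], which works out to roughly 1.0 for a tiny block
// (bytecodeCost around 10) and roughly 7.0 for a very large one (bytecodeCost around 10000).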
2332 const double a = 0.061504;
2333 const double b = 1.02406;
2334 const double c = 0.0;
2335 const double d = 0.825914;
2337 double bytecodeCost = this->bytecodeCost();
2339 ASSERT(bytecodeCost); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
2341 double result = d + a * sqrt(bytecodeCost + b) + c * bytecodeCost;
2343 result *= codeTypeThresholdMultiplier();
2345 if (Options::verboseOSR()) {
2347 *this, ": bytecode cost is ", bytecodeCost,
2348 ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
2354 static int32_t clipThreshold(double threshold)
2356 if (threshold < 1.0)
2359 if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
2360 return std::numeric_limits<int32_t>::max();
2362 return static_cast<int32_t>(threshold);
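// The adjusted counter value is effectively
//     clipThreshold(desiredThreshold * optimizationThresholdScalingFactor() * 2^reoptimizationRetryCounter())
// i.e. the base threshold scaled by code size and by exponential backoff from prior reoptimizations.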
2365 int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
2367 return clipThreshold(
2368 static_cast<double>(desiredThreshold) *
2369 optimizationThresholdScalingFactor() *
2370 (1 << reoptimizationRetryCounter()));
2373 bool CodeBlock::checkIfOptimizationThresholdReached()
2376 if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
2377 if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
2378 == DFG::Worklist::Compiled) {
2379 optimizeNextInvocation();
2385 return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
2389 auto CodeBlock::updateOSRExitCounterAndCheckIfNeedToReoptimize(DFG::OSRExitState& exitState) -> OptimizeAction
2391 DFG::OSRExitBase& exit = exitState.exit;
2392 if (!exitKindMayJettison(exit.m_kind)) {
2393 // FIXME: We may want to notice that we're frequently exiting
2394 // at an op_catch that we didn't compile an entrypoint for, and
2395 // then trigger a reoptimization of this CodeBlock:
2396 // https://bugs.webkit.org/show_bug.cgi?id=175842
2397 return OptimizeAction::None;
2403 CodeBlock* baselineCodeBlock = exitState.baselineCodeBlock;
2404 ASSERT(baselineCodeBlock == baselineAlternative());
2405 if (UNLIKELY(baselineCodeBlock->jitExecuteCounter().hasCrossedThreshold()))
2406 return OptimizeAction::ReoptimizeNow;
2408 // We want to figure out if there's a possibility that we're in a loop. For the outermost
2409 // code block in the inline stack, we handle this appropriately by having the loop OSR trigger
2410 // check the exit count of the replacement of the CodeBlock from which we are OSRing. The
2411 // problem is the inlined functions, which might also have loops, but whose baseline versions
2412 // don't know where to look for the exit count. Figure out if those loops are severe enough
2413 // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger.
2414 // Otherwise, we should use the normal reoptimization trigger.
2416 bool didTryToEnterInLoop = false;
2417 for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame(); inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame()) {
2418 if (inlineCallFrame->baselineCodeBlock->ownerExecutable()->didTryToEnterInLoop()) {
2419 didTryToEnterInLoop = true;
2424 uint32_t exitCountThreshold = didTryToEnterInLoop
2425 ? exitCountThresholdForReoptimizationFromLoop()
2426 : exitCountThresholdForReoptimization();
2428 if (m_osrExitCounter > exitCountThreshold)
2429 return OptimizeAction::ReoptimizeNow;
2431 // Too few failures so far. Adjust the execution counter so that we only try to optimize again after a while.
2432 baselineCodeBlock->m_jitExecuteCounter.setNewThresholdForOSRExit(exitState.activeThreshold, exitState.memoryUsageAdjustedThreshold);
2433 return OptimizeAction::None;
2437 void CodeBlock::optimizeNextInvocation()
2439 if (Options::verboseOSR())
2440 dataLog(*this, ": Optimizing next invocation.\n");
2441 m_jitExecuteCounter.setNewThreshold(0, this);
2444 void CodeBlock::dontOptimizeAnytimeSoon()
2446 if (Options::verboseOSR())
2447 dataLog(*this, ": Not optimizing anytime soon.\n");
2448 m_jitExecuteCounter.deferIndefinitely();
2451 void CodeBlock::optimizeAfterWarmUp()
2453 if (Options::verboseOSR())
2454 dataLog(*this, ": Optimizing after warm-up.\n");
2456 m_jitExecuteCounter.setNewThreshold(
2457 adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
2461 void CodeBlock::optimizeAfterLongWarmUp()
2463 if (Options::verboseOSR())
2464 dataLog(*this, ": Optimizing after long warm-up.\n");
2466 m_jitExecuteCounter.setNewThreshold(
2467 adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
2471 void CodeBlock::optimizeSoon()
2473 if (Options::verboseOSR())
2474 dataLog(*this, ": Optimizing soon.\n");
2476 m_jitExecuteCounter.setNewThreshold(
2477 adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
2481 void CodeBlock::forceOptimizationSlowPathConcurrently()
2483 if (Options::verboseOSR())
2484 dataLog(*this, ": Forcing slow path concurrently.\n");
2485 m_jitExecuteCounter.forceSlowPathConcurrently();
2489 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
2491 JITType type = jitType();
2492 if (type != JITType::BaselineJIT) {
2493 dataLog(*this, ": expected to have baseline code but have ", type, "\n");
2494 CRASH_WITH_INFO(bitwise_cast<uintptr_t>(jitCode().get()), static_cast<uint8_t>(type));
2497 CodeBlock* replacement = this->replacement();
2498 bool hasReplacement = (replacement && replacement != this);
2499 if ((result == CompilationSuccessful) != hasReplacement) {
2500 dataLog(*this, ": we have result = ", result, " but ");
2501 if (replacement == this)
2502 dataLog("we are our own replacement.\n");
2504 dataLog("our replacement is ", pointerDump(replacement), "\n");
2505 RELEASE_ASSERT_NOT_REACHED();
2509 case CompilationSuccessful:
2510 RELEASE_ASSERT(replacement && JITCode::isOptimizingJIT(replacement->jitType()));
2511 optimizeNextInvocation();
2513 case CompilationFailed:
2514 dontOptimizeAnytimeSoon();
2516 case CompilationDeferred:
2517 // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
2518 // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
2519 // necessarily guarantee anything. So, we make sure that even if that
2520 // function ends up being a no-op, we still eventually retry and realize
2521 // that we have optimized code ready.
2522 optimizeAfterWarmUp();
2524 case CompilationInvalidated:
2525 // Retry with exponential backoff.
2526 countReoptimization();
2527 optimizeAfterWarmUp();
2531 dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
2532 RELEASE_ASSERT_NOT_REACHED();
2537 uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
2539 ASSERT(JITCode::isOptimizingJIT(jitType()));
2540 // Compute this the lame way so we don't saturate. This is called infrequently
2541 // enough that this loop won't hurt us.
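// The net effect is desiredThreshold * 2^retryCount, saturating at UINT32_MAX instead of wrapping.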
2542 unsigned result = desiredThreshold;
2543 for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
2544 unsigned newResult = result << 1;
2545 if (newResult < result)
2546 return std::numeric_limits<uint32_t>::max();
2552 uint32_t CodeBlock::exitCountThresholdForReoptimization()
2554 return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
2557 uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
2559 return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
2562 bool CodeBlock::shouldReoptimizeNow()
2564 return osrExitCounter() >= exitCountThresholdForReoptimization();
2567 bool CodeBlock::shouldReoptimizeFromLoopNow()
2569 return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
2573 ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset)
2575 auto instruction = instructions().at(bytecodeOffset);
2576 switch (instruction->opcodeID()) {
2578 case Op::opcodeID: \
2579 return &instruction->as<Op>().metadata(this).m_arrayProfile;
2581 FOR_EACH_OPCODE_WITH_ARRAY_PROFILE(CASE)
2584 case OpGetById::opcodeID: {
2585 auto bytecode = instruction->as<OpGetById>();
2586 auto& metadata = bytecode.metadata(this);
2587 if (metadata.m_mode == GetByIdMode::ArrayLength)
2588 return &metadata.m_modeMetadata.arrayLengthMode.arrayProfile;
2598 ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
2600 ConcurrentJSLocker locker(m_lock);
2601 return getArrayProfile(locker, bytecodeOffset);
2605 Vector<CodeOrigin, 0, UnsafeVectorOverflow>& CodeBlock::codeOrigins()
2607 return m_jitCode->dfgCommon()->codeOrigins;
2610 size_t CodeBlock::numberOfDFGIdentifiers() const
2612 if (!JITCode::isOptimizingJIT(jitType()))
2615 return m_jitCode->dfgCommon()->dfgIdentifiers.size();
2618 const Identifier& CodeBlock::identifier(int index) const
2620 size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
2621 if (static_cast<unsigned>(index) < unlinkedIdentifiers)
2622 return m_unlinkedCode->identifier(index);
2623 ASSERT(JITCode::isOptimizingJIT(jitType()));
2624 return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
2626 #endif // ENABLE(DFG_JIT)
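// Recomputes the prediction for every value profile while also counting how many non-argument
// profiles are live and how many samples they hold; shouldOptimizeNow() uses those two numbers to
// judge whether profiling has matured enough to tier up.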
2628 void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
2630 ConcurrentJSLocker locker(m_lock);
2632 numberOfLiveNonArgumentValueProfiles = 0;
2633 numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
2635 forEachValueProfile([&](ValueProfile& profile) {
2636 unsigned numSamples = profile.totalNumberOfSamples();
2637 if (numSamples > ValueProfile::numberOfBuckets)
2638 numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
2639 numberOfSamplesInProfiles += numSamples;
2640 if (profile.m_bytecodeOffset < 0) {
2641 profile.computeUpdatedPrediction(locker);
2644 if (profile.numberOfSamples() || profile.m_prediction != SpecNone)
2645 numberOfLiveNonArgumentValueProfiles++;
2646 profile.computeUpdatedPrediction(locker);
2649 if (auto* rareData = m_rareData.get()) {
2650 for (auto& profileBucket : rareData->m_catchProfiles) {
2651 profileBucket->forEach([&] (ValueProfileAndOperand& profile) {
2652 profile.m_profile.computeUpdatedPrediction(locker);
2658 lazyOperandValueProfiles(locker).computeUpdatedPredictions(locker);
2662 void CodeBlock::updateAllValueProfilePredictions()
2664 unsigned ignoredValue1, ignoredValue2;
2665 updateAllPredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
2668 void CodeBlock::updateAllArrayPredictions()
2670 ConcurrentJSLocker locker(m_lock);
2672 forEachArrayProfile([&](ArrayProfile& profile) {
2673 profile.computeUpdatedPrediction(locker, this);
2676 forEachArrayAllocationProfile([&](ArrayAllocationProfile& profile) {
2677 profile.updateProfile();
2681 void CodeBlock::updateAllPredictions()
2683 updateAllValueProfilePredictions();
2684 updateAllArrayPredictions();
2687 bool CodeBlock::shouldOptimizeNow()
2689 if (Options::verboseOSR())
2690 dataLog("Considering optimizing ", *this, "...\n");
2692 if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
2695 updateAllArrayPredictions();
2697 unsigned numberOfLiveNonArgumentValueProfiles;
2698 unsigned numberOfSamplesInProfiles;
2699 updateAllPredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
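// We only tier up once the profiles are sufficiently live and full and the minimum optimization
// delay has passed; otherwise we bump the delay counter and re-arm the warm-up threshold.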
2701 if (Options::verboseOSR()) {
2703 "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
2704 (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles(),
2705 numberOfLiveNonArgumentValueProfiles, numberOfNonArgumentValueProfiles(),
2706 (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfNonArgumentValueProfiles(),
2707 numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfNonArgumentValueProfiles());
2710 if ((!numberOfNonArgumentValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles() >= Options::desiredProfileLivenessRate())
2711 && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
2712 && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
2715 ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
2716 m_optimizationDelayCounter++;
2717 optimizeAfterWarmUp();
2722 void CodeBlock::tallyFrequentExitSites()
2724 ASSERT(JITCode::isOptimizingJIT(jitType()));
2725 ASSERT(alternative()->jitType() == JITType::BaselineJIT);
2727 CodeBlock* profiledBlock = alternative();
2729 switch (jitType()) {
2730 case JITType::DFGJIT: {
2731 DFG::JITCode* jitCode = m_jitCode->dfg();
2732 for (auto& exit : jitCode->osrExit)
2733 exit.considerAddingAsFrequentExitSite(profiledBlock);
2738 case JITType::FTLJIT: {
2739 // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
2740 // vector contains a totally different type that just so happens to behave like
2741 // DFG::JITCode::osrExit.
2742 FTL::JITCode* jitCode = m_jitCode->ftl();
2743 for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
2744 FTL::OSRExit& exit = jitCode->osrExit[i];
2745 exit.considerAddingAsFrequentExitSite(profiledBlock);
2752 RELEASE_ASSERT_NOT_REACHED();
2756 #endif // ENABLE(DFG_JIT)
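// Called when the global lexical environment gains bindings. Any op_resolve_scope that was linked
// as GlobalProperty and is now shadowed by a lexical binding has its cached epoch cleared so it
// gets re-resolved; unshadowed ones are stamped with the current global lexical binding epoch.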
2758 void CodeBlock::notifyLexicalBindingUpdate()
2760 // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment, so this special case should be removed once that is fixed.
2761 // https://bugs.webkit.org/show_bug.cgi?id=193347
2762 if (scriptMode() == JSParserScriptMode::Module)
2764 JSGlobalObject* globalObject = m_globalObject.get();
2765 JSGlobalLexicalEnvironment* globalLexicalEnvironment = jsCast<JSGlobalLexicalEnvironment*>(globalObject->globalScope());
2766 SymbolTable* symbolTable = globalLexicalEnvironment->symbolTable();
2768 ConcurrentJSLocker locker(m_lock);
2770 auto isShadowed = [&] (UniquedStringImpl* uid) {
2771 ConcurrentJSLocker locker(symbolTable->m_lock);
2772 return symbolTable->contains(locker, uid);
2775 const InstructionStream& instructionStream = instructions();
2776 for (const auto& instruction : instructionStream) {
2777 OpcodeID opcodeID = instruction->opcodeID();
2779 case op_resolve_scope: {
2780 auto bytecode = instruction->as<OpResolveScope>();
2781 auto& metadata = bytecode.metadata(this);
2782 ResolveType originalResolveType = metadata.m_resolveType;
2783 if (originalResolveType == GlobalProperty || originalResolveType == GlobalPropertyWithVarInjectionChecks) {
2784 const Identifier& ident = identifier(bytecode.m_var);
2785 if (isShadowed(ident.impl()))
2786 metadata.m_globalLexicalBindingEpoch = 0;
2788 metadata.m_globalLexicalBindingEpoch = globalObject->globalLexicalBindingEpoch();
2798 #if ENABLE(VERBOSE_VALUE_PROFILE)
2799 void CodeBlock::dumpValueProfiles()
2801 dataLog("ValueProfile for ", *this, ":\n");
2802 forEachValueProfile([](ValueProfile& profile) {
2803 if (profile.m_bytecodeOffset < 0) {
2804 ASSERT(profile.m_bytecodeOffset == -1);
2805 dataLogF(" arg: "); // The argument index is not available from forEachValueProfile here.
2807 dataLogF(" bc = %d: ", profile.m_bytecodeOffset);
2808 if (!profile.numberOfSamples() && profile.m_prediction == SpecNone) {
2809 dataLogF("<empty>\n");
2812 profile.dump(WTF::dataFile());
2815 dataLog("RareCaseProfile for ", *this, ":\n");
2816 if (auto* jitData = m_jitData.get()) {
2817 for (RareCaseProfile* profile : jitData->m_rareCaseProfiles)
2818 dataLogF(" bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
2821 #endif // ENABLE(VERBOSE_VALUE_PROFILE)
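// The machine frame size depends on which tier produced this CodeBlock's code.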
2823 unsigned CodeBlock::frameRegisterCount()
2825 switch (jitType()) {
2826 case JITType::InterpreterThunk:
2827 return LLInt::frameRegisterCountFor(this);
2830 case JITType::BaselineJIT:
2831 return JIT::frameRegisterCountFor(this);
2832 #endif // ENABLE(JIT)
2835 case JITType::DFGJIT:
2836 case JITType::FTLJIT:
2837 return jitCode()->dfgCommon()->frameRegisterCount;
2838 #endif // ENABLE(DFG_JIT)
2841 RELEASE_ASSERT_NOT_REACHED();
2846 int CodeBlock::stackPointerOffset()
2848 return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
2851 size_t CodeBlock::predictedMachineCodeSize()
2854 // This will be called from CodeBlock::CodeBlock before either m_vm or the
2855 // instructions have been initialized. It's OK to return 0 because what will really
2856 // matter is the recomputation of this value when the slow path is triggered.
2860 if (!*vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
2861 return 0; // It's as good of a prediction as we'll get.
2863 // Be conservative: return a size that will be an overestimation 84% of the time.
2864 double multiplier = vm->machineCodeBytesPerBytecodeWordForBaselineJIT->mean() +
2865 vm->machineCodeBytesPerBytecodeWordForBaselineJIT->standardDeviation();
2867 // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
2868 // here is OK, since this whole method is just a heuristic.
2869 if (multiplier < 0 || multiplier > 1000)
2872 double doubleResult = multiplier * bytecodeCost();
2874 // Be even more paranoid: silently reject values that won't fit into a size_t. If
2875 // the function is so huge that we can't even fit it into virtual memory then we
2876 // should probably have some other guards in place to prevent us from even getting to this point.
2878 if (doubleResult > std::numeric_limits<size_t>::max())
2881 return static_cast<size_t>(doubleResult);
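// Best-effort reverse mapping from a virtual register to a source-level name, intended for
// debugging output; it scans every symbol table reachable from the constant pool, so it is slow
// and (per the FIXME below) not safe to call from the compilation thread.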
2884 String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
2886 for (auto& constantRegister : m_constantRegisters) {
2887 if (constantRegister.get().isEmpty())
2889 if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(*vm(), constantRegister.get())) {
2890 ConcurrentJSLocker locker(symbolTable->m_lock);
2891 auto end = symbolTable->end(locker);
2892 for (auto ptr = symbolTable->begin(locker); ptr != end; ++ptr) {
2893 if (ptr->value.varOffset() == VarOffset(virtualRegister)) {
2894 // FIXME: This won't work from the compilation thread.
2895 // https://bugs.webkit.org/show_bug.cgi?id=115300
2896 return ptr->key.get();
2901 if (virtualRegister == thisRegister())
2903 if (virtualRegister.isArgument())
2904 return makeString("arguments[", pad(' ', 3, virtualRegister.toArgument()), ']');
2906 return emptyString();
2909 ValueProfile* CodeBlock::tryGetValueProfileForBytecodeOffset(int bytecodeOffset)
2911 auto instruction = instructions().at(bytecodeOffset);
2912 switch (instruction->opcodeID()) {
2915 case Op::opcodeID: \
2916 return &instruction->as<Op>().metadata(this).m_profile;
2918 FOR_EACH_OPCODE_WITH_VALUE_PROFILE(CASE)
2928 SpeculatedType CodeBlock::valueProfilePredictionForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
2930 if (ValueProfile* valueProfile = tryGetValueProfileForBytecodeOffset(bytecodeOffset))
2931 return valueProfile->computeUpdatedPrediction(locker);
2935 ValueProfile& CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset)
2937 return *tryGetValueProfileForBytecodeOffset(bytecodeOffset);
2940 void CodeBlock::validate()
2942 BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect CodeBlock footprint.
2944 FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(this, 0);
2946 if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeLocals)) {
2947 beginValidationDidFail();
2948 dataLog(" Wrong number of bits in result!\n");
2949 dataLog(" Result: ", liveAtHead, "\n");
2950 dataLog(" Bit count: ", liveAtHead.numBits(), "\n");
2951 endValidationDidFail();
2954 for (unsigned i = m_numCalleeLocals; i--;) {
2955 VirtualRegister reg = virtualRegisterForLocal(i);
2957 if (liveAtHead[i]) {
2958 beginValidationDidFail();
2959 dataLog(" Variable ", reg, " is expected to be dead.\n");
2960 dataLog(" Result: ", liveAtHead, "\n");
2961 endValidationDidFail();
2965 const InstructionStream& instructionStream = instructions();
2966 for (const auto& instruction : instructionStream) {
2967 OpcodeID opcode = instruction->opcodeID();
2968 if (!!baselineAlternative()->handlerForBytecodeOffset(instruction.offset())) {
2969 if (opcode == op_catch || opcode == op_enter) {
2970 // op_catch/op_enter logically represent an entrypoint. Entrypoints are not allowed to be
2971 // inside of a try block because they are responsible for bootstrapping state. And they
2972 // are never allowed to throw an exception because of this. We rely on this when compiling
2973 // in the DFG. Because an entrypoint never throws, the bytecode generator will never
2974 // allow one inside a try block.
2975 beginValidationDidFail();
2976 dataLog(" entrypoint not allowed inside a try block.");
2977 endValidationDidFail();
2983 void CodeBlock::beginValidationDidFail()
2985 dataLog("Validation failure in ", *this, ":\n");
2989 void CodeBlock::endValidationDidFail()
2994 dataLog("Validation failure.\n");
2995 RELEASE_ASSERT_NOT_REACHED();
2998 void CodeBlock::addBreakpoint(unsigned numBreakpoints)
3000 m_numBreakpoints += numBreakpoints;
3001 ASSERT(m_numBreakpoints);
3002 if (JITCode::isOptimizingJIT(jitType()))
3003 jettison(Profiler::JettisonDueToDebuggerBreakpoint);
3006 void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
3008 m_steppingMode = mode;
3009 if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType()))
3010 jettison(Profiler::JettisonDueToDebuggerStepping);
3013 int CodeBlock::outOfLineJumpOffset(const Instruction* pc)
3015 int offset = bytecodeOffset(pc);
3016 return m_unlinkedCode->outOfLineJumpOffset(offset);
3019 const Instruction* CodeBlock::outOfLineJumpTarget(const Instruction* pc)
3021 int offset = bytecodeOffset(pc);
3022 int target = m_unlinkedCode->outOfLineJumpOffset(offset);
3023 return instructions().at(offset + target).ptr();
3026 ArithProfile* CodeBlock::arithProfileForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
3028 return arithProfileForPC(instructions().at(bytecodeOffset).ptr());
3031 ArithProfile* CodeBlock::arithProfileForPC(const Instruction* pc)
3033 switch (pc->opcodeID()) {
3035 return &pc->as<OpNegate>().metadata(this).m_arithProfile;
3037 return &pc->as<OpAdd>().metadata(this).m_arithProfile;
3039 return &pc->as<OpMul>().metadata(this).m_arithProfile;
3041 return &pc->as<OpSub>().metadata(this).m_arithProfile;
3043 return &pc->as<OpDiv>().metadata(this).m_arithProfile;
3051 bool CodeBlock::couldTakeSpecialFastCase(InstructionStream::Offset bytecodeOffset)
3053 if (!hasBaselineJITProfiling())
3055 ArithProfile* profile = arithProfileForBytecodeOffset(bytecodeOffset);
3058 return profile->tookSpecialFastPath();
3062 DFG::CapabilityLevel CodeBlock::capabilityLevel()
3064 DFG::CapabilityLevel result = computeCapabilityLevel();
3065 m_capabilityLevelState = result;
3070 void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler()
3072 if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets())
3074 const Vector<InstructionStream::Offset>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();
3075 for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) {
3076 // Because op_profile_control_flow is emitted at the beginning of every basic block, finding
3077 // the next op_profile_control_flow will give us the text range of a single basic block.
3078 size_t startIdx = bytecodeOffsets[i];
3079 auto instruction = instructions().at(startIdx);
3080 RELEASE_ASSERT(instruction->opcodeID() == op_profile_control_flow);
3081 auto bytecode = instruction->as<OpProfileControlFlow>();
3082 auto& metadata = bytecode.metadata(this);
3083 int basicBlockStartOffset = bytecode.m_textOffset;
3084 int basicBlockEndOffset;
3085 if (i + 1 < offsetsLength) {
3086 size_t endIdx = bytecodeOffsets[i + 1];
3087 auto endInstruction = instructions().at(endIdx);
3088 RELEASE_ASSERT(endInstruction->opcodeID() == op_profile_control_flow);
3089 basicBlockEndOffset = endInstruction->as<OpProfileControlFlow>().m_textOffset - 1;
3091 basicBlockEndOffset = sourceOffset() + ownerExecutable()->source().length() - 1; // Offset before the closing brace.
3092 basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace; ensure it is the offset before.
3095 // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more
3096 // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than
3097 // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node
3098 // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different
3099 // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript
3100 // program. The condition:
3101 // (basicBlockEndOffset < basicBlockStartOffset)
3102 // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic
3103 // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These
3104 // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same
3105 // internal data structure, so if any of them execute, it will record the same textual basic block in the
3106 // JavaScript program as executing.
3107 // At the bytecode level, this situation looks like:
3108 // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset)
3110 // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m).
3112 // m: op_profile_control_flow
3113 if (basicBlockEndOffset < basicBlockStartOffset) {
3114 RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock.
3115 metadata.m_basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock();
3119 BasicBlockLocation* basicBlockLocation = vm()->controlFlowProfiler()->getBasicBlockLocation(ownerExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset);
3121 // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset]
3122 // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation.
3123 // This is necessary because in the original source text of a JavaScript program,
3124 // function literals form new basic block boundaries, but they aren't represented
3125 // inside the CodeBlock's instruction stream.
3126 auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) {
3127 const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable();
3128 int functionStart = executable->typeProfilingStartOffset();
3129 int functionEnd = executable->typeProfilingEndOffset();
3130 if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset)
3131 basicBlockLocation->insertGap(functionStart, functionEnd);
3134 for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls)
3135 insertFunctionGaps(executable);
3136 for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs)
3137 insertFunctionGaps(executable);
3139 metadata.m_basicBlockLocation = basicBlockLocation;
3144 void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map)
3146 ConcurrentJSLocker locker(m_lock);
3147 ensureJITData(locker).m_pcToCodeOriginMap = WTFMove(map);
3150 Optional<CodeOrigin> CodeBlock::findPC(void* pc)
3153 ConcurrentJSLocker locker(m_lock);
3154 if (auto* jitData = m_jitData.get()) {
3155 if (jitData->m_pcToCodeOriginMap) {
3156 if (Optional<CodeOrigin> codeOrigin = jitData->m_pcToCodeOriginMap->findPC(pc))
3160 for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
3161 if (stubInfo->containsPC(pc))
3162 return Optional<CodeOrigin>(stubInfo->codeOrigin);
3167 if (Optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc))
3170 return WTF::nullopt;
3172 #endif // ENABLE(JIT)
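// How a CallSiteIndex maps back to a bytecode offset depends on the tier: the LLInt and baseline
// JIT encode it directly (either as the offset itself or as an Instruction pointer), while the
// DFG and FTL go through the CodeOrigin recorded for the call site.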
3174 Optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex)
3176 Optional<unsigned> bytecodeOffset;
3177 JITType jitType = this->jitType();
3178 if (jitType == JITType::InterpreterThunk || jitType == JITType::BaselineJIT) {
3180 bytecodeOffset = callSiteIndex.bits();
3182 Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits());
3183 bytecodeOffset = this->bytecodeOffset(instruction);
3185 } else if (jitType == JITType::DFGJIT || jitType == JITType::FTLJIT) {
3187 RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex));
3188 CodeOrigin origin = codeOrigin(callSiteIndex);
3189 bytecodeOffset = origin.bytecodeIndex();
3191 RELEASE_ASSERT_NOT_REACHED();
3195 return bytecodeOffset;
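// Scales the LLInt tier-up threshold based on unlinkedCodeBlock()->didOptimize(): a history of
// having reached the optimizing tiers argues for tiering up sooner, while a history of never
// doing so argues for waiting longer.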
3198 int32_t CodeBlock::thresholdForJIT(int32_t threshold)
3200 switch (unlinkedCodeBlock()->didOptimize()) {
3204 return threshold * 4;
3206 return threshold / 2;
3208 ASSERT_NOT_REACHED();
3212 void CodeBlock::jitAfterWarmUp()
3214 m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITAfterWarmUp()), this);
3217 void CodeBlock::jitSoon()
3219 m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITSoon()), this);
3222 bool CodeBlock::hasInstalledVMTrapBreakpoints() const
3224 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3225 // This function may be called from a signal handler. We need to be
3226 // careful to not call anything that is not signal handler safe, e.g.
3227 // we should not perturb the refCount of m_jitCode.
3228 if (!JITCode::isOptimizingJIT(jitType()))
3230 return m_jitCode->dfgCommon()->hasInstalledVMTrapsBreakpoints();
3236 bool CodeBlock::installVMTrapBreakpoints()
3238 #if ENABLE(SIGNAL_BASED_VM_TRAPS)
3239 // This function may be called from a signal handler. We need to be
3240 // careful to not call anything that is not signal handler safe, e.g.
3241 // we should not perturb the refCount of m_jitCode.
3242 if (!JITCode::isOptimizingJIT(jitType()))
3244 auto& commonData = *m_jitCode->dfgCommon();
3245 commonData.installVMTrapBreakpoints(this);
3248 UNREACHABLE_FOR_PLATFORM();
3253 void CodeBlock::dumpMathICStats()
3255 #if ENABLE(MATH_IC_STATS)
3256 double numAdds = 0.0;
3257 double totalAddSize = 0.0;
3258 double numMuls = 0.0;
3259 double totalMulSize = 0.0;
3260 double numNegs = 0.0;
3261 double totalNegSize = 0.0;
3262 double numSubs = 0.0;
3263 double totalSubSize = 0.0;
3265 auto countICs = [&] (CodeBlock* codeBlock) {
3266 if (auto* jitData = codeBlock->m_jitData.get()) {
3267 for (JITAddIC* addIC : jitData->m_addICs) {
3269 totalAddSize += addIC->codeSize();
3272 for (JITMulIC* mulIC : jitData->m_mulICs) {
3274 totalMulSize += mulIC->codeSize();
3277 for (JITNegIC* negIC : jitData->m_negICs) {
3279 totalNegSize += negIC->codeSize();
3282 for (JITSubIC* subIC : jitData->m_subICs) {
3284 totalSubSize += subIC->codeSize();
3288 heap()->forEachCodeBlock(countICs);
3290 dataLog("Num Adds: ", numAdds, "\n");
3291 dataLog("Total Add size in bytes: ", totalAddSize, "\n");
3292 dataLog("Average Add size: ", totalAddSize / numAdds, "\n");
3294 dataLog("Num Muls: ", numMuls, "\n");
3295 dataLog("Total Mul size in bytes: ", totalMulSize, "\n");
3296 dataLog("Average Mul size: ", totalMulSize / numMuls, "\n");
3298 dataLog("Num Negs: ", numNegs, "\n");
3299 dataLog("Total Neg size in bytes: ", totalNegSize, "\n");
3300 dataLog("Average Neg size: ", totalNegSize / numNegs, "\n");
3302 dataLog("Num Subs: ", numSubs, "\n");
3303 dataLog("Total Sub size in bytes: ", totalSubSize, "\n");
3304 dataLog("Average Sub size: ", totalSubSize / numSubs, "\n");
3306 dataLog("-----------------------\n");
3310 void setPrinter(Printer::PrintRecord& record, CodeBlock* codeBlock)
3312 Printer::setPrinter(record, toCString(codeBlock));
3319 void printInternal(PrintStream& out, JSC::CodeBlock* codeBlock)
3321 if (UNLIKELY(!codeBlock)) {
3322 out.print("<null codeBlock>");
3325 out.print(*codeBlock);