2 * Copyright (C) 2008-2010, 2012-2016 Apple Inc. All rights reserved.
3 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
15 * its contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "CodeBlock.h"
33 #include "ArithProfile.h"
34 #include "BasicBlockLocation.h"
35 #include "BytecodeGenerator.h"
36 #include "BytecodeLivenessAnalysis.h"
37 #include "BytecodeUseDef.h"
38 #include "CallLinkStatus.h"
39 #include "CodeBlockSet.h"
40 #include "DFGCapabilities.h"
41 #include "DFGCommon.h"
42 #include "DFGDriver.h"
43 #include "DFGJITCode.h"
44 #include "DFGWorklist.h"
46 #include "EvalCodeBlock.h"
47 #include "FunctionCodeBlock.h"
48 #include "FunctionExecutableDump.h"
49 #include "GetPutInfo.h"
50 #include "InlineCallFrame.h"
51 #include "Interpreter.h"
53 #include "JITMathIC.h"
54 #include "JSCInlines.h"
55 #include "JSCJSValue.h"
56 #include "JSFunction.h"
57 #include "JSLexicalEnvironment.h"
58 #include "JSModuleEnvironment.h"
59 #include "LLIntData.h"
60 #include "LLIntEntrypoint.h"
61 #include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
62 #include "LowLevelInterpreter.h"
63 #include "ModuleProgramCodeBlock.h"
64 #include "PCToCodeOriginMap.h"
65 #include "PolymorphicAccess.h"
66 #include "ProfilerDatabase.h"
67 #include "ProgramCodeBlock.h"
68 #include "ReduceWhitespace.h"
70 #include "SlotVisitorInlines.h"
71 #include "StackVisitor.h"
72 #include "StructureStubInfo.h"
73 #include "TypeLocationCache.h"
74 #include "TypeProfiler.h"
75 #include "UnlinkedInstructionStream.h"
76 #include "VMInlines.h"
77 #include "WebAssemblyCodeBlock.h"
78 #include "WebAssemblyExecutable.h"
79 #include <wtf/BagToHashMap.h>
80 #include <wtf/CommaPrinter.h>
81 #include <wtf/SimpleStats.h>
82 #include <wtf/StringExtras.h>
83 #include <wtf/StringPrintStream.h>
84 #include <wtf/text/UniquedStringImpl.h>
87 #include "RegisterAtOffsetList.h"
91 #include "DFGOperations.h"
95 #include "FTLJITCode.h"
// Static JSC class metadata for CodeBlock; CREATE_METHOD_TABLE supplies the
// per-class method table. NOTE(review): the initializer's interior lines are
// missing from this listing — confirm against the full source.
100 const ClassInfo CodeBlock::s_info = {
102 CREATE_METHOD_TABLE(CodeBlock)
// Returns a human-readable name for this code block, derived from its code
// type: FunctionCode uses the executable's inferred function name; the
// fall-through returns an explicitly empty CString. NOTE(review): interior
// case labels are missing from this listing — confirm against the full source.
105 CString CodeBlock::inferredName() const
107 switch (codeType()) {
113 return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
118 return CString("", 0);
// Whether this block's hash has already been computed. NOTE(review): the body
// is missing from this listing — presumably it tests m_hash; verify.
122 bool CodeBlock::hasHash() const
// Computing the hash touches the source; it is only safe when not running on
// a compilation thread.
127 bool CodeBlock::isSafeToComputeHash() const
129 return !isCompilationThread();
// Computes (and caches into the mutable m_hash) the CodeBlockHash from the
// owner's source plus the specialization kind. Asserts we are off the
// compilation thread, since hashing reads the source.
132 CodeBlockHash CodeBlock::hash() const
135 RELEASE_ASSERT(isSafeToComputeHash());
136 m_hash = CodeBlockHash(ownerScriptExecutable()->source(), specializationKind());
// Returns the source text of this code block for tooling. Non-function code
// returns the whole source as UTF-8; for functions, unlinked offsets are
// rebased onto the linked source and the substring from the function-name
// start through the end of the body is extracted.
141 CString CodeBlock::sourceCodeForTools() const
143 if (codeType() != FunctionCode)
144 return ownerScriptExecutable()->source().toUTF8();
146 SourceProvider* provider = source();
147 FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
148 UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
149 unsigned unlinkedStartOffset = unlinked->startOffset();
// delta maps offsets recorded against the unlinked executable onto this
// linked executable's position within the provider's source.
150 unsigned linkedStartOffset = executable->source().startOffset();
151 int delta = linkedStartOffset - unlinkedStartOffset;
152 unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
153 unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
156 provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8());
// Same as sourceCodeForTools() but with runs of whitespace collapsed so the
// source fits on a single line.
159 CString CodeBlock::sourceCodeOnOneLine() const
161 return reduceWhitespace(sourceCodeForTools());
// Returns the hash rendered as a string when it is already cached or can be
// computed on this thread. NOTE(review): the else-path for the unsafe case is
// missing from this listing — verify what placeholder it returns.
164 CString CodeBlock::hashAsStringIfPossible() const
166 if (hasHash() || isSafeToComputeHash())
167 return toCString(hash());
// Prints the one-line summary used in every CodeBlock dump: name#hash, the
// pointer chain (this -> alternative -> owner), the given JIT tier, code
// type, instruction count, and a series of parenthesized status flags.
// The caller supplies jitType explicitly so the summary can describe a tier
// other than the current one.
171 void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
173 out.print(inferredName(), "#", hashAsStringIfPossible());
174 out.print(":[", RawPointer(this), "->");
176 out.print(RawPointer(alternative()), "->");
177 out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());
179 if (codeType() == FunctionCode)
180 out.print(specializationKind());
181 out.print(", ", instructionCount());
// The inlining/FTL flags below are only meaningful on the baseline block,
// hence the explicit this->jitType() == BaselineJIT guards.
182 if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
183 out.print(" (ShouldAlwaysBeInlined)");
184 if (ownerScriptExecutable()->neverInline())
185 out.print(" (NeverInline)");
186 if (ownerScriptExecutable()->neverOptimize())
187 out.print(" (NeverOptimize)");
188 else if (ownerScriptExecutable()->neverFTLOptimize())
189 out.print(" (NeverFTLOptimize)");
190 if (ownerScriptExecutable()->didTryToEnterInLoop())
191 out.print(" (DidTryToEnterInLoop)");
192 if (ownerScriptExecutable()->isStrictMode())
193 out.print(" (StrictMode)");
194 if (m_didFailJITCompilation)
195 out.print(" (JITFail)");
196 if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
197 out.print(" (FTLFail)");
198 if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
199 out.print(" (HadFTLReplacement)");
// Convenience overload: dump the summary line using this block's current JIT tier.
203 void CodeBlock::dump(PrintStream& out) const
205 dumpAssumingJITType(out, jitType());
// Formats an identifier for bytecode dumps as "ident(@idN)".
208 static CString idName(int id0, const Identifier& ident)
210 return toCString(ident.impl(), "(@id", id0, ")");
// Renders a virtual register operand: constant-pool indices go through
// constantName() (which includes the constant's value), everything else is
// printed as a plain VirtualRegister.
213 CString CodeBlock::registerName(int r) const
215 if (isConstantRegisterIndex(r))
216 return constantName(r);
218 return toCString(VirtualRegister(r));
// Renders a constant-register operand as "value(registerName)".
221 CString CodeBlock::constantName(int index) const
223 JSValue value = getConstant(index);
224 return toCString(value, "(", VirtualRegister(index), ")");
227 static CString regexpToSourceString(RegExp* regExp)
229 char postfix[5] = { '/', 0, 0, 0, 0 };
231 if (regExp->global())
232 postfix[index++] = 'g';
233 if (regExp->ignoreCase())
234 postfix[index++] = 'i';
235 if (regExp->multiline())
236 postfix[index] = 'm';
237 if (regExp->sticky())
238 postfix[index++] = 'y';
239 if (regExp->unicode())
240 postfix[index++] = 'u';
242 return toCString("/", regExp->pattern().impl(), postfix);
// Formats a RegExp operand for bytecode dumps as "/pattern/flags(@reN)".
245 static CString regexpName(int re, RegExp* regexp)
247 return toCString(regexpToSourceString(regexp), "(@re", re, ")");
// Maps a DebugHookType operand to its printable name for op_debug dumps.
// An unknown value is a logic error, hence the unconditional assert after
// the switch.
250 NEVER_INLINE static const char* debugHookName(int debugHookType)
252 switch (static_cast<DebugHookType>(debugHookType)) {
253 case DidEnterCallFrame:
254 return "didEnterCallFrame";
255 case WillLeaveCallFrame:
256 return "willLeaveCallFrame";
257 case WillExecuteStatement:
258 return "willExecuteStatement";
259 case WillExecuteExpression:
260 return "willExecuteExpression";
261 case WillExecuteProgram:
262 return "willExecuteProgram";
263 case DidExecuteProgram:
264 return "didExecuteProgram";
265 case DidReachBreakpoint:
266 return "didReachBreakpoint";
269 RELEASE_ASSERT_NOT_REACHED();
// Dumps a generic two-operand (dst, src) instruction. Advances `it` past the
// operands as a side effect, as all the print helpers in this file do.
273 void CodeBlock::printUnaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
275 int r0 = (++it)->u.operand;
276 int r1 = (++it)->u.operand;
278 printLocationAndOp(out, exec, location, it, op);
279 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
// Dumps a generic three-operand (dst, lhs, rhs) instruction, advancing `it`
// past the operands.
282 void CodeBlock::printBinaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
284 int r0 = (++it)->u.operand;
285 int r1 = (++it)->u.operand;
286 int r2 = (++it)->u.operand;
287 printLocationAndOp(out, exec, location, it, op);
288 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
// Dumps a conditional jump: condition register plus the relative offset, with
// the absolute target (location + offset) shown in parentheses.
291 void CodeBlock::printConditionalJump(PrintStream& out, ExecState* exec, const Instruction*, const Instruction*& it, int location, const char* op)
293 int r0 = (++it)->u.operand;
294 int offset = (++it)->u.operand;
295 printLocationAndOp(out, exec, location, it, op);
296 out.printf("%s, %d(->%d)", registerName(r0).data(), offset, location + offset);
// Dumps any of the get_by_id family of opcodes: picks the mnemonic from the
// actual opcode, prints dst, base, and property name, then skips the trailing
// metadata slots. NOTE(review): the default-case mnemonic assignment is
// missing from this listing — confirm against the full source.
299 void CodeBlock::printGetByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it)
302 switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
306 case op_get_by_id_proto_load:
307 op = "get_by_id_proto_load";
309 case op_get_by_id_unset:
310 op = "get_by_id_unset";
312 case op_get_array_length:
316 RELEASE_ASSERT_NOT_REACHED();
317 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
321 int r0 = (++it)->u.operand;
322 int r1 = (++it)->u.operand;
323 int id0 = (++it)->u.operand;
324 printLocationAndOp(out, exec, location, it, op);
325 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
326 it += 4; // Increment up to the value profiler.
// Prints "name = <structure pointer>", and appends the property offset of
// `ident` within the structure when it exists. getConcurrently is used so
// this is safe to call while other threads may mutate the structure.
329 static void dumpStructure(PrintStream& out, const char* name, Structure* structure, const Identifier& ident)
334 out.printf("%s = %p", name, structure);
336 PropertyOffset offset = structure->getConcurrently(ident.impl());
337 if (offset != invalidOffset)
338 out.printf(" (offset = %d)", offset);
// Prints every Structure in a prototype StructureChain, head first.
// NOTE(review): the loop's termination condition line is missing from this
// listing — confirm against the full source.
341 static void dumpChain(PrintStream& out, StructureChain* chain, const Identifier& ident)
343 out.printf("chain = %p: [", chain);
345 for (WriteBarrier<Structure>* currentStructure = chain->head();
347 ++currentStructure) {
352 dumpStructure(out, "struct", currentStructure->get(), ident);
// Dumps the inline-cache state for a get_by_id at `location`: first the LLInt
// cache embedded in the instruction stream (cached StructureID, plus the
// prototype pointer for proto-load caches), then the baseline JIT's
// StructureStubInfo looked up from `map` by CodeOrigin.
357 void CodeBlock::printGetByIdCacheStatus(PrintStream& out, ExecState* exec, int location, const StubInfoMap& map)
359 Instruction* instruction = instructions().begin() + location;
361 const Identifier& ident = identifier(instruction[3].u.operand);
363 UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.
// LLInt side: get_array_length has no structure cache; otherwise slot 4
// holds the cached StructureID (0 means "not cached yet").
365 if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_array_length)
366 out.printf(" llint(array_length)");
367 else if (StructureID structureID = instruction[4].u.structureID) {
368 Structure* structure = m_vm->heap.structureIDTable().get(structureID);
369 out.printf(" llint(");
370 dumpStructure(out, "struct", structure, ident);
372 if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_by_id_proto_load)
373 out.printf(" proto(%p)", instruction[6].u.pointer);
// JIT side: report the stub's cache type — self-structure, polymorphic stub,
// array-length special case, or unset.
377 if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
378 StructureStubInfo& stubInfo = *stubPtr;
379 if (stubInfo.resetByGC)
380 out.print(" (Reset By GC)");
384 Structure* baseStructure = nullptr;
385 PolymorphicAccess* stub = nullptr;
387 switch (stubInfo.cacheType) {
388 case CacheType::GetByIdSelf:
390 baseStructure = stubInfo.u.byIdSelf.baseObjectStructure.get();
392 case CacheType::Stub:
394 stub = stubInfo.u.stub;
396 case CacheType::Unset:
399 case CacheType::ArrayLength:
400 out.printf("ArrayLength");
403 RELEASE_ASSERT_NOT_REACHED();
409 dumpStructure(out, "struct", baseStructure, ident);
413 out.print(", ", *stub);
// Dumps the inline-cache state for a put_by_id at `location`: the PutByIdFlags
// operand, the LLInt cache (a prev->next structure pair plus chain for
// transition caches, or a single structure for replace caches), then the
// baseline JIT StructureStubInfo from `map`.
422 void CodeBlock::printPutByIdCacheStatus(PrintStream& out, int location, const StubInfoMap& map)
424 Instruction* instruction = instructions().begin() + location;
426 const Identifier& ident = identifier(instruction[2].u.operand);
428 UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.
430 out.print(", ", instruction[8].u.putByIdFlags);
// Slot 4 = old structure; slot 6 = new structure (non-zero only for a
// transition put); slot 7 = prototype chain validated by the transition.
432 if (StructureID structureID = instruction[4].u.structureID) {
433 Structure* structure = m_vm->heap.structureIDTable().get(structureID);
434 out.print(" llint(");
435 if (StructureID newStructureID = instruction[6].u.structureID) {
436 Structure* newStructure = m_vm->heap.structureIDTable().get(newStructureID);
437 dumpStructure(out, "prev", structure, ident);
439 dumpStructure(out, "next", newStructure, ident);
440 if (StructureChain* chain = instruction[7].u.structureChain.get()) {
442 dumpChain(out, chain, ident);
445 dumpStructure(out, "struct", structure, ident);
450 if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
451 StructureStubInfo& stubInfo = *stubPtr;
452 if (stubInfo.resetByGC)
453 out.print(" (Reset By GC)");
457 switch (stubInfo.cacheType) {
458 case CacheType::PutByIdReplace:
459 out.print("replace, ");
460 dumpStructure(out, "struct", stubInfo.u.byIdSelf.baseObjectStructure.get(), ident);
462 case CacheType::Stub: {
463 out.print("stub, ", *stubInfo.u.stub);
466 case CacheType::Unset:
470 RELEASE_ASSERT_NOT_REACHED();
// Dumps a call-family instruction: dst, callee, argument count, and register
// offset (with the computed 'this' register). When cacheDumpMode == DumpCaches
// it also shows the LLInt call link's last-seen callee, the JIT call link
// target from `map`, and (below FTL) the computed CallLinkStatus. Finishes
// with array/value profiling, advancing `it` accordingly.
480 void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap& map)
482 int dst = (++it)->u.operand;
483 int func = (++it)->u.operand;
484 int argCount = (++it)->u.operand;
485 int registerOffset = (++it)->u.operand;
486 printLocationAndOp(out, exec, location, it, op);
487 out.print(registerName(dst), ", ", registerName(func), ", ", argCount, ", ", registerOffset);
488 out.print(" (this at ", virtualRegisterForArgument(0, -registerOffset), ")");
489 if (cacheDumpMode == DumpCaches) {
490 LLIntCallLinkInfo* callLinkInfo = it[1].u.callLinkInfo;
491 if (callLinkInfo->lastSeenCallee) {
493 " llint(%p, exec %p)",
494 callLinkInfo->lastSeenCallee.get(),
495 callLinkInfo->lastSeenCallee->executable());
498 if (CallLinkInfo* info = map.get(CodeOrigin(location))) {
499 JSFunction* target = info->lastSeenCallee();
501 out.printf(" jit(%p, exec %p)", target, target->executable());
// CallLinkStatus::computeFor is only meaningful below the FTL tier.
504 if (jitType() != JITCode::FTLJIT)
505 out.print(" status(", CallLinkStatus::computeFor(this, location, map), ")");
512 dumpArrayProfiling(out, it, hasPrintedProfiling);
513 dumpValueProfiling(out, it, hasPrintedProfiling);
// Dumps a put_by_id-style instruction: base register, property name, value
// register — advancing `it` past the three operands.
516 void CodeBlock::printPutByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
518 int r0 = (++it)->u.operand;
519 int id0 = (++it)->u.operand;
520 int r1 = (++it)->u.operand;
521 printLocationAndOp(out, exec, location, it, op);
522 out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data());
// Convenience overload: dump this block's source to WTF's data file/stderr stream.
526 void CodeBlock::dumpSource()
528 dumpSource(WTF::dataFile());
// Prints this block's source text. For function code it reconstructs
// "function <name>(<params>)...{...}" from the parameter-list start through
// the type-profiling end offset; otherwise it prints the whole source view.
531 void CodeBlock::dumpSource(PrintStream& out)
533 ScriptExecutable* executable = ownerScriptExecutable();
534 if (executable->isFunctionExecutable()) {
535 FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
536 StringView source = functionExecutable->source().provider()->getRange(
537 functionExecutable->parametersStartOffset(),
538 functionExecutable->typeProfilingEndOffset() + 1); // Type profiling end offset is the character before the '}'.
540 out.print("function ", inferredName(), source);
543 out.print(executable->source().view());
// Convenience overload: dump the full bytecode listing to WTF's data file.
546 void CodeBlock::dumpBytecode()
548 dumpBytecode(WTF::dataFile());
// Dumps the complete bytecode listing: a header with instruction/size/register
// statistics, every instruction (with its inline-cache state), then the
// identifier table, constant pool, regexp pool, exception handlers, and the
// (rare-data) switch and string-switch jump tables.
551 void CodeBlock::dumpBytecode(PrintStream& out)
553 // We only use the ExecState* for things that don't actually lead to JS execution,
554 // like converting a JSString to a String. Hence the globalExec is appropriate.
555 ExecState* exec = m_globalObject->globalExec();
// Count whole instructions by stepping by each opcode's length, since
// instructions() is a flat slot array, not one entry per instruction.
557 size_t instructionCount = 0;
559 for (size_t i = 0; i < instructions().size(); i += opcodeLengths[exec->interpreter()->getOpcodeID(instructions()[i].u.opcode)])
564 ": %lu m_instructions; %lu bytes; %d parameter(s); %d callee register(s); %d variable(s)",
565 static_cast<unsigned long>(instructions().size()),
566 static_cast<unsigned long>(instructions().size() * sizeof(Instruction)),
567 m_numParameters, m_numCalleeLocals, m_numVars);
568 out.print("; scope at ", scopeRegister());
// Gather JIT inline-cache maps once up front so the per-instruction dump can
// annotate each site.
571 StubInfoMap stubInfos;
572 CallLinkInfoMap callLinkInfos;
573 getStubInfoMap(stubInfos);
574 getCallLinkInfoMap(callLinkInfos);
576 const Instruction* begin = instructions().begin();
577 const Instruction* end = instructions().end();
578 for (const Instruction* it = begin; it != end; ++it)
579 dumpBytecode(out, exec, begin, it, stubInfos, callLinkInfos);
581 if (numberOfIdentifiers()) {
582 out.printf("\nIdentifiers:\n");
585 out.printf("  id%u = %s\n", static_cast<unsigned>(i), identifier(i).string().utf8().data());
587 } while (i != numberOfIdentifiers());
590 if (!m_constantRegisters.isEmpty()) {
591 out.printf("\nConstants:\n");
// Each constant notes how it appeared in source (double vs integer literal),
// which matters for DFG/OSR exit value reconstruction.
594 const char* sourceCodeRepresentationDescription = nullptr;
595 switch (m_constantsSourceCodeRepresentation[i]) {
596 case SourceCodeRepresentation::Double:
597 sourceCodeRepresentationDescription = ": in source as double";
599 case SourceCodeRepresentation::Integer:
600 sourceCodeRepresentationDescription = ": in source as integer";
602 case SourceCodeRepresentation::Other:
603 sourceCodeRepresentationDescription = "";
606 out.printf("   k%u = %s%s\n", static_cast<unsigned>(i), toCString(m_constantRegisters[i].get()).data(), sourceCodeRepresentationDescription);
608 } while (i < m_constantRegisters.size());
611 if (size_t count = m_unlinkedCode->numberOfRegExps()) {
612 out.printf("\nm_regexps:\n");
615 out.printf("  re%u = %s\n", static_cast<unsigned>(i), regexpToSourceString(m_unlinkedCode->regexp(i)).data());
620 dumpExceptionHandlers(out);
622 if (m_rareData && !m_rareData->m_switchJumpTables.isEmpty()) {
623 out.printf("Switch Jump Tables:\n");
626 out.printf("  %1d = {\n", i);
628 Vector<int32_t>::const_iterator end = m_rareData->m_switchJumpTables[i].branchOffsets.end();
629 for (Vector<int32_t>::const_iterator iter = m_rareData->m_switchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) {
632 out.printf("\t\t%4d => %04d\n", entry + m_rareData->m_switchJumpTables[i].min, *iter);
636 } while (i < m_rareData->m_switchJumpTables.size());
639 if (m_rareData && !m_rareData->m_stringSwitchJumpTables.isEmpty()) {
640 out.printf("\nString Switch Jump Tables:\n");
643 out.printf("  %1d = {\n", i);
644 StringJumpTable::StringOffsetTable::const_iterator end = m_rareData->m_stringSwitchJumpTables[i].offsetTable.end();
645 for (StringJumpTable::StringOffsetTable::const_iterator iter = m_rareData->m_stringSwitchJumpTables[i].offsetTable.begin(); iter != end; ++iter)
646 out.printf("\t\t\"%s\" => %04d\n", iter->key->utf8().data(), iter->value.branchOffset);
649 } while (i < m_rareData->m_stringSwitchJumpTables.size());
// Prints the (rare-data) exception handler table: start/end bytecode range,
// handler target, and handler type for each entry. Handler numbering is
// 1-based in the output.
655 void CodeBlock::dumpExceptionHandlers(PrintStream& out)
657 if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
658 out.printf("\nException Handlers:\n");
661 HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
662 out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] } %s\n",
663 i + 1, handler.start, handler.end, handler.target, handler.typeName());
665 } while (i < m_rareData->m_exceptionHandlers.size());
// Starts a profiling annotation on the current dump line, printing the
// separator only once per instruction (tracked via hasPrintedProfiling).
669 void CodeBlock::beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling)
671 if (hasPrintedProfiling) {
677 hasPrintedProfiling = true;
// Prints the ValueProfile summary attached to the instruction (advancing `it`
// to the profile slot). Holds the concurrent-JS lock because profiles are
// mutated by executing code. Empty descriptions are skipped.
680 void CodeBlock::dumpValueProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
682 ConcurrentJSLocker locker(m_lock);
685 CString description = it->u.profile->briefDescription(locker);
686 if (!description.length())
688 beginDumpProfiling(out, hasPrintedProfiling);
689 out.print(description);
// Prints the ArrayProfile summary attached to the instruction, if any
// (advancing `it` to the profile slot). Lock discipline matches
// dumpValueProfiling.
692 void CodeBlock::dumpArrayProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
694 ConcurrentJSLocker locker(m_lock);
697 if (!it->u.arrayProfile)
699 CString description = it->u.arrayProfile->briefDescription(locker, this);
700 if (!description.length())
702 beginDumpProfiling(out, hasPrintedProfiling);
703 out.print(description);
// Prints "name<count>" for a rare-case profile, but only when the profile
// exists and has actually fired (non-zero counter).
706 void CodeBlock::dumpRareCaseProfile(PrintStream& out, const char* name, RareCaseProfile* profile, bool& hasPrintedProfiling)
708 if (!profile || !profile->m_counter)
711 beginDumpProfiling(out, hasPrintedProfiling);
712 out.print(name, profile->m_counter);
// Prints an arithmetic-op result profile as "results: <profile>".
// NOTE(review): the null/empty-profile guard lines are missing from this
// listing — confirm against the full source.
715 void CodeBlock::dumpArithProfile(PrintStream& out, ArithProfile* profile, bool& hasPrintedProfiling)
720 beginDumpProfiling(out, hasPrintedProfiling);
721 out.print("results: ", *profile);
// Prints the common "[loc ] opcode-name " prefix used by every instruction dump.
724 void CodeBlock::printLocationAndOp(PrintStream& out, ExecState*, int location, const Instruction*&, const char* op)
726 out.printf("[%4d] %-17s ", location, op);
// Prints the standard location/opcode prefix followed by a single register
// operand — the common shape for one-operand instructions.
729 void CodeBlock::printLocationOpAndRegisterOperand(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, int operand)
731 printLocationAndOp(out, exec, location, it, op);
732 out.printf("%s", registerName(operand).data());
735 void CodeBlock::dumpBytecode(
736 PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it,
737 const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
739 int location = it - begin;
740 bool hasPrintedProfiling = false;
741 OpcodeID opcode = exec->interpreter()->getOpcodeID(it->u.opcode);
744 printLocationAndOp(out, exec, location, it, "enter");
748 int r0 = (++it)->u.operand;
749 printLocationOpAndRegisterOperand(out, exec, location, it, "get_scope", r0);
752 case op_create_direct_arguments: {
753 int r0 = (++it)->u.operand;
754 printLocationAndOp(out, exec, location, it, "create_direct_arguments");
755 out.printf("%s", registerName(r0).data());
758 case op_create_scoped_arguments: {
759 int r0 = (++it)->u.operand;
760 int r1 = (++it)->u.operand;
761 printLocationAndOp(out, exec, location, it, "create_scoped_arguments");
762 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
765 case op_create_cloned_arguments: {
766 int r0 = (++it)->u.operand;
767 printLocationAndOp(out, exec, location, it, "create_cloned_arguments");
768 out.printf("%s", registerName(r0).data());
771 case op_argument_count: {
772 int r0 = (++it)->u.operand;
773 printLocationOpAndRegisterOperand(out, exec, location, it, "argument_count", r0);
776 case op_get_argument: {
777 int r0 = (++it)->u.operand;
778 int index = (++it)->u.operand;
779 printLocationOpAndRegisterOperand(out, exec, location, it, "argument", r0);
780 out.printf(", %d", index);
781 dumpValueProfiling(out, it, hasPrintedProfiling);
784 case op_create_rest: {
785 int r0 = (++it)->u.operand;
786 int r1 = (++it)->u.operand;
787 unsigned argumentOffset = (++it)->u.unsignedValue;
788 printLocationAndOp(out, exec, location, it, "create_rest");
789 out.printf("%s, %s, ", registerName(r0).data(), registerName(r1).data());
790 out.printf("ArgumentsOffset: %u", argumentOffset);
793 case op_get_rest_length: {
794 int r0 = (++it)->u.operand;
795 printLocationAndOp(out, exec, location, it, "get_rest_length");
796 out.printf("%s, ", registerName(r0).data());
797 unsigned argumentOffset = (++it)->u.unsignedValue;
798 out.printf("ArgumentsOffset: %u", argumentOffset);
801 case op_create_this: {
802 int r0 = (++it)->u.operand;
803 int r1 = (++it)->u.operand;
804 unsigned inferredInlineCapacity = (++it)->u.operand;
805 unsigned cachedFunction = (++it)->u.operand;
806 printLocationAndOp(out, exec, location, it, "create_this");
807 out.printf("%s, %s, %u, %u", registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity, cachedFunction);
811 int r0 = (++it)->u.operand;
812 printLocationOpAndRegisterOperand(out, exec, location, it, "to_this", r0);
813 Structure* structure = (++it)->u.structure.get();
815 out.print(", cache(struct = ", RawPointer(structure), ")");
816 out.print(", ", (++it)->u.toThisStatus);
820 int r0 = (++it)->u.operand;
821 printLocationOpAndRegisterOperand(out, exec, location, it, "op_check_tdz", r0);
824 case op_new_object: {
825 int r0 = (++it)->u.operand;
826 unsigned inferredInlineCapacity = (++it)->u.operand;
827 printLocationAndOp(out, exec, location, it, "new_object");
828 out.printf("%s, %u", registerName(r0).data(), inferredInlineCapacity);
829 ++it; // Skip object allocation profile.
833 int dst = (++it)->u.operand;
834 int argv = (++it)->u.operand;
835 int argc = (++it)->u.operand;
836 printLocationAndOp(out, exec, location, it, "new_array");
837 out.printf("%s, %s, %d", registerName(dst).data(), registerName(argv).data(), argc);
838 ++it; // Skip array allocation profile.
841 case op_new_array_with_spread: {
842 int dst = (++it)->u.operand;
843 int argv = (++it)->u.operand;
844 int argc = (++it)->u.operand;
845 printLocationAndOp(out, exec, location, it, "new_array_with_spread");
846 out.printf("%s, %s, %d, ", registerName(dst).data(), registerName(argv).data(), argc);
847 unsigned bitVectorIndex = (++it)->u.unsignedValue;
848 const BitVector& bitVector = m_unlinkedCode->bitVector(bitVectorIndex);
849 out.print("BitVector:", bitVectorIndex, ":");
850 for (unsigned i = 0; i < static_cast<unsigned>(argc); i++) {
851 if (bitVector.get(i))
859 int dst = (++it)->u.operand;
860 int arg = (++it)->u.operand;
861 printLocationAndOp(out, exec, location, it, "spread");
862 out.printf("%s, %s", registerName(dst).data(), registerName(arg).data());
865 case op_new_array_with_size: {
866 int dst = (++it)->u.operand;
867 int length = (++it)->u.operand;
868 printLocationAndOp(out, exec, location, it, "new_array_with_size");
869 out.printf("%s, %s", registerName(dst).data(), registerName(length).data());
870 ++it; // Skip array allocation profile.
873 case op_new_array_buffer: {
874 int dst = (++it)->u.operand;
875 int argv = (++it)->u.operand;
876 int argc = (++it)->u.operand;
877 printLocationAndOp(out, exec, location, it, "new_array_buffer");
878 out.printf("%s, %d, %d", registerName(dst).data(), argv, argc);
879 ++it; // Skip array allocation profile.
882 case op_new_regexp: {
883 int r0 = (++it)->u.operand;
884 int re0 = (++it)->u.operand;
885 printLocationAndOp(out, exec, location, it, "new_regexp");
886 out.printf("%s, ", registerName(r0).data());
887 if (r0 >=0 && r0 < (int)m_unlinkedCode->numberOfRegExps())
888 out.printf("%s", regexpName(re0, regexp(re0)).data());
890 out.printf("bad_regexp(%d)", re0);
894 int r0 = (++it)->u.operand;
895 int r1 = (++it)->u.operand;
896 printLocationAndOp(out, exec, location, it, "mov");
897 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
900 case op_profile_type: {
901 int r0 = (++it)->u.operand;
906 printLocationAndOp(out, exec, location, it, "op_profile_type");
907 out.printf("%s", registerName(r0).data());
910 case op_profile_control_flow: {
911 BasicBlockLocation* basicBlockLocation = (++it)->u.basicBlockLocation;
912 printLocationAndOp(out, exec, location, it, "profile_control_flow");
913 out.printf("[%d, %d]", basicBlockLocation->startOffset(), basicBlockLocation->endOffset());
917 printUnaryOp(out, exec, location, it, "not");
921 printBinaryOp(out, exec, location, it, "eq");
925 printUnaryOp(out, exec, location, it, "eq_null");
929 printBinaryOp(out, exec, location, it, "neq");
933 printUnaryOp(out, exec, location, it, "neq_null");
937 printBinaryOp(out, exec, location, it, "stricteq");
941 printBinaryOp(out, exec, location, it, "nstricteq");
945 printBinaryOp(out, exec, location, it, "less");
949 printBinaryOp(out, exec, location, it, "lesseq");
953 printBinaryOp(out, exec, location, it, "greater");
957 printBinaryOp(out, exec, location, it, "greatereq");
961 int r0 = (++it)->u.operand;
962 printLocationOpAndRegisterOperand(out, exec, location, it, "inc", r0);
966 int r0 = (++it)->u.operand;
967 printLocationOpAndRegisterOperand(out, exec, location, it, "dec", r0);
971 printUnaryOp(out, exec, location, it, "to_number");
972 dumpValueProfiling(out, it, hasPrintedProfiling);
976 printUnaryOp(out, exec, location, it, "to_string");
980 printUnaryOp(out, exec, location, it, "negate");
981 ++it; // op_negate has an extra operand for the ArithProfile.
985 printBinaryOp(out, exec, location, it, "add");
990 printBinaryOp(out, exec, location, it, "mul");
995 printBinaryOp(out, exec, location, it, "div");
1000 printBinaryOp(out, exec, location, it, "mod");
1004 printBinaryOp(out, exec, location, it, "pow");
1008 printBinaryOp(out, exec, location, it, "sub");
1013 printBinaryOp(out, exec, location, it, "lshift");
1017 printBinaryOp(out, exec, location, it, "rshift");
1021 printBinaryOp(out, exec, location, it, "urshift");
1025 printBinaryOp(out, exec, location, it, "bitand");
1030 printBinaryOp(out, exec, location, it, "bitxor");
1035 printBinaryOp(out, exec, location, it, "bitor");
1039 case op_overrides_has_instance: {
1040 int r0 = (++it)->u.operand;
1041 int r1 = (++it)->u.operand;
1042 int r2 = (++it)->u.operand;
1043 printLocationAndOp(out, exec, location, it, "overrides_has_instance");
1044 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1047 case op_instanceof: {
1048 int r0 = (++it)->u.operand;
1049 int r1 = (++it)->u.operand;
1050 int r2 = (++it)->u.operand;
1051 printLocationAndOp(out, exec, location, it, "instanceof");
1052 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1055 case op_instanceof_custom: {
1056 int r0 = (++it)->u.operand;
1057 int r1 = (++it)->u.operand;
1058 int r2 = (++it)->u.operand;
1059 int r3 = (++it)->u.operand;
1060 printLocationAndOp(out, exec, location, it, "instanceof_custom");
1061 out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data());
1065 printUnaryOp(out, exec, location, it, "unsigned");
1069 printUnaryOp(out, exec, location, it, "typeof");
1073 printUnaryOp(out, exec, location, it, "is_empty");
1076 case op_is_undefined: {
1077 printUnaryOp(out, exec, location, it, "is_undefined");
1080 case op_is_boolean: {
1081 printUnaryOp(out, exec, location, it, "is_boolean");
1084 case op_is_number: {
1085 printUnaryOp(out, exec, location, it, "is_number");
1088 case op_is_cell_with_type: {
1089 int r0 = (++it)->u.operand;
1090 int r1 = (++it)->u.operand;
1091 int type = (++it)->u.operand;
1092 printLocationAndOp(out, exec, location, it, "is_cell_with_type");
1093 out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), type);
1096 case op_is_object: {
1097 printUnaryOp(out, exec, location, it, "is_object");
1100 case op_is_object_or_null: {
1101 printUnaryOp(out, exec, location, it, "is_object_or_null");
1104 case op_is_function: {
1105 printUnaryOp(out, exec, location, it, "is_function");
1109 printBinaryOp(out, exec, location, it, "in");
1112 case op_try_get_by_id: {
1113 int r0 = (++it)->u.operand;
1114 int r1 = (++it)->u.operand;
1115 int id0 = (++it)->u.operand;
1116 printLocationAndOp(out, exec, location, it, "try_get_by_id");
1117 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
1118 dumpValueProfiling(out, it, hasPrintedProfiling);
1122 case op_get_by_id_proto_load:
1123 case op_get_by_id_unset:
1124 case op_get_array_length: {
1125 printGetByIdOp(out, exec, location, it);
1126 printGetByIdCacheStatus(out, exec, location, stubInfos);
1127 dumpValueProfiling(out, it, hasPrintedProfiling);
1130 case op_get_by_id_with_this: {
1131 printLocationAndOp(out, exec, location, it, "get_by_id_with_this");
1132 int r0 = (++it)->u.operand;
1133 int r1 = (++it)->u.operand;
1134 int r2 = (++it)->u.operand;
1135 int id0 = (++it)->u.operand;
1136 out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), idName(id0, identifier(id0)).data());
1137 dumpValueProfiling(out, it, hasPrintedProfiling);
1140 case op_get_by_val_with_this: {
1141 int r0 = (++it)->u.operand;
1142 int r1 = (++it)->u.operand;
1143 int r2 = (++it)->u.operand;
1144 int r3 = (++it)->u.operand;
1145 printLocationAndOp(out, exec, location, it, "get_by_val_with_this");
1146 out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data());
1147 dumpValueProfiling(out, it, hasPrintedProfiling);
1150 case op_put_by_id: {
1151 printPutByIdOp(out, exec, location, it, "put_by_id");
1152 printPutByIdCacheStatus(out, location, stubInfos);
1155 case op_put_by_id_with_this: {
1156 int r0 = (++it)->u.operand;
1157 int r1 = (++it)->u.operand;
1158 int id0 = (++it)->u.operand;
1159 int r2 = (++it)->u.operand;
1160 printLocationAndOp(out, exec, location, it, "put_by_id_with_this");
1161 out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data(), registerName(r2).data());
1164 case op_put_by_val_with_this: {
1165 int r0 = (++it)->u.operand;
1166 int r1 = (++it)->u.operand;
1167 int r2 = (++it)->u.operand;
1168 int r3 = (++it)->u.operand;
1169 printLocationAndOp(out, exec, location, it, "put_by_val_with_this");
1170 out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data());
1173 case op_put_getter_by_id: {
1174 int r0 = (++it)->u.operand;
1175 int id0 = (++it)->u.operand;
1176 int n0 = (++it)->u.operand;
1177 int r1 = (++it)->u.operand;
1178 printLocationAndOp(out, exec, location, it, "put_getter_by_id");
1179 out.printf("%s, %s, %d, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data());
1182 case op_put_setter_by_id: {
1183 int r0 = (++it)->u.operand;
1184 int id0 = (++it)->u.operand;
1185 int n0 = (++it)->u.operand;
1186 int r1 = (++it)->u.operand;
1187 printLocationAndOp(out, exec, location, it, "put_setter_by_id");
1188 out.printf("%s, %s, %d, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data());
1191 case op_put_getter_setter_by_id: {
1192 int r0 = (++it)->u.operand;
1193 int id0 = (++it)->u.operand;
1194 int n0 = (++it)->u.operand;
1195 int r1 = (++it)->u.operand;
1196 int r2 = (++it)->u.operand;
1197 printLocationAndOp(out, exec, location, it, "put_getter_setter_by_id");
1198 out.printf("%s, %s, %d, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data(), registerName(r2).data());
1201 case op_put_getter_by_val: {
1202 int r0 = (++it)->u.operand;
1203 int r1 = (++it)->u.operand;
1204 int n0 = (++it)->u.operand;
1205 int r2 = (++it)->u.operand;
1206 printLocationAndOp(out, exec, location, it, "put_getter_by_val");
1207 out.printf("%s, %s, %d, %s", registerName(r0).data(), registerName(r1).data(), n0, registerName(r2).data());
1210 case op_put_setter_by_val: {
1211 int r0 = (++it)->u.operand;
1212 int r1 = (++it)->u.operand;
1213 int n0 = (++it)->u.operand;
1214 int r2 = (++it)->u.operand;
1215 printLocationAndOp(out, exec, location, it, "put_setter_by_val");
1216 out.printf("%s, %s, %d, %s", registerName(r0).data(), registerName(r1).data(), n0, registerName(r2).data());
1219 case op_define_data_property: {
1220 int r0 = (++it)->u.operand;
1221 int r1 = (++it)->u.operand;
1222 int r2 = (++it)->u.operand;
1223 int r3 = (++it)->u.operand;
1224 printLocationAndOp(out, exec, location, it, "define_data_property");
1225 out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data());
1228 case op_define_accessor_property: {
1229 int r0 = (++it)->u.operand;
1230 int r1 = (++it)->u.operand;
1231 int r2 = (++it)->u.operand;
1232 int r3 = (++it)->u.operand;
1233 int r4 = (++it)->u.operand;
1234 printLocationAndOp(out, exec, location, it, "define_accessor_property");
1235 out.printf("%s, %s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data(), registerName(r4).data());
1238 case op_del_by_id: {
1239 int r0 = (++it)->u.operand;
1240 int r1 = (++it)->u.operand;
1241 int id0 = (++it)->u.operand;
1242 printLocationAndOp(out, exec, location, it, "del_by_id");
1243 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
1246 case op_get_by_val: {
1247 int r0 = (++it)->u.operand;
1248 int r1 = (++it)->u.operand;
1249 int r2 = (++it)->u.operand;
1250 printLocationAndOp(out, exec, location, it, "get_by_val");
1251 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1252 dumpArrayProfiling(out, it, hasPrintedProfiling);
1253 dumpValueProfiling(out, it, hasPrintedProfiling);
1256 case op_put_by_val: {
1257 int r0 = (++it)->u.operand;
1258 int r1 = (++it)->u.operand;
1259 int r2 = (++it)->u.operand;
1260 printLocationAndOp(out, exec, location, it, "put_by_val");
1261 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1262 dumpArrayProfiling(out, it, hasPrintedProfiling);
1265 case op_put_by_val_direct: {
1266 int r0 = (++it)->u.operand;
1267 int r1 = (++it)->u.operand;
1268 int r2 = (++it)->u.operand;
1269 printLocationAndOp(out, exec, location, it, "put_by_val_direct");
1270 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1271 dumpArrayProfiling(out, it, hasPrintedProfiling);
1274 case op_del_by_val: {
1275 int r0 = (++it)->u.operand;
1276 int r1 = (++it)->u.operand;
1277 int r2 = (++it)->u.operand;
1278 printLocationAndOp(out, exec, location, it, "del_by_val");
1279 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1282 case op_put_by_index: {
1283 int r0 = (++it)->u.operand;
1284 unsigned n0 = (++it)->u.operand;
1285 int r1 = (++it)->u.operand;
1286 printLocationAndOp(out, exec, location, it, "put_by_index");
1287 out.printf("%s, %u, %s", registerName(r0).data(), n0, registerName(r1).data());
1291 int offset = (++it)->u.operand;
1292 printLocationAndOp(out, exec, location, it, "jmp");
1293 out.printf("%d(->%d)", offset, location + offset);
1297 printConditionalJump(out, exec, begin, it, location, "jtrue");
1301 printConditionalJump(out, exec, begin, it, location, "jfalse");
1305 printConditionalJump(out, exec, begin, it, location, "jeq_null");
1308 case op_jneq_null: {
1309 printConditionalJump(out, exec, begin, it, location, "jneq_null");
1313 int r0 = (++it)->u.operand;
1314 Special::Pointer pointer = (++it)->u.specialPointer;
1315 int offset = (++it)->u.operand;
1316 printLocationAndOp(out, exec, location, it, "jneq_ptr");
1317 out.printf("%s, %d (%p), %d(->%d)", registerName(r0).data(), pointer, m_globalObject->actualPointerFor(pointer), offset, location + offset);
1322 int r0 = (++it)->u.operand;
1323 int r1 = (++it)->u.operand;
1324 int offset = (++it)->u.operand;
1325 printLocationAndOp(out, exec, location, it, "jless");
1326 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1330 int r0 = (++it)->u.operand;
1331 int r1 = (++it)->u.operand;
1332 int offset = (++it)->u.operand;
1333 printLocationAndOp(out, exec, location, it, "jlesseq");
1334 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1338 int r0 = (++it)->u.operand;
1339 int r1 = (++it)->u.operand;
1340 int offset = (++it)->u.operand;
1341 printLocationAndOp(out, exec, location, it, "jgreater");
1342 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1345 case op_jgreatereq: {
1346 int r0 = (++it)->u.operand;
1347 int r1 = (++it)->u.operand;
1348 int offset = (++it)->u.operand;
1349 printLocationAndOp(out, exec, location, it, "jgreatereq");
1350 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1354 int r0 = (++it)->u.operand;
1355 int r1 = (++it)->u.operand;
1356 int offset = (++it)->u.operand;
1357 printLocationAndOp(out, exec, location, it, "jnless");
1358 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1362 int r0 = (++it)->u.operand;
1363 int r1 = (++it)->u.operand;
1364 int offset = (++it)->u.operand;
1365 printLocationAndOp(out, exec, location, it, "jnlesseq");
1366 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1369 case op_jngreater: {
1370 int r0 = (++it)->u.operand;
1371 int r1 = (++it)->u.operand;
1372 int offset = (++it)->u.operand;
1373 printLocationAndOp(out, exec, location, it, "jngreater");
1374 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1377 case op_jngreatereq: {
1378 int r0 = (++it)->u.operand;
1379 int r1 = (++it)->u.operand;
1380 int offset = (++it)->u.operand;
1381 printLocationAndOp(out, exec, location, it, "jngreatereq");
1382 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1385 case op_loop_hint: {
1386 printLocationAndOp(out, exec, location, it, "loop_hint");
1390 printLocationAndOp(out, exec, location, it, "watchdog");
1393 case op_log_shadow_chicken_prologue: {
1394 int r0 = (++it)->u.operand;
1395 printLocationAndOp(out, exec, location, it, "log_shadow_chicken_prologue");
1396 out.printf("%s", registerName(r0).data());
1399 case op_log_shadow_chicken_tail: {
1400 int r0 = (++it)->u.operand;
1401 int r1 = (++it)->u.operand;
1402 printLocationAndOp(out, exec, location, it, "log_shadow_chicken_tail");
1403 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
1406 case op_switch_imm: {
1407 int tableIndex = (++it)->u.operand;
1408 int defaultTarget = (++it)->u.operand;
1409 int scrutineeRegister = (++it)->u.operand;
1410 printLocationAndOp(out, exec, location, it, "switch_imm");
1411 out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1414 case op_switch_char: {
1415 int tableIndex = (++it)->u.operand;
1416 int defaultTarget = (++it)->u.operand;
1417 int scrutineeRegister = (++it)->u.operand;
1418 printLocationAndOp(out, exec, location, it, "switch_char");
1419 out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1422 case op_switch_string: {
1423 int tableIndex = (++it)->u.operand;
1424 int defaultTarget = (++it)->u.operand;
1425 int scrutineeRegister = (++it)->u.operand;
1426 printLocationAndOp(out, exec, location, it, "switch_string");
1427 out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1431 int r0 = (++it)->u.operand;
1432 int r1 = (++it)->u.operand;
1433 int f0 = (++it)->u.operand;
1434 printLocationAndOp(out, exec, location, it, "new_func");
1435 out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
1438 case op_new_generator_func: {
1439 int r0 = (++it)->u.operand;
1440 int r1 = (++it)->u.operand;
1441 int f0 = (++it)->u.operand;
1442 printLocationAndOp(out, exec, location, it, "new_generator_func");
1443 out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
1446 case op_new_async_func: {
1447 int r0 = (++it)->u.operand;
1448 int r1 = (++it)->u.operand;
1449 int f0 = (++it)->u.operand;
1450 printLocationAndOp(out, exec, location, it, "new_async_func");
1451 out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
1454 case op_new_func_exp: {
1455 int r0 = (++it)->u.operand;
1456 int r1 = (++it)->u.operand;
1457 int f0 = (++it)->u.operand;
1458 printLocationAndOp(out, exec, location, it, "new_func_exp");
1459 out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
1462 case op_new_generator_func_exp: {
1463 int r0 = (++it)->u.operand;
1464 int r1 = (++it)->u.operand;
1465 int f0 = (++it)->u.operand;
1466 printLocationAndOp(out, exec, location, it, "new_generator_func_exp");
1467 out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
1470 case op_new_async_func_exp: {
1471 int r0 = (++it)->u.operand;
1472 int r1 = (++it)->u.operand;
1473 int f0 = (++it)->u.operand;
1474 printLocationAndOp(out, exec, location, it, "new_async_func_exp");
1475 out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
1478 case op_set_function_name: {
1479 int funcReg = (++it)->u.operand;
1480 int nameReg = (++it)->u.operand;
1481 printLocationAndOp(out, exec, location, it, "set_function_name");
1482 out.printf("%s, %s", registerName(funcReg).data(), registerName(nameReg).data());
1486 printCallOp(out, exec, location, it, "call", DumpCaches, hasPrintedProfiling, callLinkInfos);
1489 case op_tail_call: {
1490 printCallOp(out, exec, location, it, "tail_call", DumpCaches, hasPrintedProfiling, callLinkInfos);
1493 case op_call_eval: {
1494 printCallOp(out, exec, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling, callLinkInfos);
1498 case op_construct_varargs:
1499 case op_call_varargs:
1500 case op_tail_call_varargs:
1501 case op_tail_call_forward_arguments: {
1502 int result = (++it)->u.operand;
1503 int callee = (++it)->u.operand;
1504 int thisValue = (++it)->u.operand;
1505 int arguments = (++it)->u.operand;
1506 int firstFreeRegister = (++it)->u.operand;
1507 int varArgOffset = (++it)->u.operand;
1510 if (opcode == op_call_varargs)
1511 opName = "call_varargs";
1512 else if (opcode == op_construct_varargs)
1513 opName = "construct_varargs";
1514 else if (opcode == op_tail_call_varargs)
1515 opName = "tail_call_varargs";
1516 else if (opcode == op_tail_call_forward_arguments)
1517 opName = "tail_call_forward_arguments";
1519 RELEASE_ASSERT_NOT_REACHED();
1521 printLocationAndOp(out, exec, location, it, opName);
1522 out.printf("%s, %s, %s, %s, %d, %d", registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister, varArgOffset);
1523 dumpValueProfiling(out, it, hasPrintedProfiling);
1528 int r0 = (++it)->u.operand;
1529 printLocationOpAndRegisterOperand(out, exec, location, it, "ret", r0);
1532 case op_construct: {
1533 printCallOp(out, exec, location, it, "construct", DumpCaches, hasPrintedProfiling, callLinkInfos);
1537 int r0 = (++it)->u.operand;
1538 int r1 = (++it)->u.operand;
1539 int count = (++it)->u.operand;
1540 printLocationAndOp(out, exec, location, it, "strcat");
1541 out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), count);
1544 case op_to_primitive: {
1545 int r0 = (++it)->u.operand;
1546 int r1 = (++it)->u.operand;
1547 printLocationAndOp(out, exec, location, it, "to_primitive");
1548 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
1551 case op_get_enumerable_length: {
1552 int dst = it[1].u.operand;
1553 int base = it[2].u.operand;
1554 printLocationAndOp(out, exec, location, it, "op_get_enumerable_length");
1555 out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
1556 it += OPCODE_LENGTH(op_get_enumerable_length) - 1;
1559 case op_has_indexed_property: {
1560 int dst = it[1].u.operand;
1561 int base = it[2].u.operand;
1562 int propertyName = it[3].u.operand;
1563 ArrayProfile* arrayProfile = it[4].u.arrayProfile;
1564 printLocationAndOp(out, exec, location, it, "op_has_indexed_property");
1565 out.printf("%s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), arrayProfile);
1566 it += OPCODE_LENGTH(op_has_indexed_property) - 1;
1569 case op_has_structure_property: {
1570 int dst = it[1].u.operand;
1571 int base = it[2].u.operand;
1572 int propertyName = it[3].u.operand;
1573 int enumerator = it[4].u.operand;
1574 printLocationAndOp(out, exec, location, it, "op_has_structure_property");
1575 out.printf("%s, %s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(enumerator).data());
1576 it += OPCODE_LENGTH(op_has_structure_property) - 1;
1579 case op_has_generic_property: {
1580 int dst = it[1].u.operand;
1581 int base = it[2].u.operand;
1582 int propertyName = it[3].u.operand;
1583 printLocationAndOp(out, exec, location, it, "op_has_generic_property");
1584 out.printf("%s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data());
1585 it += OPCODE_LENGTH(op_has_generic_property) - 1;
1588 case op_get_direct_pname: {
1589 int dst = it[1].u.operand;
1590 int base = it[2].u.operand;
1591 int propertyName = it[3].u.operand;
1592 int index = it[4].u.operand;
1593 int enumerator = it[5].u.operand;
1594 ValueProfile* profile = it[6].u.profile;
1595 printLocationAndOp(out, exec, location, it, "op_get_direct_pname");
1596 out.printf("%s, %s, %s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(index).data(), registerName(enumerator).data(), profile);
1597 it += OPCODE_LENGTH(op_get_direct_pname) - 1;
1601 case op_get_property_enumerator: {
1602 int dst = it[1].u.operand;
1603 int base = it[2].u.operand;
1604 printLocationAndOp(out, exec, location, it, "op_get_property_enumerator");
1605 out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
1606 it += OPCODE_LENGTH(op_get_property_enumerator) - 1;
1609 case op_enumerator_structure_pname: {
1610 int dst = it[1].u.operand;
1611 int enumerator = it[2].u.operand;
1612 int index = it[3].u.operand;
1613 printLocationAndOp(out, exec, location, it, "op_enumerator_structure_pname");
1614 out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data());
1615 it += OPCODE_LENGTH(op_enumerator_structure_pname) - 1;
1618 case op_enumerator_generic_pname: {
1619 int dst = it[1].u.operand;
1620 int enumerator = it[2].u.operand;
1621 int index = it[3].u.operand;
1622 printLocationAndOp(out, exec, location, it, "op_enumerator_generic_pname");
1623 out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data());
1624 it += OPCODE_LENGTH(op_enumerator_generic_pname) - 1;
1627 case op_to_index_string: {
1628 int dst = it[1].u.operand;
1629 int index = it[2].u.operand;
1630 printLocationAndOp(out, exec, location, it, "op_to_index_string");
1631 out.printf("%s, %s", registerName(dst).data(), registerName(index).data());
1632 it += OPCODE_LENGTH(op_to_index_string) - 1;
1635 case op_push_with_scope: {
1636 int dst = (++it)->u.operand;
1637 int newScope = (++it)->u.operand;
1638 int currentScope = (++it)->u.operand;
1639 printLocationAndOp(out, exec, location, it, "push_with_scope");
1640 out.printf("%s, %s, %s", registerName(dst).data(), registerName(newScope).data(), registerName(currentScope).data());
1643 case op_get_parent_scope: {
1644 int dst = (++it)->u.operand;
1645 int parentScope = (++it)->u.operand;
1646 printLocationAndOp(out, exec, location, it, "get_parent_scope");
1647 out.printf("%s, %s", registerName(dst).data(), registerName(parentScope).data());
1650 case op_create_lexical_environment: {
1651 int dst = (++it)->u.operand;
1652 int scope = (++it)->u.operand;
1653 int symbolTable = (++it)->u.operand;
1654 int initialValue = (++it)->u.operand;
1655 printLocationAndOp(out, exec, location, it, "create_lexical_environment");
1656 out.printf("%s, %s, %s, %s",
1657 registerName(dst).data(), registerName(scope).data(), registerName(symbolTable).data(), registerName(initialValue).data());
1661 int r0 = (++it)->u.operand;
1662 int r1 = (++it)->u.operand;
1663 printLocationAndOp(out, exec, location, it, "catch");
1664 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
1668 int r0 = (++it)->u.operand;
1669 printLocationOpAndRegisterOperand(out, exec, location, it, "throw", r0);
1672 case op_throw_static_error: {
1673 int k0 = (++it)->u.operand;
1674 ErrorType k1 = static_cast<ErrorType>((++it)->u.unsignedValue);
1675 printLocationAndOp(out, exec, location, it, "throw_static_error");
1676 out.printf("%s, ", constantName(k0).data());
1681 int debugHookType = (++it)->u.operand;
1682 int hasBreakpointFlag = (++it)->u.operand;
1683 printLocationAndOp(out, exec, location, it, "debug");
1684 out.printf("%s, %d", debugHookName(debugHookType), hasBreakpointFlag);
1688 int condition = (++it)->u.operand;
1689 int line = (++it)->u.operand;
1690 printLocationAndOp(out, exec, location, it, "assert");
1691 out.printf("%s, %d", registerName(condition).data(), line);
1695 int r0 = (++it)->u.operand;
1696 printLocationOpAndRegisterOperand(out, exec, location, it, "end", r0);
1699 case op_resolve_scope: {
1700 int r0 = (++it)->u.operand;
1701 int scope = (++it)->u.operand;
1702 int id0 = (++it)->u.operand;
1703 ResolveType resolveType = static_cast<ResolveType>((++it)->u.operand);
1704 int depth = (++it)->u.operand;
1705 void* pointer = (++it)->u.pointer;
1706 printLocationAndOp(out, exec, location, it, "resolve_scope");
1707 out.printf("%s, %s, %s, <%s>, %d, %p", registerName(r0).data(), registerName(scope).data(), idName(id0, identifier(id0)).data(), resolveTypeName(resolveType), depth, pointer);
1710 case op_get_from_scope: {
1711 int r0 = (++it)->u.operand;
1712 int r1 = (++it)->u.operand;
1713 int id0 = (++it)->u.operand;
1714 GetPutInfo getPutInfo = GetPutInfo((++it)->u.operand);
1716 int operand = (++it)->u.operand; // Operand
1717 printLocationAndOp(out, exec, location, it, "get_from_scope");
1718 out.print(registerName(r0), ", ", registerName(r1));
1719 if (static_cast<unsigned>(id0) == UINT_MAX)
1720 out.print(", anonymous");
1722 out.print(", ", idName(id0, identifier(id0)));
1723 out.print(", ", getPutInfo.operand(), "<", resolveModeName(getPutInfo.resolveMode()), "|", resolveTypeName(getPutInfo.resolveType()), "|", initializationModeName(getPutInfo.initializationMode()), ">, ", operand);
1724 dumpValueProfiling(out, it, hasPrintedProfiling);
1727 case op_put_to_scope: {
1728 int r0 = (++it)->u.operand;
1729 int id0 = (++it)->u.operand;
1730 int r1 = (++it)->u.operand;
1731 GetPutInfo getPutInfo = GetPutInfo((++it)->u.operand);
1733 int operand = (++it)->u.operand; // Operand
1734 printLocationAndOp(out, exec, location, it, "put_to_scope");
1735 out.print(registerName(r0));
1736 if (static_cast<unsigned>(id0) == UINT_MAX)
1737 out.print(", anonymous");
1739 out.print(", ", idName(id0, identifier(id0)));
1740 out.print(", ", registerName(r1), ", ", getPutInfo.operand(), "<", resolveModeName(getPutInfo.resolveMode()), "|", resolveTypeName(getPutInfo.resolveType()), "|", initializationModeName(getPutInfo.initializationMode()), ">, <structure>, ", operand);
1743 case op_get_from_arguments: {
1744 int r0 = (++it)->u.operand;
1745 int r1 = (++it)->u.operand;
1746 int offset = (++it)->u.operand;
1747 printLocationAndOp(out, exec, location, it, "get_from_arguments");
1748 out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), offset);
1749 dumpValueProfiling(out, it, hasPrintedProfiling);
1752 case op_put_to_arguments: {
1753 int r0 = (++it)->u.operand;
1754 int offset = (++it)->u.operand;
1755 int r1 = (++it)->u.operand;
1756 printLocationAndOp(out, exec, location, it, "put_to_arguments");
1757 out.printf("%s, %d, %s", registerName(r0).data(), offset, registerName(r1).data());
1761 RELEASE_ASSERT_NOT_REACHED();
1764 dumpRareCaseProfile(out, "rare case: ", rareCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
1766 dumpArithProfile(out, arithProfileForBytecodeOffset(location), hasPrintedProfiling);
1770 Vector<DFG::FrequentExitSite> exitSites = exitProfile().exitSitesFor(location);
1771 if (!exitSites.isEmpty()) {
1772 out.print(" !! frequent exits: ");
1774 for (unsigned i = 0; i < exitSites.size(); ++i)
1775 out.print(comma, exitSites[i].kind(), " ", exitSites[i].jitType());
1777 #else // ENABLE(DFG_JIT)
1778 UNUSED_PARAM(location);
1779 #endif // ENABLE(DFG_JIT)
// Convenience overload: dumps the single bytecode instruction at
// |bytecodeOffset| to |out|. Resolves the global ExecState and the
// instruction iterator, then forwards to the main dumpBytecode() worker.
// NOTE(review): this listing appears to elide the function's braces and
// possibly other lines — confirm against the upstream file before editing.
1783 void CodeBlock::dumpBytecode(
1784 PrintStream& out, unsigned bytecodeOffset,
1785 const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
1787 ExecState* exec = m_globalObject->globalExec();
1788 const Instruction* it = instructions().begin() + bytecodeOffset;
1789 dumpBytecode(out, exec, instructions().begin(), it, stubInfos, callLinkInfos);
// X-macro listing CodeBlock's always-present member vectors; presumably
// expanded for per-member bookkeeping (e.g. size accounting via
// sizeInBytes below) — TODO(review): expansion site is not in view, confirm.
// Do not place comments between the continuation lines: they are part of
// one backslash-continued macro definition.
1792 #define FOR_EACH_MEMBER_VECTOR(macro) \
1793 macro(instructions) \
1794 macro(callLinkInfos) \
1795 macro(linkedCallerList) \
1796 macro(identifiers) \
1797 macro(functionExpressions) \
1798 macro(constantRegisters)
// Companion X-macro for the vectors living in CodeBlock's lazily-created
// RareData. NOTE(review): the listing elides the first continuation
// line(s) of this macro (original lines 1801-1802) — restore them from
// the upstream file before relying on this definition.
1800 #define FOR_EACH_MEMBER_VECTOR_RARE_DATA(macro) \
1803 macro(exceptionHandlers) \
1804 macro(switchJumpTables) \
1805 macro(stringSwitchJumpTables) \
1806 macro(evalCodeCache) \
1807 macro(expressionInfo) \
1809 macro(callReturnIndexVector)
// Returns the heap footprint of |vector|'s backing store. Uses
// capacity(), not size(), so this measures allocated storage rather
// than the number of live elements.
1811 template<typename T>
1812 static size_t sizeInBytes(const Vector<T>& vector)
1814 return vector.capacity() * sizeof(T);
// FireDetail subclass whose dump() explains why a watchpoint fired:
// a put_to_scope was linked in |m_codeBlock|'s owner executable for
// identifier |m_ident|. NOTE(review): the listing elides access-specifier
// lines and the m_ident initializer (original ~1823) — byte layout here
// is incomplete; do not restructure without the upstream file.
1819 class PutToScopeFireDetail : public FireDetail {
1821 PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
1822 : m_codeBlock(codeBlock)
// Prints a human-readable description of the linking event.
1827 void dump(PrintStream& out) const override
1829 out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
// m_ident is a reference: the caller must keep the Identifier alive for
// the detail's (short) lifetime.
1833 CodeBlock* m_codeBlock;
1834 const Identifier& m_ident;
1837 } // anonymous namespace
// Copy constructor (CopyParsedBlockTag): clones the parsed/linked state of
// |other| — instructions, constants, function decls/exprs, registers —
// while resetting per-instance execution state (OSR exit counter,
// optimization delay, reoptimization retries) and stamping a fresh
// creation time. Write-barriered members (m_unlinkedCode,
// m_ownerExecutable) are re-established against |this| using other's VM.
// NOTE(review): the listing elides some initializers (e.g. original lines
// 1845/1847, 1859 — likely m_vm) and the closing brace; confirm upstream.
1839 CodeBlock::CodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other)
1840 : JSCell(*vm, structure)
1841 , m_globalObject(other.m_globalObject)
1842 , m_numCalleeLocals(other.m_numCalleeLocals)
1843 , m_numVars(other.m_numVars)
1844 , m_shouldAlwaysBeInlined(true)
1846 , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
1848 , m_didFailJITCompilation(false)
1849 , m_didFailFTLCompilation(false)
1850 , m_hasBeenCompiledWithFTL(false)
1851 , m_isConstructor(other.m_isConstructor)
1852 , m_isStrictMode(other.m_isStrictMode)
1853 , m_codeType(other.m_codeType)
1854 , m_unlinkedCode(*other.m_vm, this, other.m_unlinkedCode.get())
1855 , m_hasDebuggerStatement(false)
1856 , m_steppingMode(SteppingModeDisabled)
1857 , m_numBreakpoints(0)
1858 , m_ownerExecutable(*other.m_vm, this, other.m_ownerExecutable.get())
1860 , m_instructions(other.m_instructions)
1861 , m_thisRegister(other.m_thisRegister)
1862 , m_scopeRegister(other.m_scopeRegister)
1863 , m_hash(other.m_hash)
1864 , m_source(other.m_source)
1865 , m_sourceOffset(other.m_sourceOffset)
1866 , m_firstLineColumnOffset(other.m_firstLineColumnOffset)
1867 , m_constantRegisters(other.m_constantRegisters)
1868 , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
1869 , m_functionDecls(other.m_functionDecls)
1870 , m_functionExprs(other.m_functionExprs)
1871 , m_osrExitCounter(0)
1872 , m_optimizationDelayCounter(0)
1873 , m_reoptimizationRetryCounter(0)
1874 , m_creationTime(std::chrono::steady_clock::now())
// Body: GC must be deferred during construction (asserted), and the
// scope register is expected to be a local.
1876 m_visitWeaklyHasBeenCalled = false;
1878 ASSERT(heap()->isDeferred());
1879 ASSERT(m_scopeRegister.isLocal());
1881 setNumParameters(other.numParameters());
// Second-phase construction for the copy path: schedules warm-up
// optimization, deep-copies |other|'s rare data tables (exception
// handlers, constant buffers, switch tables) when present, and registers
// this block in the heap's code-block set so the GC can find it.
// NOTE(review): listing elides braces/blank lines; confirm upstream.
1884 void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other)
1886 Base::finishCreation(vm);
1888 optimizeAfterWarmUp();
1891 if (other.m_rareData) {
1892 createRareDataIfNecessary();
1894 m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
1895 m_rareData->m_constantBuffers = other.m_rareData->m_constantBuffers;
1896 m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
1897 m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
1900 heap()->m_codeBlocks->add(this);
// Primary constructor: builds a linked CodeBlock from an
// UnlinkedCodeBlock plus its scope, owner executable, and source
// location. Most shape data (locals, vars, code type, strict mode,
// this/scope registers, parameter count) is taken from the unlinked
// block; profiling/optimization counters start at zero.
// NOTE(review): PassRefPtr is a legacy WebKit smart pointer — kept as-is
// to match the declaration; the listing also elides some initializers
// and the closing brace, so confirm upstream before restructuring.
1903 CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
1904 JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
1905 : JSCell(*vm, structure)
1906 , m_globalObject(scope->globalObject()->vm(), this, scope->globalObject())
1907 , m_numCalleeLocals(unlinkedCodeBlock->m_numCalleeLocals)
1908 , m_numVars(unlinkedCodeBlock->m_numVars)
1909 , m_shouldAlwaysBeInlined(true)
1911 , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
1913 , m_didFailJITCompilation(false)
1914 , m_didFailFTLCompilation(false)
1915 , m_hasBeenCompiledWithFTL(false)
1916 , m_isConstructor(unlinkedCodeBlock->isConstructor())
1917 , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
1918 , m_codeType(unlinkedCodeBlock->codeType())
1919 , m_unlinkedCode(m_globalObject->vm(), this, unlinkedCodeBlock)
1920 , m_hasDebuggerStatement(false)
1921 , m_steppingMode(SteppingModeDisabled)
1922 , m_numBreakpoints(0)
1923 , m_ownerExecutable(m_globalObject->vm(), this, ownerExecutable)
1924 , m_vm(unlinkedCodeBlock->vm())
1925 , m_thisRegister(unlinkedCodeBlock->thisRegister())
1926 , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
1927 , m_source(sourceProvider)
1928 , m_sourceOffset(sourceOffset)
1929 , m_firstLineColumnOffset(firstLineColumnOffset)
1930 , m_osrExitCounter(0)
1931 , m_optimizationDelayCounter(0)
1932 , m_reoptimizationRetryCounter(0)
1933 , m_creationTime(std::chrono::steady_clock::now())
// Body: same invariants as the copy constructor — GC deferred, scope
// register local — then adopt the unlinked block's parameter count.
1935 m_visitWeaklyHasBeenCalled = false;
1937 ASSERT(heap()->isDeferred());
1938 ASSERT(m_scopeRegister.isLocal());
1941 setNumParameters(unlinkedCodeBlock->numParameters());
1944 void CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
1947 Base::finishCreation(vm);
1949 if (vm.typeProfiler() || vm.controlFlowProfiler())
1950 vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(), ownerExecutable->typeProfilingEndOffset());
1952 setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation());
1953 if (unlinkedCodeBlock->usesGlobalObject())
1954 m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(*m_vm, this, m_globalObject.get());
1956 for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
1957 LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
1958 if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
1959 m_constantRegisters[registerIndex].set(*m_vm, this, m_globalObject->jsCellForLinkTimeConstant(type));
1962 // We already have the cloned symbol table for the module environment since we need to instantiate
1963 // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
1964 if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(unlinkedCodeBlock)) {
1965 SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
1966 if (m_vm->typeProfiler()) {
1967 ConcurrentJSLocker locker(clonedSymbolTable->m_lock);
1968 clonedSymbolTable->prepareForTypeProfiling(locker);
1970 replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
1973 bool shouldUpdateFunctionHasExecutedCache = vm.typeProfiler() || vm.controlFlowProfiler();
1974 m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls());
1975 for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
1976 UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
1977 if (shouldUpdateFunctionHasExecutedCache)
1978 vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
1979 m_functionDecls[i].set(*m_vm, this, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
1982 m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs());
1983 for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
1984 UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
1985 if (shouldUpdateFunctionHasExecutedCache)
1986 vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
1987 m_functionExprs[i].set(*m_vm, this, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
1990 if (unlinkedCodeBlock->hasRareData()) {
1991 createRareDataIfNecessary();
1992 if (size_t count = unlinkedCodeBlock->constantBufferCount()) {
1993 m_rareData->m_constantBuffers.grow(count);
1994 for (size_t i = 0; i < count; i++) {
1995 const UnlinkedCodeBlock::ConstantBuffer& buffer = unlinkedCodeBlock->constantBuffer(i);
1996 m_rareData->m_constantBuffers[i] = buffer;
1999 if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
2000 m_rareData->m_exceptionHandlers.resizeToFit(count);
2001 for (size_t i = 0; i < count; i++) {
2002 const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
2003 HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
2005 handler.initialize(unlinkedHandler, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(op_catch))));
2007 handler.initialize(unlinkedHandler);
2012 if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
2013 m_rareData->m_stringSwitchJumpTables.grow(count);
2014 for (size_t i = 0; i < count; i++) {
2015 UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
2016 UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
2017 for (; ptr != end; ++ptr) {
2018 OffsetLocation offset;
2019 offset.branchOffset = ptr->value.branchOffset;
2020 m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
2025 if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
2026 m_rareData->m_switchJumpTables.grow(count);
2027 for (size_t i = 0; i < count; i++) {
2028 UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
2029 SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
2030 destTable.branchOffsets = sourceTable.branchOffsets;
2031 destTable.min = sourceTable.min;
2036 // Allocate metadata buffers for the bytecode
2037 if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
2038 m_llintCallLinkInfos = RefCountedArray<LLIntCallLinkInfo>(size);
2039 if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
2040 m_arrayProfiles.grow(size);
2041 if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
2042 m_arrayAllocationProfiles = RefCountedArray<ArrayAllocationProfile>(size);
2043 if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
2044 m_valueProfiles = RefCountedArray<ValueProfile>(size);
2045 if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
2046 m_objectAllocationProfiles = RefCountedArray<ObjectAllocationProfile>(size);
2049 setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters());
2052 // Copy and translate the UnlinkedInstructions
2053 unsigned instructionCount = unlinkedCodeBlock->instructions().count();
2054 UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions());
2056 // Bookkeep the strongly referenced module environments.
2057 HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;
2059 RefCountedArray<Instruction> instructions(instructionCount);
2061 unsigned valueProfileCount = 0;
2062 auto linkValueProfile = [&](unsigned bytecodeOffset, unsigned opLength) {
2063 unsigned valueProfileIndex = valueProfileCount++;
2064 ValueProfile* profile = &m_valueProfiles[valueProfileIndex];
2065 ASSERT(profile->m_bytecodeOffset == -1);
2066 profile->m_bytecodeOffset = bytecodeOffset;
2067 instructions[bytecodeOffset + opLength - 1] = profile;
2070 for (unsigned i = 0; !instructionReader.atEnd(); ) {
2071 const UnlinkedInstruction* pc = instructionReader.next();
2073 unsigned opLength = opcodeLength(pc[0].u.opcode);
2075 instructions[i] = vm.interpreter->getOpcode(pc[0].u.opcode);
2076 for (size_t j = 1; j < opLength; ++j) {
2077 if (sizeof(int32_t) != sizeof(intptr_t))
2078 instructions[i + j].u.pointer = 0;
2079 instructions[i + j].u.operand = pc[j].u.operand;
2081 switch (pc[0].u.opcode) {
2082 case op_has_indexed_property: {
2083 int arrayProfileIndex = pc[opLength - 1].u.operand;
2084 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
2086 instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
2089 case op_call_varargs:
2090 case op_tail_call_varargs:
2091 case op_tail_call_forward_arguments:
2092 case op_construct_varargs:
2093 case op_get_by_val: {
2094 int arrayProfileIndex = pc[opLength - 2].u.operand;
2095 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
2097 instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
2100 case op_get_direct_pname:
2102 case op_get_by_id_with_this:
2103 case op_try_get_by_id:
2104 case op_get_by_val_with_this:
2105 case op_get_from_arguments:
2107 case op_get_argument: {
2108 linkValueProfile(i, opLength);
2111 case op_put_by_val: {
2112 int arrayProfileIndex = pc[opLength - 1].u.operand;
2113 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
2114 instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
2117 case op_put_by_val_direct: {
2118 int arrayProfileIndex = pc[opLength - 1].u.operand;
2119 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
2120 instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
2125 case op_new_array_buffer:
2126 case op_new_array_with_size: {
2127 int arrayAllocationProfileIndex = pc[opLength - 1].u.operand;
2128 instructions[i + opLength - 1] = &m_arrayAllocationProfiles[arrayAllocationProfileIndex];
2131 case op_new_object: {
2132 int objectAllocationProfileIndex = pc[opLength - 1].u.operand;
2133 ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex];
2134 int inferredInlineCapacity = pc[opLength - 2].u.operand;
2136 instructions[i + opLength - 1] = objectAllocationProfile;
2137 objectAllocationProfile->initialize(vm,
2138 this, m_globalObject->objectPrototype(), inferredInlineCapacity);
2144 case op_call_eval: {
2145 linkValueProfile(i, opLength);
2146 int arrayProfileIndex = pc[opLength - 2].u.operand;
2147 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
2148 instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
2149 instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
2152 case op_construct: {
2153 instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
2154 linkValueProfile(i, opLength);
2157 case op_get_array_length:
2160 case op_resolve_scope: {
2161 const Identifier& ident = identifier(pc[3].u.operand);
2162 ResolveType type = static_cast<ResolveType>(pc[4].u.operand);
2163 RELEASE_ASSERT(type != LocalClosureVar);
2164 int localScopeDepth = pc[5].u.operand;
2166 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);
2167 instructions[i + 4].u.operand = op.type;
2168 instructions[i + 5].u.operand = op.depth;
2169 if (op.lexicalEnvironment) {
2170 if (op.type == ModuleVar) {
2171 // Keep the linked module environment strongly referenced.
2172 if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry)
2173 addConstant(op.lexicalEnvironment);
2174 instructions[i + 6].u.jsCell.set(vm, this, op.lexicalEnvironment);
2176 instructions[i + 6].u.symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());
2177 } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this))
2178 instructions[i + 6].u.jsCell.set(vm, this, constantScope);
2180 instructions[i + 6].u.pointer = nullptr;
2184 case op_get_from_scope: {
2185 linkValueProfile(i, opLength);
2187 // get_from_scope dst, scope, id, GetPutInfo, Structure, Operand
2189 int localScopeDepth = pc[5].u.operand;
2190 instructions[i + 5].u.pointer = nullptr;
2192 GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
2193 ASSERT(!isInitialization(getPutInfo.initializationMode()));
2194 if (getPutInfo.resolveType() == LocalClosureVar) {
2195 instructions[i + 4] = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
2199 const Identifier& ident = identifier(pc[3].u.operand);
2200 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, getPutInfo.resolveType(), InitializationMode::NotInitialization);
2202 instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
2203 if (op.type == ModuleVar)
2204 instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
2205 if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
2206 instructions[i + 5].u.watchpointSet = op.watchpointSet;
2207 else if (op.structure)
2208 instructions[i + 5].u.structure.set(vm, this, op.structure);
2209 instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
2213 case op_put_to_scope: {
2214 // put_to_scope scope, id, value, GetPutInfo, Structure, Operand
2215 GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
2216 if (getPutInfo.resolveType() == LocalClosureVar) {
2217 // Only do watching if the property we're putting to is not anonymous.
2218 if (static_cast<unsigned>(pc[2].u.operand) != UINT_MAX) {
2219 int symbolTableIndex = pc[5].u.operand;
2220 SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
2221 const Identifier& ident = identifier(pc[2].u.operand);
2222 ConcurrentJSLocker locker(symbolTable->m_lock);
2223 auto iter = symbolTable->find(locker, ident.impl());
2224 ASSERT(iter != symbolTable->end(locker));
2225 iter->value.prepareToWatch();
2226 instructions[i + 5].u.watchpointSet = iter->value.watchpointSet();
2228 instructions[i + 5].u.watchpointSet = nullptr;
2232 const Identifier& ident = identifier(pc[2].u.operand);
2233 int localScopeDepth = pc[5].u.operand;
2234 instructions[i + 5].u.pointer = nullptr;
2235 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Put, getPutInfo.resolveType(), getPutInfo.initializationMode());
2237 instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
2238 if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
2239 instructions[i + 5].u.watchpointSet = op.watchpointSet;
2240 else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
2241 if (op.watchpointSet)
2242 op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident));
2243 } else if (op.structure)
2244 instructions[i + 5].u.structure.set(vm, this, op.structure);
2245 instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
2250 case op_profile_type: {
2251 RELEASE_ASSERT(vm.typeProfiler());
2252 // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType?
2253 size_t instructionOffset = i + opLength - 1;
2254 unsigned divotStart, divotEnd;
2255 GlobalVariableID globalVariableID = 0;
2256 RefPtr<TypeSet> globalTypeSet;
2257 bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
2258 VirtualRegister profileRegister(pc[1].u.operand);
2259 ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand);
2260 SymbolTable* symbolTable = nullptr;
2263 case ProfileTypeBytecodeClosureVar: {
2264 const Identifier& ident = identifier(pc[4].u.operand);
2265 int localScopeDepth = pc[2].u.operand;
2266 ResolveType type = static_cast<ResolveType>(pc[5].u.operand);
2267 // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
2268 // we're abstractly "read"ing from a JSScope.
2269 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);
2271 if (op.type == ClosureVar || op.type == ModuleVar)
2272 symbolTable = op.lexicalEnvironment->symbolTable();
2273 else if (op.type == GlobalVar)
2274 symbolTable = m_globalObject.get()->symbolTable();
2276 UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl();
2278 ConcurrentJSLocker locker(symbolTable->m_lock);
2279 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
2280 symbolTable->prepareForTypeProfiling(locker);
2281 globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm);
2282 globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm);
2284 globalVariableID = TypeProfilerNoGlobalIDExists;
2288 case ProfileTypeBytecodeLocallyResolved: {
2289 int symbolTableIndex = pc[2].u.operand;
2290 SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
2291 const Identifier& ident = identifier(pc[4].u.operand);
2292 ConcurrentJSLocker locker(symbolTable->m_lock);
2293 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
2294 globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm);
2295 globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm);
2299 case ProfileTypeBytecodeDoesNotHaveGlobalID:
2300 case ProfileTypeBytecodeFunctionArgument: {
2301 globalVariableID = TypeProfilerNoGlobalIDExists;
2304 case ProfileTypeBytecodeFunctionReturnStatement: {
2305 RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
2306 globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
2307 globalVariableID = TypeProfilerReturnStatement;
2308 if (!shouldAnalyze) {
2309 // Because a return statement can be added implicitly to return undefined at the end of a function,
2310 // and these nodes don't emit expression ranges because they aren't in the actual source text of
2311 // the user's program, give the type profiler some range to identify these return statements.
2312 // Currently, the text offset that is used as identification is "f" in the function keyword
2313 // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
2314 divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset();
2315 shouldAnalyze = true;
2321 std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
2322 ownerExecutable->sourceID(), divotStart, divotEnd, globalTypeSet, &vm);
2323 TypeLocation* location = locationPair.first;
2324 bool isNewLocation = locationPair.second;
2326 if (flag == ProfileTypeBytecodeFunctionReturnStatement)
2327 location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset();
2329 if (shouldAnalyze && isNewLocation)
2330 vm.typeProfiler()->insertNewLocation(location);
2332 instructions[i + 2].u.location = location;
2337 if (pc[1].u.index == DidReachBreakpoint)
2338 m_hasDebuggerStatement = true;
2342 case op_create_rest: {
2343 int numberOfArgumentsToSkip = instructions[i + 3].u.operand;
2344 ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0);
2345 ASSERT_WITH_MESSAGE(numberOfArgumentsToSkip == numParameters() - 1, "We assume that this is true when rematerializing the rest parameter during OSR exit in the FTL JIT.");
2355 if (vm.controlFlowProfiler())
2356 insertBasicBlockBoundariesForControlFlowProfiler(instructions);
2358 m_instructions = WTFMove(instructions);
2360 // Set optimization thresholds only after m_instructions is initialized, since these
2361 // rely on the instruction count (and are in theory permitted to also inspect the
2362 // instruction stream to more accurate assess the cost of tier-up).
2363 optimizeAfterWarmUp();
2366 // If the concurrent thread will want the code block's hash, then compute it here
2368 if (Options::alwaysComputeHash())
2371 if (Options::dumpGeneratedBytecodes())
2374 heap()->m_codeBlocks->add(this);
2375 heap()->reportExtraMemoryAllocated(m_instructions.size() * sizeof(Instruction));
2378 #if ENABLE(WEBASSEMBLY)
// Constructor for CodeBlocks that wrap a WebAssemblyExecutable (compiled only
// under ENABLE(WEBASSEMBLY)). There is no unlinked bytecode to link; this just
// initializes bookkeeping fields to defaults for a non-constructor,
// non-strict function-code block.
CodeBlock::CodeBlock(VM* vm, Structure* structure, WebAssemblyExecutable* ownerExecutable, JSGlobalObject* globalObject)
    : JSCell(*vm, structure)
    , m_globalObject(globalObject->vm(), this, globalObject)
    , m_numCalleeLocals(0)
    , m_shouldAlwaysBeInlined(false)
    // Wasm blocks are never candidates for DFG compilation.
    , m_capabilityLevelState(DFG::CannotCompile)
    , m_didFailJITCompilation(false)
    , m_didFailFTLCompilation(false)
    , m_hasBeenCompiledWithFTL(false)
    , m_isConstructor(false)
    , m_isStrictMode(false)
    , m_codeType(FunctionCode)
    , m_hasDebuggerStatement(false)
    , m_steppingMode(SteppingModeDisabled)
    , m_numBreakpoints(0)
    , m_ownerExecutable(m_globalObject->vm(), this, ownerExecutable)
    , m_osrExitCounter(0)
    , m_optimizationDelayCounter(0)
    , m_reoptimizationRetryCounter(0)
    // Creation time feeds the age-based jettison heuristics (timeToLive).
    , m_creationTime(std::chrono::steady_clock::now())
    // Construction must happen while the GC is deferred; the block is
    // published to the heap's CodeBlock set in finishCreation().
    ASSERT(heap()->isDeferred());
2407 void CodeBlock::finishCreation(VM& vm, WebAssemblyExecutable*, JSGlobalObject*)
2409 Base::finishCreation(vm);
2411 heap()->m_codeBlocks->add(this);
// Tears down a CodeBlock: notifies the per-bytecode profiler, downgrades a
// MixedTriState didOptimize record on the unlinked block to FalseTriState,
// severs incoming call linkage, and releases JIT stub info.
CodeBlock::~CodeBlock()
    if (m_vm->m_perBytecodeProfiler)
        m_vm->m_perBytecodeProfiler->notifyDestruction(this);
    // A mixed optimize/jettison history is recorded pessimistically as "did
    // not optimize" on the shared unlinked code block.
    if (unlinkedCodeBlock()->didOptimize() == MixedTriState)
        unlinkedCodeBlock()->setDidOptimize(FalseTriState);
#if ENABLE(VERBOSE_VALUE_PROFILE)
    dumpValueProfiles();
    // We may be destroyed before any CodeBlocks that refer to us are destroyed.
    // Consider that two CodeBlocks become unreachable at the same time. There
    // is no guarantee about the order in which the CodeBlocks are destroyed.
    // So, if we don't remove incoming calls, and get destroyed before the
    // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
    // destructor will try to remove nodes from our (no longer valid) linked list.
    unlinkIncomingCalls();
    // Note that our outgoing calls will be removed from other CodeBlocks'
    // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
    // NOTE(review): upstream calls stub->aboutToDie() and stub->deref() inside
    // this loop — those lines appear to be missing here; confirm against trunk.
    for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
        StructureStubInfo* stub = *iter;
#endif // ENABLE(JIT)
// Copies the linked constant-register values into this CodeBlock. SymbolTable
// constants are special-cased: each is cloned (scope part only) so this block
// owns an independent table; with the type profiler active the source table is
// first prepared for profiling under its lock, and when compiled with
// debugging opcodes the clone is pointed back at this block.
void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation)
    ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
    size_t count = constants.size();
    m_constantRegisters.resizeToFit(count);
    bool hasTypeProfiler = !!m_vm->typeProfiler();
    for (size_t i = 0; i < count; i++) {
        JSValue constant = constants[i].get();
        // Empty slots are copied through untouched.
        if (!constant.isEmpty()) {
            if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(constant)) {
                if (hasTypeProfiler) {
                    // prepareForTypeProfiling requires the table's lock.
                    ConcurrentJSLocker locker(symbolTable->m_lock);
                    symbolTable->prepareForTypeProfiling(locker);
                SymbolTable* clone = symbolTable->cloneScopePart(*m_vm);
                if (wasCompiledWithDebuggingOpcodes())
                    clone->setRareDataCodeBlock(this);
                // NOTE(review): upstream assigns `constant = clone;` here so the
                // clone (not the shared table) is stored below — a line appears
                // to be missing; confirm against trunk.
        m_constantRegisters[i].set(*m_vm, this, constant);
    m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
2478 void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative)
2480 m_alternative.set(vm, this, alternative);
2483 void CodeBlock::setNumParameters(int newValue)
2485 m_numParameters = newValue;
2487 m_argumentValueProfiles = RefCountedArray<ValueProfile>(newValue);
2490 void EvalCodeCache::visitAggregate(SlotVisitor& visitor)
2492 EvalCacheMap::iterator end = m_cacheMap.end();
2493 for (EvalCacheMap::iterator ptr = m_cacheMap.begin(); ptr != end; ++ptr)
2494 visitor.append(&ptr->value);
2497 CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
2500 if (jitType() != JITCode::DFGJIT)
2502 DFG::JITCode* jitCode = m_jitCode->dfg();
2503 return jitCode->osrEntryBlock();
2504 #else // ENABLE(FTL_JIT)
2506 #endif // ENABLE(FTL_JIT)
// First GC visit for a CodeBlock that is (so far) only weakly reachable.
// Interpreter/Baseline blocks that should live are marked strongly right away;
// optimizing-JIT blocks instead register finalizers/harvesters and run one
// round of transition propagation and liveness determination, which may or may
// not prove them live this cycle.
void CodeBlock::visitWeakly(SlotVisitor& visitor)
    ConcurrentJSLocker locker(m_lock);
    // Run at most once per GC cycle.
    if (m_visitWeaklyHasBeenCalled)
    m_visitWeaklyHasBeenCalled = true;
    // Already marked: nothing to decide.
    if (Heap::isMarkedConcurrently(this))
    if (shouldVisitStrongly(locker)) {
        visitor.appendUnbarrieredReadOnlyPointer(this);
    // There are two things that may use unconditional finalizers: inline cache clearing
    // and jettisoning. The probability of us wanting to do at least one of those things
    // is probably quite close to 1. So we add one no matter what and when it runs, it
    // figures out whether it has any work to do.
    visitor.addUnconditionalFinalizer(&m_unconditionalFinalizer);
    if (!JITCode::isOptimizingJIT(jitType()))
    // If we jettison ourselves we'll install our alternative, so make sure that it
    // survives GC even if we don't.
    visitor.append(&m_alternative);
    // There are two things that we use weak reference harvesters for: DFG fixpoint for
    // jettisoning, and trying to find structures that would be live based on some
    // inline cache. So it makes sense to register them regardless.
    visitor.addWeakReferenceHarvester(&m_weakReferenceHarvester);
    // We get here if we're live in the sense that our owner executable is live,
    // but we're not yet live for sure in another sense: we may yet decide that this
    // code block should be jettisoned based on its outgoing weak references being
    // stale. Set a flag to indicate that we're still assuming that we're dead, and
    // perform one round of determining if we're live. The GC may determine, based on
    // either us marking additional objects, or by other objects being marked for
    // other reasons, that this iteration should run again; it will notify us of this
    // decision by calling harvestWeakReferences().
    m_allTransitionsHaveBeenMarked = false;
    propagateTransitions(locker, visitor);
    // DFG-only: reset the liveness proof and attempt one round now.
    m_jitCode->dfgCommon()->livenessHasBeenProved = false;
    determineLiveness(locker, visitor);
#endif // ENABLE(DFG_JIT)
2561 size_t CodeBlock::estimatedSize(JSCell* cell)
2563 CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
2564 size_t extraMemoryAllocated = thisObject->m_instructions.size() * sizeof(Instruction);
2565 if (thisObject->m_jitCode)
2566 extraMemoryAllocated += thisObject->m_jitCode->size();
2567 return Base::estimatedSize(cell) + extraMemoryAllocated;
2570 void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
2572 CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
2573 ASSERT_GC_OBJECT_INHERITS(thisObject, info());
2574 JSCell::visitChildren(thisObject, visitor);
2575 thisObject->visitChildren(visitor);
// Strong-visit pass: registers the unconditional finalizer, keeps any FTL
// OSR-entry sibling alive, reports extra memory, visits strong and weak
// reference sets, and re-propagates structure transitions — all under the
// block's lock.
void CodeBlock::visitChildren(SlotVisitor& visitor)
    ConcurrentJSLocker locker(m_lock);
    // There are two things that may use unconditional finalizers: inline cache clearing
    // and jettisoning. The probability of us wanting to do at least one of those things
    // is probably quite close to 1. So we add one no matter what and when it runs, it
    // figures out whether it has any work to do.
    visitor.addUnconditionalFinalizer(&m_unconditionalFinalizer);
    // An OSR-entry sibling lives or dies with us.
    if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
        visitor.appendUnbarrieredReadOnlyPointer(otherBlock);
    // NOTE(review): upstream guards this with `if (m_jitCode)` — the guard line
    // appears to be missing here; confirm against trunk.
    visitor.reportExtraMemoryVisited(m_jitCode->size());
    if (m_instructions.size()) {
        // The instruction stream may be shared between blocks; attribute only
        // this block's share of it.
        unsigned refCount = m_instructions.refCount();
        RELEASE_ASSERT(refCount);
        visitor.reportExtraMemoryVisited(m_instructions.size() * sizeof(Instruction) / refCount);
    stronglyVisitStrongReferences(locker, visitor);
    stronglyVisitWeakReferences(locker, visitor);
    // Force one round of transition propagation on every strong visit.
    m_allTransitionsHaveBeenMarked = false;
    propagateTransitions(locker, visitor);
2605 bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker)
2607 if (Options::forceCodeBlockLiveness())
2610 if (shouldJettisonDueToOldAge(locker))
2613 // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
2614 // their weak references go stale. So if a basline JIT CodeBlock gets
2615 // scanned, we can assume that this means that it's live.
2616 if (!JITCode::isOptimizingJIT(jitType()))
2622 bool CodeBlock::shouldJettisonDueToWeakReference()
2624 if (!JITCode::isOptimizingJIT(jitType()))
2626 return !Heap::isMarked(this);
2629 static std::chrono::milliseconds timeToLive(JITCode::JITType jitType)
2631 if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) {
2633 case JITCode::InterpreterThunk:
2634 return std::chrono::milliseconds(10);
2635 case JITCode::BaselineJIT:
2636 return std::chrono::milliseconds(10 + 20);
2637 case JITCode::DFGJIT:
2638 return std::chrono::milliseconds(40);
2639 case JITCode::FTLJIT:
2640 return std::chrono::milliseconds(120);
2642 return std::chrono::milliseconds::max();
2647 case JITCode::InterpreterThunk:
2648 return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(5));
2649 case JITCode::BaselineJIT:
2650 // Effectively 10 additional seconds, since BaselineJIT and
2651 // InterpreterThunk share a CodeBlock.
2652 return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(5 + 10));
2653 case JITCode::DFGJIT:
2654 return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(20));
2655 case JITCode::FTLJIT:
2656 return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(60));
2658 return std::chrono::milliseconds::max();
2662 bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
2664 if (Heap::isMarkedConcurrently(this))
2667 if (UNLIKELY(Options::forceCodeBlockToJettisonDueToOldAge()))
2670 if (timeSinceCreation() < timeToLive(jitType()))
2677 static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
2679 if (transition.m_codeOrigin && !Heap::isMarkedConcurrently(transition.m_codeOrigin.get()))
2682 if (!Heap::isMarkedConcurrently(transition.m_from.get()))
2687 #endif // ENABLE(DFG_JIT)
// One round of the transition-marking fixpoint: for every structure transition
// this block can perform (LLInt put_by_id caches, JIT stub infos, DFG common
// data), mark the target structure if its preconditions are live. Sets
// m_allTransitionsHaveBeenMarked once a round completes with everything marked,
// so later rounds can bail early.
void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& visitor)
    UNUSED_PARAM(visitor);
    // Fixpoint already reached in an earlier round.
    if (m_allTransitionsHaveBeenMarked)
    bool allAreMarkedSoFar = true;
    Interpreter* interpreter = m_vm->interpreter;
    if (jitType() == JITCode::InterpreterThunk) {
        // LLInt: walk the recorded property-access bytecodes looking for cached
        // put_by_id transitions (old structure -> new structure).
        const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
        for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
            Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
            switch (interpreter->getOpcodeID(instruction[0].u.opcode)) {
            case op_put_by_id: {
                StructureID oldStructureID = instruction[4].u.structureID;
                StructureID newStructureID = instruction[6].u.structureID;
                // No cached transition in this instruction.
                if (!oldStructureID || !newStructureID)
                Structure* oldStructure =
                    m_vm->heap.structureIDTable().get(oldStructureID);
                Structure* newStructure =
                    m_vm->heap.structureIDTable().get(newStructureID);
                // A live source structure keeps the target alive; otherwise the
                // fixpoint has not converged yet.
                if (Heap::isMarkedConcurrently(oldStructure))
                    visitor.appendUnbarrieredReadOnlyPointer(newStructure);
                    allAreMarkedSoFar = false;
    if (JITCode::isJIT(jitType())) {
        // Baseline/optimized JIT: each stub info knows its own transitions.
        for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter)
            allAreMarkedSoFar &= (*iter)->propagateTransitions(visitor);
#endif // ENABLE(JIT)
    if (JITCode::isOptimizingJIT(jitType())) {
        DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
        // Cheap-to-mark weak structure references can be resolved immediately.
        for (auto& weakReference : dfgCommon->weakStructureReferences)
            allAreMarkedSoFar &= weakReference->markIfCheap(visitor);
        for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
            if (shouldMarkTransition(dfgCommon->transitions[i])) {
                // If the following three things are live, then the target of the
                // transition is also live:
                // - This code block. We know it's live already because otherwise
                // we wouldn't be scanning ourselves.
                // - The code origin of the transition. Transitions may arise from
                // code that was inlined. They are not relevant if the user's
                // object that is required for the inlinee to run is no longer
                // - The source of the transition. The transition checks if some
                // heap location holds the source, and if so, stores the target.
                // Hence the source must be live for the transition to be live.
                // We also short-circuit the liveness if the structure is harmless
                // to mark (i.e. its global object and prototype are both already
                visitor.append(&dfgCommon->transitions[i].m_to);
                allAreMarkedSoFar = false;
#endif // ENABLE(DFG_JIT)
    if (allAreMarkedSoFar)
        m_allTransitionsHaveBeenMarked = true;
// DFG-only liveness fixpoint step: if every weak reference and weak structure
// reference held by the optimized code is marked, the block has proved itself
// live — record that and mark the block strongly. Otherwise leave the decision
// for a later round (or for jettison at the end of GC).
void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visitor)
    UNUSED_PARAM(visitor);
    // Check if we have any remaining work to do.
    DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
    if (dfgCommon->livenessHasBeenProved)
    // Now check all of our weak references. If all of them are live, then we
    // have proved liveness and so we scan our strong references. If at end of
    // GC we still have not proved liveness, then this code block is toast.
    bool allAreLiveSoFar = true;
    for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
        if (!Heap::isMarkedConcurrently(dfgCommon->weakReferences[i].get())) {
            allAreLiveSoFar = false;
    // Weak structure references are checked only if the plain weak references
    // all survived.
    if (allAreLiveSoFar) {
        for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
            if (!Heap::isMarkedConcurrently(dfgCommon->weakStructureReferences[i].get())) {
                allAreLiveSoFar = false;
    // If some weak references are dead, then this fixpoint iteration was
    if (!allAreLiveSoFar)
    // All weak references are live. Record this information so we don't
    // come back here again, and scan the strong references.
    dfgCommon->livenessHasBeenProved = true;
    visitor.appendUnbarrieredReadOnlyPointer(this);
#endif // ENABLE(DFG_JIT)
2811 void CodeBlock::WeakReferenceHarvester::visitWeakReferences(SlotVisitor& visitor)
2813 CodeBlock* codeBlock =
2814 bitwise_cast<CodeBlock*>(
2815 bitwise_cast<char*>(this) - OBJECT_OFFSETOF(CodeBlock, m_weakReferenceHarvester));
2817 codeBlock->propagateTransitions(NoLockingNecessary, visitor);
2818 codeBlock->determineLiveness(NoLockingNecessary, visitor);
2821 void CodeBlock::clearLLIntGetByIdCache(Instruction* instruction)
2823 instruction[0].u.opcode = LLInt::getOpcode(op_get_by_id);
2824 instruction[4].u.pointer = nullptr;
2825 instruction[5].u.pointer = nullptr;
2826 instruction[6].u.pointer = nullptr;
// GC finalization for LLInt inline caches: walks every property-access
// instruction and clears any cached Structure / cell that was not marked in
// this collection, so stale caches do not keep dead objects reachable.
// NOTE(review): this listing is missing physical lines (braces, `break`s,
// and at least one case label) — the comments describe the visible code only.
2829 void CodeBlock::finalizeLLIntInlineCaches()
2831 #if ENABLE(WEBASSEMBLY)
// WebAssembly code blocks carry no LLInt bytecode caches to scrub.
2832 if (m_ownerExecutable->isWebAssemblyExecutable())
2836 Interpreter* interpreter = m_vm->interpreter;
2837 const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
2838 for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
2839 Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
2840 switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) {
// get_by_id variants: operand 4 holds the cached StructureID; the cache is
// kept only when there is no cached structure or it is still marked.
2842 case op_get_by_id_proto_load:
2843 case op_get_by_id_unset: {
2844 StructureID oldStructureID = curInstruction[4].u.structureID;
2845 if (!oldStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(oldStructureID)))
2847 if (Options::verboseOSR())
2848 dataLogF("Clearing LLInt property access.\n");
2849 clearLLIntGetByIdCache(curInstruction);
// put_by_id transitions cache the old structure, the new structure, and the
// prototype chain; every one that is present must be live for the cache to
// survive, otherwise slots 4-7 are reset.
2852 case op_put_by_id: {
2853 StructureID oldStructureID = curInstruction[4].u.structureID;
2854 StructureID newStructureID = curInstruction[6].u.structureID;
2855 StructureChain* chain = curInstruction[7].u.structureChain.get();
2856 if ((!oldStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(oldStructureID))) &&
2857 (!newStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(newStructureID))) &&
2858 (!chain || Heap::isMarked(chain)))
2860 if (Options::verboseOSR())
2861 dataLogF("Clearing LLInt put transition.\n");
2862 curInstruction[4].u.structureID = 0;
2863 curInstruction[5].u.operand = 0;
2864 curInstruction[6].u.structureID = 0;
2865 curInstruction[7].u.structureChain.clear();
2868 case op_get_array_length:
// (to_this handling — its case label is missing from this listing.) Clears a
// dead cached Structure and records in the status operand that GC did it.
2871 if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
2873 if (Options::verboseOSR())
2874 dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
2875 curInstruction[2].u.structure.clear();
2876 curInstruction[3].u.toThisStatus = merge(
2877 curInstruction[3].u.toThisStatus, ToThisClearedByGC);
// create_this caches the callee function; the seenMultipleCalleeObjects()
// sentinel is left alone, and a still-marked cached function is kept.
2879 case op_create_this: {
2880 auto& cacheWriteBarrier = curInstruction[4].u.jsCell;
2881 if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
2883 JSCell* cachedFunction = cacheWriteBarrier.get();
2884 if (Heap::isMarked(cachedFunction))
2886 if (Options::verboseOSR())
2887 dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
2888 cacheWriteBarrier.clear();
2891 case op_resolve_scope: {
2892 // Right now this isn't strictly necessary. Any symbol tables that this will refer to
2893 // are for outer functions, and we refer to those functions strongly, and they refer
2894 // to the symbol table strongly. But it's nice to be on the safe side.
2895 WriteBarrierBase<SymbolTable>& symbolTable = curInstruction[6].u.symbolTable;
2896 if (!symbolTable || Heap::isMarked(symbolTable.get()))
2898 if (Options::verboseOSR())
2899 dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
2900 symbolTable.clear();
// Scope accesses that resolve to var-style storage cache no Structure; for
// the remaining resolve types, a dead cached Structure is dropped.
2903 case op_get_from_scope:
2904 case op_put_to_scope: {
2905 GetPutInfo getPutInfo = GetPutInfo(curInstruction[4].u.operand);
2906 if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks
2907 || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
2909 WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
2910 if (!structure || Heap::isMarked(structure.get()))
2912 if (Options::verboseOSR())
2913 dataLogF("Clearing scope access with structure %p.\n", structure.get());
// Any other opcode in the property-access list is a logic error.
2918 OpcodeID opcodeID = interpreter->getOpcodeID(curInstruction[0].u.opcode);
2919 ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
2923 // We can't just remove all the sets when we clear the caches since we might have created a watchpoint set
2924 // then cleared the cache without GCing in between.
2925 m_llintGetByIdWatchpointMap.removeIf([](const StructureWatchpointMap::KeyValuePairType& pair) -> bool {
2926 return !Heap::isMarked(pair.key);
// Unlink LLInt call caches whose callee died; also drop a dead lastSeenCallee.
2929 for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
2930 if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
2931 if (Options::verboseOSR())
2932 dataLog("Clearing LLInt call from ", *this, "\n");
2933 m_llintCallLinkInfos[i].unlink();
2935 if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
2936 m_llintCallLinkInfos[i].lastSeenCallee.clear();
// GC finalization for baseline-JIT inline caches: lets every call link info
// and structure stub info drop weak references to cells that died in this
// collection cycle.
2940 void CodeBlock::finalizeBaselineJITInlineCaches()
2943 for (auto iter = callLinkInfosBegin(); !!iter; ++iter)
2944 (*iter)->visitWeak(*vm());
2946 for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
2947 StructureStubInfo& stubInfo = **iter;
2948 stubInfo.visitWeakReferences(this);
// Unconditional GC finalizer: recovers the owning CodeBlock from the member
// address, refreshes value-profile predictions, jettisons the code when dead
// weak references or old age demand it, and otherwise scrubs the LLInt and
// baseline inline caches.
2953 void CodeBlock::UnconditionalFinalizer::finalizeUnconditionally()
2955 CodeBlock* codeBlock = bitwise_cast<CodeBlock*>(
2956 bitwise_cast<char*>(this) - OBJECT_OFFSETOF(CodeBlock, m_unconditionalFinalizer));
2958 codeBlock->updateAllPredictions();
2961 if (codeBlock->shouldJettisonDueToWeakReference()) {
2962 codeBlock->jettison(Profiler::JettisonDueToWeakReference);
2965 #endif // ENABLE(DFG_JIT)
2967 if (codeBlock->shouldJettisonDueToOldAge(NoLockingNecessary)) {
2968 codeBlock->jettison(Profiler::JettisonDueToOldAge);
// LLInt caches only need scrubbing while the block can still run interpreted.
2972 if (JITCode::couldBeInterpreted(codeBlock->jitType()))
2973 codeBlock->finalizeLLIntInlineCaches();
2976 if (!!codeBlock->jitCode())
2977 codeBlock->finalizeBaselineJITInlineCaches();
// Snapshot helpers for the concurrent compiler: each locker-taking overload
// copies the relevant inline-cache records into a CodeOrigin-keyed map, but
// only when this block actually has JIT data; the convenience overloads
// acquire m_lock themselves and delegate. In configurations without the data,
// UNUSED_PARAM silences the unused-argument warning and the map stays empty.
2981 void CodeBlock::getStubInfoMap(const ConcurrentJSLocker&, StubInfoMap& result)
2984 if (JITCode::isJIT(jitType()))
2985 toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
2987 UNUSED_PARAM(result);
2991 void CodeBlock::getStubInfoMap(StubInfoMap& result)
2993 ConcurrentJSLocker locker(m_lock);
2994 getStubInfoMap(locker, result);
2997 void CodeBlock::getCallLinkInfoMap(const ConcurrentJSLocker&, CallLinkInfoMap& result)
3000 if (JITCode::isJIT(jitType()))
3001 toHashMap(m_callLinkInfos, getCallLinkInfoCodeOrigin, result);
3003 UNUSED_PARAM(result);
3007 void CodeBlock::getCallLinkInfoMap(CallLinkInfoMap& result)
3009 ConcurrentJSLocker locker(m_lock);
3010 getCallLinkInfoMap(locker, result);
// By-val infos have no toHashMap adaptor; they are inserted one by one,
// keyed by the bytecode index wrapped in a CodeOrigin.
3013 void CodeBlock::getByValInfoMap(const ConcurrentJSLocker&, ByValInfoMap& result)
3016 if (JITCode::isJIT(jitType())) {
3017 for (auto* byValInfo : m_byValInfos)
3018 result.add(CodeOrigin(byValInfo->bytecodeIndex), byValInfo);
3021 UNUSED_PARAM(result);
3025 void CodeBlock::getByValInfoMap(ByValInfoMap& result)
3027 ConcurrentJSLocker locker(m_lock);
3028 getByValInfoMap(locker, result);
// Allocation and lookup helpers for JIT inline-cache records. addStubInfo,
// addByValInfo and addCallLinkInfo take m_lock because concurrent compiler
// threads may be snapshotting these bags via the get*Map helpers.
// NOTE(review): the arithmetic-IC adders below do not take m_lock, unlike
// addStubInfo — presumably only reached from the owning thread; confirm.
3032 StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
3034 ConcurrentJSLocker locker(m_lock);
3035 return m_stubInfos.add(accessType);
3038 JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile)
3040 return m_addICs.add(arithProfile);
3043 JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile)
3045 return m_mulICs.add(arithProfile);
3048 JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile)
3050 return m_subICs.add(arithProfile);
3053 JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile)
3055 return m_negICs.add(arithProfile);
// Linear scan for the stub whose codeOrigin matches.
// NOTE(review): the return statements are missing from this listing.
3058 StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
3060 for (StructureStubInfo* stubInfo : m_stubInfos) {
3061 if (stubInfo->codeOrigin == codeOrigin)
3067 ByValInfo* CodeBlock::addByValInfo()
3069 ConcurrentJSLocker locker(m_lock);
3070 return m_byValInfos.add();
3073 CallLinkInfo* CodeBlock::addCallLinkInfo()
3075 ConcurrentJSLocker locker(m_lock);
3076 return m_callLinkInfos.add();
// Linear scan for the call link info at a given bytecode index.
// NOTE(review): the return statements are missing from this listing.
3079 CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
3081 for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
3082 if ((*iter)->codeOrigin() == CodeOrigin(index))
// Drops every JIT-only data structure once this block no longer has JIT
// code. Safe without coordination beyond m_lock — see the inline rationale.
3088 void CodeBlock::resetJITData()
3090 RELEASE_ASSERT(!JITCode::isJIT(jitType()));
3091 ConcurrentJSLocker locker(m_lock);
3093 // We can clear these because no other thread will have references to any stub infos, call
3094 // link infos, or by val infos if we don't have JIT code. Attempts to query these data
3095 // structures using the concurrent API (getStubInfoMap and friends) will return nothing if we
3096 // don't have JIT code.
3097 m_stubInfos.clear();
3098 m_callLinkInfos.clear();
3099 m_byValInfos.clear();
3101 // We can clear this because the DFG's queries to these data structures are guarded by whether
3102 // there is JIT code.
3103 m_rareCaseProfiles.clear();
// Strongly visits the CodeBlocks that OSR exits may land in: the alternative
// (lower-tier) block, plus each inline call frame's baseline CodeBlock.
3107 void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor)
3109 // We strongly visit OSR exits targets because we don't want to deal with
3110 // the complexity of generating an exit target CodeBlock on demand and
3111 // guaranteeing that it matches the details of the CodeBlock we compiled
3112 // the OSR exit against.
3114 visitor.append(&m_alternative);
3117 DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
3118 if (dfgCommon->inlineCallFrames) {
3119 for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) {
3120 ASSERT(inlineCallFrame->baselineCodeBlock);
3121 visitor.append(&inlineCallFrame->baselineCodeBlock);
// Marks everything this CodeBlock strongly owns: the global object, the
// owner executable, the unlinked code, the eval code cache, the constant
// registers, function expressions/declarations, object-allocation profiles,
// cached by-val symbols, and — for optimizing tiers — the OSR exit targets.
3127 void CodeBlock::stronglyVisitStrongReferences(const ConcurrentJSLocker& locker, SlotVisitor& visitor)
3129 visitor.append(&m_globalObject);
3130 visitor.append(&m_ownerExecutable);
3131 visitor.append(&m_unlinkedCode);
// NOTE(review): the m_rareData null-check guard appears to be missing from
// this listing — confirm against the full file.
3133 m_rareData->m_evalCodeCache.visitAggregate(visitor);
3134 visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
3135 for (size_t i = 0; i < m_functionExprs.size(); ++i)
3136 visitor.append(&m_functionExprs[i]);
3137 for (size_t i = 0; i < m_functionDecls.size(); ++i)
3138 visitor.append(&m_functionDecls[i]);
3139 for (unsigned i = 0; i < m_objectAllocationProfiles.size(); ++i)
3140 m_objectAllocationProfiles[i].visitAggregate(visitor);
3143 for (ByValInfo* byValInfo : m_byValInfos)
3144 visitor.append(&byValInfo->cachedSymbol);
3148 if (JITCode::isOptimizingJIT(jitType()))
3149 visitOSRExitTargets(locker, visitor);
// Escape hatch that marks the DFG's nominally-weak references *strongly*:
// transition origins/from/to cells, plain weak references, and weak structure
// references. Afterwards liveness is trivially "proved", so the weak-reference
// fixpoint need not revisit this block. Non-optimizing tiers have no DFG
// common data and the visitor parameter goes unused.
3153 void CodeBlock::stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor& visitor)
3155 UNUSED_PARAM(visitor);
3158 if (!JITCode::isOptimizingJIT(jitType()))
3161 DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
3163 for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
3164 if (!!dfgCommon->transitions[i].m_codeOrigin)
3165 visitor.append(&dfgCommon->transitions[i].m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
3166 visitor.append(&dfgCommon->transitions[i].m_from);
3167 visitor.append(&dfgCommon->transitions[i].m_to);
3170 for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i)
3171 visitor.append(&dfgCommon->weakReferences[i]);
3173 for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i)
3174 visitor.append(&dfgCommon->weakStructureReferences[i]);
3176 dfgCommon->livenessHasBeenProved = true;
// Walks the alternative() chain down to the lowest tier, which must be
// baseline code (or still JITType::None if never compiled).
// NOTE(review): the `return result;` line is missing from this listing.
3180 CodeBlock* CodeBlock::baselineAlternative()
3183 CodeBlock* result = this;
3184 while (result->alternative())
3185 result = result->alternative();
3186 RELEASE_ASSERT(result);
3187 RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
// Returns the baseline CodeBlock for this executable: this block itself when
// it is already baseline, otherwise the replacement's baseline alternative.
// NOTE(review): the `return this;` / `return result;` lines and the
// null-replacement branch body are missing from this listing.
3194 CodeBlock* CodeBlock::baselineVersion()
3197 if (JITCode::isBaselineCode(jitType()))
3199 CodeBlock* result = replacement();
3201 // This can happen if we're creating the original CodeBlock for an executable.
3202 // Assume that we're the baseline CodeBlock.
3203 RELEASE_ASSERT(jitType() == JITCode::None);
3206 result = result->baselineAlternative();
3214 bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
3216 return JITCode::isHigherTier(replacement()->jitType(), typeToReplace);
3219 bool CodeBlock::hasOptimizedReplacement()
3221 return hasOptimizedReplacement(jitType());
3225 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
3227 RELEASE_ASSERT(bytecodeOffset < instructions().size());
3228 return handlerForIndex(bytecodeOffset, requiredHandler);
// Delegates handler lookup to HandlerInfo over the rare data's exception
// handler table. NOTE(review): the guard for a missing m_rareData appears to
// be dropped from this listing — confirm against the full file.
3231 HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
3235 return HandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler);
// DFG/FTL only: allocates a fresh call-site index that shares the code origin
// (and therefore the exception handler) of originalCallSite — used by inline
// caches that create exception-handling call sites on the fly. In builds
// without the optimizing JITs this path is unreachable.
3238 CallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
3241 RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
3242 RELEASE_ASSERT(canGetCodeOrigin(originalCallSite));
// The original call site must already have a handler for the new one to share.
3243 ASSERT(!!handlerForIndex(originalCallSite.bits()));
3244 CodeOrigin originalOrigin = codeOrigin(originalCallSite);
3245 return m_jitCode->dfgCommon()->addUniqueCallSiteIndex(originalOrigin);
3247 // We never create new on-the-fly exception handling
3248 // call sites outside the DFG/FTL inline caches.
3249 UNUSED_PARAM(originalCallSite);
3250 RELEASE_ASSERT_NOT_REACHED();
3251 return CallSiteIndex(0u);
// Removes the exception handler whose [start, end) range covers the given
// call-site index; crashes if no handler matches, since callers only remove
// handlers they previously installed.
3255 void CodeBlock::removeExceptionHandlerForCallSite(CallSiteIndex callSiteIndex)
3257 RELEASE_ASSERT(m_rareData);
3258 Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
3259 unsigned index = callSiteIndex.bits();
3260 for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
3261 HandlerInfo& handler = exceptionHandlers[i];
// Half-open range check: start is inclusive, end is exclusive.
3262 if (handler.start <= index && handler.end > index) {
3263 exceptionHandlers.remove(i);
3268 RELEASE_ASSERT_NOT_REACHED();
3271 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
3273 RELEASE_ASSERT(bytecodeOffset < instructions().size());
3274 return ownerScriptExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
// Derives the source column for a bytecode offset via the full expression
// range query. NOTE(review): the local declarations (divot, startOffset,
// endOffset, line, column) and the final `return column;` are missing from
// this listing — confirm against the full file.
3277 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
3284 expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
// Maps a bytecode offset to its source expression range, translating the
// unlinked (function-relative) positions into absolute positions for this
// script: the divot is shifted by this block's source offset, and the line is
// shifted by the executable's first line.
3288 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const
3290 m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
3291 divot += m_sourceOffset;
// A relative line of 0 means we are still on the executable's first source
// line, so the column is offset by where that line starts; on later lines the
// column is simply made 1-based.
3292 column += line ? 1 : firstLineColumnOffset();
3293 line += ownerScriptExecutable()->firstLine();
// Scans the bytecode stream for an op_debug whose expression range matches
// the requested line (and column, unless the column is unspecified). Used to
// decide whether a breakpoint location maps onto real code.
// NOTE(review): the declaration of `unused` and the `return true;` /
// `return false;` lines are missing from this listing.
3296 bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
3298 Interpreter* interpreter = vm()->interpreter;
3299 const Instruction* begin = instructions().begin();
3300 const Instruction* end = instructions().end();
3301 for (const Instruction* it = begin; it != end;) {
3302 OpcodeID opcodeID = interpreter->getOpcodeID(it->u.opcode);
3303 if (opcodeID == op_debug) {
3304 unsigned bytecodeOffset = it - begin;
3306 unsigned opDebugLine;
3307 unsigned opDebugColumn;
3308 expressionRangeForBytecodeOffset(bytecodeOffset, unused, unused, unused, opDebugLine, opDebugColumn);
3309 if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
// Variable-length bytecode: advance by the current opcode's length.
3312 it += opcodeLengths[opcodeID];
// Releases excess vector capacity. EarlyShrink additionally shrinks the
// constant tables and the rare data's jump tables; later shrink passes must
// not, because pointers into those tables may already exist (see the trailing
// comment from the original author).
3317 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
3319 ConcurrentJSLocker locker(m_lock);
3321 m_rareCaseProfiles.shrinkToFit();
3323 if (shrinkMode == EarlyShrink) {
3324 m_constantRegisters.shrinkToFit();
3325 m_constantsSourceCodeRepresentation.shrinkToFit();
3328 m_rareData->m_switchJumpTables.shrinkToFit();
3329 m_rareData->m_stringSwitchJumpTables.shrinkToFit();
3331 } // else don't shrink these, because we would have already pointed pointers into these tables.
// Records an incoming JIT call site (plain or polymorphic) after noting the
// caller for the inlining heuristics; the intrusive lists allow all callers
// to be unlinked en masse when this block is jettisoned or destroyed.
3335 void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
3337 noticeIncomingCall(callerFrame);
3338 m_incomingCalls.push(incoming);
3341 void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
3343 noticeIncomingCall(callerFrame);
3344 m_incomingPolymorphicCalls.push(incoming);
3346 #endif // ENABLE(JIT)
// Unlinks every incoming LLInt, JIT, and polymorphic call site so that no
// caller keeps jumping into this block's code. Lists are drained from the
// front because unlink() removes the node from its list.
3348 void CodeBlock::unlinkIncomingCalls()
3350 while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
3351 m_incomingLLIntCalls.begin()->unlink();
3353 while (m_incomingCalls.begin() != m_incomingCalls.end())
3354 m_incomingCalls.begin()->unlink(*vm());
3355 while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
3356 m_incomingPolymorphicCalls.begin()->unlink(*vm());
3357 #endif // ENABLE(JIT)
// LLInt counterpart of linkIncomingCall: records an interpreter call site.
3360 void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
3362 noticeIncomingCall(callerFrame);
3363 m_incomingLLIntCalls.push(incoming);
3366 CodeBlock* CodeBlock::newReplacement()
3368 return ownerScriptExecutable()->newReplacementCodeBlockFor(specializationKind());
// Returns the CodeBlock currently installed on this block's executable,
// dispatching on this block's ClassInfo to cast to the matching executable
// type. NOTE(review): the WebAssembly branch's return statement is missing
// from this listing — confirm against the full file.
3372 CodeBlock* CodeBlock::replacement()
3374 const ClassInfo* classInfo = this->classInfo();
3376 if (classInfo == FunctionCodeBlock::info())
3377 return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
3379 if (classInfo == EvalCodeBlock::info())
3380 return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
3382 if (classInfo == ProgramCodeBlock::info())
3383 return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
3385 if (classInfo == ModuleProgramCodeBlock::info())
3386 return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock();
3388 #if ENABLE(WEBASSEMBLY)
3389 if (classInfo == WebAssemblyCodeBlock::info())
3393 RELEASE_ASSERT_NOT_REACHED();
// Computes this block's DFG capability level based on its kind: function
// code distinguishes call vs. construct entry points; module code reuses the
// program capability check; WebAssembly is never DFG-compilable.
3397 DFG::CapabilityLevel CodeBlock::computeCapabilityLevel()
3399 const ClassInfo* classInfo = this->classInfo();
3401 if (classInfo == FunctionCodeBlock::info()) {
3402 if (m_isConstructor)
3403 return DFG::functionForConstructCapabilityLevel(this);
3404 return DFG::functionForCallCapabilityLevel(this);
3407 if (classInfo == EvalCodeBlock::info())
3408 return DFG::evalCapabilityLevel(this);
3410 if (classInfo == ProgramCodeBlock::info())
3411 return DFG::programCapabilityLevel(this);
3413 if (classInfo == ModuleProgramCodeBlock::info())
3414 return DFG::programCapabilityLevel(this);
3416 #if ENABLE(WEBASSEMBLY)
3417 if (classInfo == WebAssemblyCodeBlock::info())
3418 return DFG::CannotCompile;
// Unknown CodeBlock subclass: treat as a logic error, not compilable.
3421 RELEASE_ASSERT_NOT_REACHED();
3422 return DFG::CannotCompile;
3425 #endif // ENABLE(JIT)
// Discards this block's (optimized) code. Two responsibilities, also spelled
// out inline: (1) force frames currently running this code to OSR-exit when
// they return into it, and (2) reinstall the alternative on the executable so
// new calls no longer enter it. Optionally counts a reoptimization, and takes
// care not to install code on a dead executable during GC.
3427 void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
3429 #if !ENABLE(DFG_JIT)
3431 UNUSED_PARAM(detail);
3434 CODEBLOCK_LOG_EVENT(this, "jettison", ("due to ", reason, ", counting = ", mode == CountReoptimization, ", detail = ", pointerDump(detail)));
3436 RELEASE_ASSERT(reason != Profiler::NotJettisoned);
// Verbose diagnostics: say what is being jettisoned and why.
3439 if (DFG::shouldDumpDisassembly()) {
3440 dataLog("Jettisoning ", *this);
3441 if (mode == CountReoptimization)
3442 dataLog(" and counting reoptimization");
3443 dataLog(" due to ", reason);
3445 dataLog(", ", *detail);
// When jettisoning due to dead weak references, list exactly which
// transitions and weak cells died, for debugging.
3449 if (reason == Profiler::JettisonDueToWeakReference) {
3450 if (DFG::shouldDumpDisassembly()) {
3451 dataLog(*this, " will be jettisoned because of the following dead references:\n");
3452 DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
3453 for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
3454 DFG::WeakReferenceTransition& transition = dfgCommon->transitions[i];
3455 JSCell* origin = transition.m_codeOrigin.get();
3456 JSCell* from = transition.m_from.get();
3457 JSCell* to = transition.m_to.get();
3458 if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
3460 dataLog(" Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
3462 for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
3463 JSCell* weak = dfgCommon->weakReferences[i].get();
3464 if (Heap::isMarked(weak))
3466 dataLog(" Weak reference ", RawPointer(weak), ".\n");
3470 #endif // ENABLE(DFG_JIT)
// Installing/invalidating code must not race with a collection.
3472 DeferGCForAWhile deferGC(*heap());
3474 // We want to accomplish two things here:
3475 // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
3476 // we should OSR exit at the top of the next bytecode instruction after the return.
3477 // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
3480 if (reason != Profiler::JettisonDueToOldAge) {
3481 if (Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get())
3482 compilation->setJettisonReason(reason, detail);
3484 // This accomplishes (1), and does its own book-keeping about whether it has already happened.
3485 if (!jitCode()->dfgCommon()->invalidate()) {
3486 // We've already been invalidated.
3487 RELEASE_ASSERT(this != replacement() || (m_vm->heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable())));
3492 if (DFG::shouldDumpDisassembly())
3493 dataLog(" Did invalidate ", *this, "\n");
3495 // Count the reoptimization if that's what the user wanted.
3496 if (mode == CountReoptimization) {
3497 // FIXME: Maybe this should call alternative().
3498 // https://bugs.webkit.org/show_bug.cgi?id=123677
3499 baselineAlternative()->countReoptimization();
3500 if (DFG::shouldDumpDisassembly())
3501 dataLog(" Did count reoptimization for ", *this, "\n");
3504 if (this != replacement()) {
3505 // This means that we were never the entrypoint. This can happen for OSR entry code
3511 alternative()->optimizeAfterWarmUp();
3513 if (reason != Profiler::JettisonDueToOldAge)
3514 tallyFrequentExitSites();
3515 #endif // ENABLE(DFG_JIT)
3517 // Jettison can happen during GC. We don't want to install code to a dead executable
3518 // because that would add a dead object to the remembered set.
3519 if (m_vm->heap.isCurrentThreadBusy() && !Heap::isMarked(ownerScriptExecutable()))
3522 // This accomplishes (2).
3523 ownerScriptExecutable()->installCode(
3524 m_globalObject->vm(), alternative(), codeType(), specializationKind());
3527 if (DFG::shouldDumpDisassembly())
3528 dataLog(" Did install baseline version of ", *this, "\n");
3529 #endif // ENABLE(DFG_JIT)
3532 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
3534 if (!codeOrigin.inlineCallFrame)
3535 return globalObject();
3536 return codeOrigin.inlineCallFrame->baselineCodeBlock->globalObject();
// Stack-walking functor: once m_startCallFrame has been found, looks for a
// second frame executing m_codeBlock within the next m_depthToCheck frames,
// i.e. detects recursion near the top of the stack. Members are mutable
// because operator() is declared const (the form StackVisitor invokes).
3539 class RecursionCheckFunctor {
3541 RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
3542 : m_startCallFrame(startCallFrame)
3543 , m_codeBlock(codeBlock)
3544 , m_depthToCheck(depthToCheck)
3545 , m_foundStartCallFrame(false)
3546 , m_didRecurse(false)
3549 StackVisitor::Status operator()(StackVisitor& visitor) const
3551 CallFrame* currentCallFrame = visitor->callFrame();
3553 if (currentCallFrame == m_startCallFrame)
3554 m_foundStartCallFrame = true;
// Only frames at or below the start frame participate in the check.
3556 if (m_foundStartCallFrame) {
3557 if (visitor->callFrame()->codeBlock() == m_codeBlock) {
3558 m_didRecurse = true;
3559 return StackVisitor::Done;
// Stop once the depth budget is exhausted.
3562 if (!m_depthToCheck--)
3563 return StackVisitor::Done;
3566 return StackVisitor::Continue;
3569 bool didRecurse() const { return m_didRecurse; }
3572 CallFrame* m_startCallFrame;
3573 CodeBlock* m_codeBlock;
3574 mutable unsigned m_depthToCheck;
3575 mutable bool m_foundStartCallFrame;
3576 mutable bool m_didRecurse;
// Inlining heuristic, run whenever a caller links a call to this block.
// Clears m_shouldAlwaysBeInlined ("SABI") whenever the evidence says this
// function will not — or should not — be inlined into its caller: native
// caller, oversized caller, LLInt caller, already-optimized caller,
// non-function caller, detected recursion, or a caller that is not a DFG
// candidate. Several branches only log and return without clearing the flag.
3579 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
3581 CodeBlock* callerCodeBlock = callerFrame->codeBlock();
3583 if (Options::verboseCallLink())
3584 dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
// Nothing to do once the flag is already cleared.
3587 if (!m_shouldAlwaysBeInlined)
3590 if (!callerCodeBlock) {
3591 m_shouldAlwaysBeInlined = false;
3592 if (Options::verboseCallLink())
3593 dataLog(" Clearing SABI because caller is native.\n");
3597 if (!hasBaselineJITProfiling())
3600 if (!DFG::mightInlineFunction(this))
3603 if (!canInline(capabilityLevelState()))
3606 if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
3607 m_shouldAlwaysBeInlined = false;
3608 if (Options::verboseCallLink())
3609 dataLog(" Clearing SABI because caller is too large.\n");
3613 if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
3614 // If the caller is still in the interpreter, then we can't expect inlining to
3615 // happen anytime soon. Assume it's profitable to optimize it separately. This
3616 // ensures that a function is SABI only if it is called no more frequently than
3617 // any of its callers.
3618 m_shouldAlwaysBeInlined = false;
3619 if (Options::verboseCallLink())
3620 dataLog(" Clearing SABI because caller is in LLInt.\n");
3624 if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
3625 m_shouldAlwaysBeInlined = false;
3626 if (Options::verboseCallLink())
// NOTE(review): "bcause" typo in the log text below; left untouched here
// since it is runtime output and this listing is incomplete.
3627 dataLog(" Clearing SABI bcause caller was already optimized.\n");
3631 if (callerCodeBlock->codeType() != FunctionCode) {
3632 // If the caller is either eval or global code, assume that that won't be
3633 // optimized anytime soon. For eval code this is particularly true since we
3634 // delay eval optimization by a *lot*.
3635 m_shouldAlwaysBeInlined = false;
3636 if (Options::verboseCallLink())
3637 dataLog(" Clearing SABI because caller is not a function.\n");
3641 // Recursive calls won't be inlined.
3642 RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
3643 vm()->topCallFrame->iterate(functor);
3645 if (functor.didRecurse()) {
3646 if (Options::verboseCallLink())
3647 dataLog(" Clearing SABI because recursion was detected.\n");
3648 m_shouldAlwaysBeInlined = false;
// A caller with no capability level yet indicates a tier bookkeeping bug.
3652 if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) {
3653 dataLog("In call from ", *callerCodeBlock, " ", callerFrame->codeOrigin(), " to ", *this, ": caller's DFG capability level is not set.\n");
3657 if (canCompile(callerCodeBlock->capabilityLevelState()))
3660 if (Options::verboseCallLink())
3661 dataLog(" Clearing SABI because the caller is not a DFG candidate.\n");
3663 m_shouldAlwaysBeInlined = false;
// Accessor for the reoptimization retry counter; asserts the saturation
// invariant maintained by countReoptimization().
3667 unsigned CodeBlock::reoptimizationRetryCounter() const
3670 ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
3671 return m_reoptimizationRetryCounter;
3674 #endif // ENABLE(JIT)
// Callee-save bookkeeping: the setters install the RegisterAtOffsetList, and
// the helpers convert a callee-save register count into the number of
// Register-sized virtual-register slots required to spill them.
3678 void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters)
3680 m_calleeSaveRegisters = std::make_unique<RegisterAtOffsetList>(calleeSaveRegisters);
3683 void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList)
3685 m_calleeSaveRegisters = WTFMove(registerAtOffsetList);
// Rounds byte size up to a whole number of Register slots.
3688 static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters)
3690 static const unsigned cpuRegisterSize = sizeof(void*);
3691 return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * cpuRegisterSize) / sizeof(Register));
3695 size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()
3697 return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters());
3700 size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters()
3702 return roundCalleeSaveSpaceAsVirtualRegisters(m_calleeSaveRegisters->size());
// Bumps the reoptimization counter, saturating at the configured maximum.
3705 void CodeBlock::countReoptimization()
3707 m_reoptimizationRetryCounter++;
3708 if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
3709 m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
// Counts how many optimizing compiles this baseline block has triggered,
// including reoptimizations. Under testTheFTL, an FTL compile failure takes a
// special early-return path — NOTE(review): that branch's return value is
// missing from this listing.
3712 unsigned CodeBlock::numberOfDFGCompiles()
3714 ASSERT(JITCode::isBaselineCode(jitType()));
3715 if (Options::testTheFTL()) {
3716 if (m_didFailFTLCompilation)
3718 return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
3720 return (JITCode::isOptimizingJIT(replacement()->jitType()) ? 1 : 0) + m_reoptimizationRetryCounter;
// Eval code gets its own (larger) optimization-threshold multiplier.
// NOTE(review): the non-eval return path is missing from this listing —
// confirm against the full file.
3723 int32_t CodeBlock::codeTypeThresholdMultiplier() const
3725 if (codeType() == EvalCode)
3726 return Options::evalThresholdMultiplier();
3731 double CodeBlock::optimizationThresholdScalingFactor()
3733 // This expression arises from doing a least-squares fit of
3735 // F[x_] =: a * Sqrt[x + b] + Abs[c * x] + d
3737 // against the data points:
3740 // 10 0.9 (smallest reasonable code block)
3741 // 200 1.0 (typical small-ish code block)
3742 // 320 1.2 (something I saw in 3d-cube that I wanted to optimize)
3743 // 1268 5.0 (something I saw in 3d-cube that I didn't want to optimize)
3744 // 4000 5.5 (random large size, used to cause the function to converge to a shallow curve of some sort)
3745 // 10000 6.0 (similar to above)
3747 // I achieve the minimization using the following Mathematica code:
3749 // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
3751 // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
3754 // Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
3755 // {a, b, c, d}][[2]]
3757 // And the code below (to initialize a, b, c, d) is generated by:
3759 // Print["const double " <> ToString[#[[1]]] <> " = " <>