2 * Copyright (C) 2008-2010, 2012-2016 Apple Inc. All rights reserved.
3 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
15 * its contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "CodeBlock.h"
33 #include "BasicBlockLocation.h"
34 #include "BytecodeGenerator.h"
35 #include "BytecodeUseDef.h"
36 #include "CallLinkStatus.h"
37 #include "DFGCapabilities.h"
38 #include "DFGCommon.h"
39 #include "DFGDriver.h"
40 #include "DFGJITCode.h"
41 #include "DFGWorklist.h"
43 #include "FunctionExecutableDump.h"
44 #include "GetPutInfo.h"
45 #include "InlineCallFrame.h"
46 #include "Interpreter.h"
48 #include "JSCJSValue.h"
49 #include "JSFunction.h"
50 #include "JSLexicalEnvironment.h"
51 #include "JSModuleEnvironment.h"
52 #include "LLIntEntrypoint.h"
53 #include "LowLevelInterpreter.h"
54 #include "JSCInlines.h"
55 #include "PCToCodeOriginMap.h"
56 #include "PolymorphicAccess.h"
57 #include "ProfilerDatabase.h"
58 #include "ReduceWhitespace.h"
60 #include "SlotVisitorInlines.h"
61 #include "StackVisitor.h"
62 #include "StructureStubInfo.h"
63 #include "TypeLocationCache.h"
64 #include "TypeProfiler.h"
65 #include "UnlinkedInstructionStream.h"
66 #include <wtf/BagToHashMap.h>
67 #include <wtf/CommaPrinter.h>
68 #include <wtf/StringExtras.h>
69 #include <wtf/StringPrintStream.h>
70 #include <wtf/text/UniquedStringImpl.h>
73 #include "RegisterAtOffsetList.h"
77 #include "DFGOperations.h"
81 #include "FTLJITCode.h"
// ClassInfo tables for the CodeBlock class hierarchy. Each s_info wires a
// class name, parent ClassInfo and method table into the JSC cell type
// system. Interior initializer fields for CodeBlock::s_info are elided in
// this excerpt.
86 const ClassInfo CodeBlock::s_info = {
88 CREATE_METHOD_TABLE(CodeBlock)
// Code blocks for ordinary JS functions.
91 const ClassInfo FunctionCodeBlock::s_info = {
92 "FunctionCodeBlock", &Base::s_info, 0,
93 CREATE_METHOD_TABLE(FunctionCodeBlock)
// WebAssembly support is compile-time optional.
96 #if ENABLE(WEBASSEMBLY)
97 const ClassInfo WebAssemblyCodeBlock::s_info = {
98 "WebAssemblyCodeBlock", &Base::s_info, 0,
99 CREATE_METHOD_TABLE(WebAssemblyCodeBlock)
// Shared base for program/module/eval (non-function) code.
103 const ClassInfo GlobalCodeBlock::s_info = {
104 "GlobalCodeBlock", &Base::s_info, 0,
105 CREATE_METHOD_TABLE(GlobalCodeBlock)
108 const ClassInfo ProgramCodeBlock::s_info = {
109 "ProgramCodeBlock", &Base::s_info, 0,
110 CREATE_METHOD_TABLE(ProgramCodeBlock)
113 const ClassInfo ModuleProgramCodeBlock::s_info = {
114 "ModuleProgramCodeBlock", &Base::s_info, 0,
115 CREATE_METHOD_TABLE(ModuleProgramCodeBlock)
118 const ClassInfo EvalCodeBlock::s_info = {
119 "EvalCodeBlock", &Base::s_info, 0,
120 CREATE_METHOD_TABLE(EvalCodeBlock)
123 void FunctionCodeBlock::destroy(JSCell* cell)
125 jsCast<FunctionCodeBlock*>(cell)->~FunctionCodeBlock();
128 #if ENABLE(WEBASSEMBLY)
129 void WebAssemblyCodeBlock::destroy(JSCell* cell)
131 jsCast<WebAssemblyCodeBlock*>(cell)->~WebAssemblyCodeBlock();
135 void ProgramCodeBlock::destroy(JSCell* cell)
137 jsCast<ProgramCodeBlock*>(cell)->~ProgramCodeBlock();
140 void ModuleProgramCodeBlock::destroy(JSCell* cell)
142 jsCast<ModuleProgramCodeBlock*>(cell)->~ModuleProgramCodeBlock();
145 void EvalCodeBlock::destroy(JSCell* cell)
147 jsCast<EvalCodeBlock*>(cell)->~EvalCodeBlock();
// Best-effort human-readable name for this code block (used by dumps).
// For function code the name was inferred by the parser; the switch's
// non-function arms are elided in this excerpt.
150 CString CodeBlock::inferredName() const
152 switch (codeType()) {
158 return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
// Fallback: an explicit empty (but non-null) CString.
163 return CString("", 0);
// True once hash() has been computed and cached (body elided here).
167 bool CodeBlock::hasHash() const
// Hashing walks the source; that is only safe on the main thread, not on a
// concurrent compilation thread.
172 bool CodeBlock::isSafeToComputeHash() const
174 return !isCompilationThread();
// Lazily computes and caches a hash of (source, specialization kind),
// used to identify this block in logs and profiler output.
177 CodeBlockHash CodeBlock::hash() const
180 RELEASE_ASSERT(isSafeToComputeHash());
181 m_hash = CodeBlockHash(ownerScriptExecutable()->source(), specializationKind());
// Returns the source text for tooling. For non-function code this is the
// whole executable's source; for function code it reconstructs the span
// from the unlinked executable's offsets, rebased into the linked source.
186 CString CodeBlock::sourceCodeForTools() const
188 if (codeType() != FunctionCode)
189 return ownerScriptExecutable()->source().toUTF8();
191 SourceProvider* provider = source();
192 FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
193 UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
194 unsigned unlinkedStartOffset = unlinked->startOffset();
195 unsigned linkedStartOffset = executable->source().startOffset();
// delta rebases unlinked offsets into the linked provider's coordinates.
196 int delta = linkedStartOffset - unlinkedStartOffset;
197 unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
198 unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
201 provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8());
204 CString CodeBlock::sourceCodeOnOneLine() const
206 return reduceWhitespace(sourceCodeForTools());
// Returns the hash as a string when it is already cached or safe to compute;
// the fallback return for the unsafe case is elided in this excerpt.
209 CString CodeBlock::hashAsStringIfPossible() const
211 if (hasHash() || isSafeToComputeHash())
212 return toCString(hash());
// One-line diagnostic description of this code block, printed as if it were
// at the given JIT tier. Emits name#hash, pointer chain, code/JIT type,
// instruction count, and a series of optional state flags.
216 void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
218 out.print(inferredName(), "#", hashAsStringIfPossible());
219 out.print(":[", RawPointer(this), "->");
// The alternative() link points at the lower-tier replacement block.
221 out.print(RawPointer(alternative()), "->");
222 out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());
224 if (codeType() == FunctionCode)
225 out.print(specializationKind());
226 out.print(", ", instructionCount());
// Flags below reflect tiering/inlining decisions; the Baseline-only checks
// use this->jitType() (the actual tier), not the jitType parameter.
227 if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
228 out.print(" (ShouldAlwaysBeInlined)");
229 if (ownerScriptExecutable()->neverInline())
230 out.print(" (NeverInline)");
231 if (ownerScriptExecutable()->neverOptimize())
232 out.print(" (NeverOptimize)");
233 else if (ownerScriptExecutable()->neverFTLOptimize())
234 out.print(" (NeverFTLOptimize)");
235 if (ownerScriptExecutable()->didTryToEnterInLoop())
236 out.print(" (DidTryToEnterInLoop)");
237 if (ownerScriptExecutable()->isStrictMode())
238 out.print(" (StrictMode)");
239 if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
240 out.print(" (FTLFail)");
241 if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
242 out.print(" (HadFTLReplacement)");
246 void CodeBlock::dump(PrintStream& out) const
248 dumpAssumingJITType(out, jitType());
251 static CString idName(int id0, const Identifier& ident)
253 return toCString(ident.impl(), "(@id", id0, ")");
256 CString CodeBlock::registerName(int r) const
258 if (isConstantRegisterIndex(r))
259 return constantName(r);
261 return toCString(VirtualRegister(r));
264 CString CodeBlock::constantName(int index) const
266 JSValue value = getConstant(index);
267 return toCString(value, "(", VirtualRegister(index), ")");
270 static CString regexpToSourceString(RegExp* regExp)
272 char postfix[5] = { '/', 0, 0, 0, 0 };
274 if (regExp->global())
275 postfix[index++] = 'g';
276 if (regExp->ignoreCase())
277 postfix[index++] = 'i';
278 if (regExp->multiline())
279 postfix[index] = 'm';
280 if (regExp->sticky())
281 postfix[index++] = 'y';
282 if (regExp->unicode())
283 postfix[index++] = 'u';
285 return toCString("/", regExp->pattern().impl(), postfix);
288 static CString regexpName(int re, RegExp* regexp)
290 return toCString(regexpToSourceString(regexp), "(@re", re, ")");
// Maps a DebugHookID to its printable name for bytecode dumps. NEVER_INLINE
// keeps this cold string table out of hot callers. The post-switch fallback
// (after the RELEASE_ASSERT) is elided in this excerpt.
293 NEVER_INLINE static const char* debugHookName(int debugHookID)
295 switch (static_cast<DebugHookID>(debugHookID)) {
296 case DidEnterCallFrame:
297 return "didEnterCallFrame";
298 case WillLeaveCallFrame:
299 return "willLeaveCallFrame";
300 case WillExecuteStatement:
301 return "willExecuteStatement";
302 case WillExecuteProgram:
303 return "willExecuteProgram";
304 case DidExecuteProgram:
305 return "didExecuteProgram";
306 case DidReachBreakpoint:
307 return "didReachBreakpoint";
// An unknown hook id is a programmer error.
310 RELEASE_ASSERT_NOT_REACHED();
314 void CodeBlock::printUnaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
316 int r0 = (++it)->u.operand;
317 int r1 = (++it)->u.operand;
319 printLocationAndOp(out, exec, location, it, op);
320 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
323 void CodeBlock::printBinaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
325 int r0 = (++it)->u.operand;
326 int r1 = (++it)->u.operand;
327 int r2 = (++it)->u.operand;
328 printLocationAndOp(out, exec, location, it, op);
329 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
332 void CodeBlock::printConditionalJump(PrintStream& out, ExecState* exec, const Instruction*, const Instruction*& it, int location, const char* op)
334 int r0 = (++it)->u.operand;
335 int offset = (++it)->u.operand;
336 printLocationAndOp(out, exec, location, it, op);
337 out.printf("%s, %d(->%d)", registerName(r0).data(), offset, location + offset);
// Dump helper for the get_by_id family: picks a printable op name from the
// actual opcode (the switch's other cases and the 'op' binding are elided
// here), then prints dst, base, and identifier operands.
340 void CodeBlock::printGetByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it)
343 switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
347 case op_get_array_length:
// Any opcode not in the family is a caller bug.
351 RELEASE_ASSERT_NOT_REACHED();
352 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
356 int r0 = (++it)->u.operand;
357 int r1 = (++it)->u.operand;
358 int id0 = (++it)->u.operand;
359 printLocationAndOp(out, exec, location, it, op);
360 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
361 it += 4; // Increment up to the value profiler.
// Prints "name = <Structure*>" and, when the structure resolves the given
// identifier, its property offset. getConcurrently makes the lookup safe
// from a compilation thread.
364 static void dumpStructure(PrintStream& out, const char* name, Structure* structure, const Identifier& ident)
369 out.printf("%s = %p", name, structure);
371 PropertyOffset offset = structure->getConcurrently(ident.impl());
372 if (offset != invalidOffset)
373 out.printf(" (offset = %d)", offset);
// Prints each structure in a prototype-chain cache, via dumpStructure.
// The loop's termination condition line is elided in this excerpt.
376 static void dumpChain(PrintStream& out, StructureChain* chain, const Identifier& ident)
378 out.printf("chain = %p: [", chain);
380 for (WriteBarrier<Structure>* currentStructure = chain->head();
382 ++currentStructure) {
387 dumpStructure(out, "struct", currentStructure->get(), ident);
// Appends the inline-cache state of a get_by_id at 'location': first the
// LLInt's inline cache (structure slot in the instruction stream), then the
// JIT's StructureStubInfo, if any. Several closing braces/breaks are elided
// in this excerpt.
392 void CodeBlock::printGetByIdCacheStatus(PrintStream& out, ExecState* exec, int location, const StubInfoMap& map)
394 Instruction* instruction = instructions().begin() + location;
// Operand 3 of get_by_id is the property-name identifier index.
396 const Identifier& ident = identifier(instruction[3].u.operand);
398 UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.
400 if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_array_length)
401 out.printf(" llint(array_length)");
// Operand 4 caches the Structure seen by the LLInt.
402 else if (StructureID structureID = instruction[4].u.structureID) {
403 Structure* structure = m_vm->heap.structureIDTable().get(structureID);
404 out.printf(" llint(");
405 dumpStructure(out, "struct", structure, ident);
// JIT-level cache state, keyed by bytecode origin.
410 if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
411 StructureStubInfo& stubInfo = *stubPtr;
412 if (stubInfo.resetByGC)
413 out.print(" (Reset By GC)");
417 Structure* baseStructure = nullptr;
418 PolymorphicAccess* stub = nullptr;
420 switch (stubInfo.cacheType) {
421 case CacheType::GetByIdSelf:
423 baseStructure = stubInfo.u.byIdSelf.baseObjectStructure.get();
425 case CacheType::Stub:
427 stub = stubInfo.u.stub;
429 case CacheType::Unset:
433 RELEASE_ASSERT_NOT_REACHED();
439 dumpStructure(out, "struct", baseStructure, ident);
443 out.print(", ", *stub);
// Same idea for put_by_id: dump LLInt transition cache (old structure,
// new structure, chain) and then the JIT stub state.
452 void CodeBlock::printPutByIdCacheStatus(PrintStream& out, int location, const StubInfoMap& map)
454 Instruction* instruction = instructions().begin() + location;
// Operand 2 of put_by_id is the property-name identifier index.
456 const Identifier& ident = identifier(instruction[2].u.operand);
458 UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.
460 out.print(", ", instruction[8].u.putByIdFlags);
462 if (StructureID structureID = instruction[4].u.structureID) {
463 Structure* structure = m_vm->heap.structureIDTable().get(structureID);
464 out.print(" llint(");
// A cached new-structure means this is a transition put (adds a property).
465 if (StructureID newStructureID = instruction[6].u.structureID) {
466 Structure* newStructure = m_vm->heap.structureIDTable().get(newStructureID);
467 dumpStructure(out, "prev", structure, ident);
469 dumpStructure(out, "next", newStructure, ident);
470 if (StructureChain* chain = instruction[7].u.structureChain.get()) {
472 dumpChain(out, chain, ident);
475 dumpStructure(out, "struct", structure, ident);
480 if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
481 StructureStubInfo& stubInfo = *stubPtr;
482 if (stubInfo.resetByGC)
483 out.print(" (Reset By GC)");
487 switch (stubInfo.cacheType) {
488 case CacheType::PutByIdReplace:
489 out.print("replace, ");
490 dumpStructure(out, "struct", stubInfo.u.byIdSelf.baseObjectStructure.get(), ident);
492 case CacheType::Stub: {
493 out.print("stub, ", *stubInfo.u.stub);
496 case CacheType::Unset:
500 RELEASE_ASSERT_NOT_REACHED();
// Dump helper for call-family ops: prints dst, callee, argc, register
// offset, then (optionally) LLInt/JIT call caches and a CallLinkStatus.
// Only stable on non-FTL tiers — hence the FTLJIT guard below.
510 void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap& map)
512 int dst = (++it)->u.operand;
513 int func = (++it)->u.operand;
514 int argCount = (++it)->u.operand;
515 int registerOffset = (++it)->u.operand;
516 printLocationAndOp(out, exec, location, it, op);
517 out.printf("%s, %s, %d, %d", registerName(dst).data(), registerName(func).data(), argCount, registerOffset);
518 if (cacheDumpMode == DumpCaches) {
519 LLIntCallLinkInfo* callLinkInfo = it[1].u.callLinkInfo;
520 if (callLinkInfo->lastSeenCallee) {
522 " llint(%p, exec %p)",
523 callLinkInfo->lastSeenCallee.get(),
524 callLinkInfo->lastSeenCallee->executable());
527 if (CallLinkInfo* info = map.get(CodeOrigin(location))) {
528 JSFunction* target = info->lastSeenCallee();
530 out.printf(" jit(%p, exec %p)", target, target->executable());
533 if (jitType() != JITCode::FTLJIT)
534 out.print(" status(", CallLinkStatus::computeFor(this, location, map), ")");
541 dumpArrayProfiling(out, it, hasPrintedProfiling);
542 dumpValueProfiling(out, it, hasPrintedProfiling);
// Dump helper for put_by_id-family ops: base register, identifier, value.
// NOTE(review): the original-line gap after the printf suggests a trailing
// it-advance over the cache operands is elided in this excerpt — confirm
// against the full source before restructuring.
545 void CodeBlock::printPutByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
547 int r0 = (++it)->u.operand;
548 int id0 = (++it)->u.operand;
549 int r1 = (++it)->u.operand;
550 printLocationAndOp(out, exec, location, it, op);
551 out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data());
555 void CodeBlock::dumpSource()
557 dumpSource(WTF::dataFile());
// Prints this block's source text. Function code is reconstructed as
// "function <name>(params){...}" from provider offsets; other code prints
// the executable's whole source view (the else branch is elided here).
560 void CodeBlock::dumpSource(PrintStream& out)
562 ScriptExecutable* executable = ownerScriptExecutable();
563 if (executable->isFunctionExecutable()) {
564 FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
565 StringView source = functionExecutable->source().provider()->getRange(
566 functionExecutable->parametersStartOffset(),
567 functionExecutable->typeProfilingEndOffset() + 1); // Type profiling end offset is the character before the '}'.
569 out.print("function ", inferredName(), source);
572 out.print(executable->source().view());
575 void CodeBlock::dumpBytecode()
577 dumpBytecode(WTF::dataFile());
// Full bytecode dump: header stats, every instruction, then the identifier
// table, constant pool, regexps, exception handlers, switch jump tables and
// generator liveness. Several loop heads / do-while openers are elided in
// this excerpt.
580 void CodeBlock::dumpBytecode(PrintStream& out)
582 // We only use the ExecState* for things that don't actually lead to JS execution,
583 // like converting a JSString to a String. Hence the globalExec is appropriate.
584 ExecState* exec = m_globalObject->globalExec();
586 size_t instructionCount = 0;
// Count logical instructions by stepping over each opcode's length.
588 for (size_t i = 0; i < instructions().size(); i += opcodeLengths[exec->interpreter()->getOpcodeID(instructions()[i].u.opcode)])
593 ": %lu m_instructions; %lu bytes; %d parameter(s); %d callee register(s); %d variable(s)",
594 static_cast<unsigned long>(instructions().size()),
595 static_cast<unsigned long>(instructions().size() * sizeof(Instruction)),
596 m_numParameters, m_numCalleeLocals, m_numVars);
// Snapshot inline-cache maps once so the per-instruction dump can consult
// them without re-locking.
599 StubInfoMap stubInfos;
600 CallLinkInfoMap callLinkInfos;
601 getStubInfoMap(stubInfos);
602 getCallLinkInfoMap(callLinkInfos);
604 const Instruction* begin = instructions().begin();
605 const Instruction* end = instructions().end();
// dumpBytecode advances 'it' past each instruction's operands; the ++it
// here then steps onto the next opcode.
606 for (const Instruction* it = begin; it != end; ++it)
607 dumpBytecode(out, exec, begin, it, stubInfos, callLinkInfos);
609 if (numberOfIdentifiers()) {
610 out.printf("\nIdentifiers:\n");
613 out.printf("  id%u = %s\n", static_cast<unsigned>(i), identifier(i).string().utf8().data());
615 } while (i != numberOfIdentifiers());
618 if (!m_constantRegisters.isEmpty()) {
619 out.printf("\nConstants:\n");
// Annotate each constant with how it appeared in source (double/integer).
622 const char* sourceCodeRepresentationDescription = nullptr;
623 switch (m_constantsSourceCodeRepresentation[i]) {
624 case SourceCodeRepresentation::Double:
625 sourceCodeRepresentationDescription = ": in source as double";
627 case SourceCodeRepresentation::Integer:
628 sourceCodeRepresentationDescription = ": in source as integer";
630 case SourceCodeRepresentation::Other:
631 sourceCodeRepresentationDescription = "";
634 out.printf("   k%u = %s%s\n", static_cast<unsigned>(i), toCString(m_constantRegisters[i].get()).data(), sourceCodeRepresentationDescription);
636 } while (i < m_constantRegisters.size());
639 if (size_t count = m_unlinkedCode->numberOfRegExps()) {
640 out.printf("\nm_regexps:\n");
643 out.printf("  re%u = %s\n", static_cast<unsigned>(i), regexpToSourceString(m_unlinkedCode->regexp(i)).data());
648 dumpExceptionHandlers(out);
650 if (m_rareData && !m_rareData->m_switchJumpTables.isEmpty()) {
651 out.printf("Switch Jump Tables:\n");
654 out.printf("  %1d = {\n", i);
656 Vector<int32_t>::const_iterator end = m_rareData->m_switchJumpTables[i].branchOffsets.end();
657 for (Vector<int32_t>::const_iterator iter = m_rareData->m_switchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) {
// Branch offsets are stored relative to the table's minimum case value.
660 out.printf("\t\t%4d => %04d\n", entry + m_rareData->m_switchJumpTables[i].min, *iter);
664 } while (i < m_rareData->m_switchJumpTables.size());
667 if (m_rareData && !m_rareData->m_stringSwitchJumpTables.isEmpty()) {
668 out.printf("\nString Switch Jump Tables:\n");
671 out.printf("  %1d = {\n", i);
672 StringJumpTable::StringOffsetTable::const_iterator end = m_rareData->m_stringSwitchJumpTables[i].offsetTable.end();
673 for (StringJumpTable::StringOffsetTable::const_iterator iter = m_rareData->m_stringSwitchJumpTables[i].offsetTable.begin(); iter != end; ++iter)
674 out.printf("\t\t\"%s\" => %04d\n", iter->key->utf8().data(), iter->value.branchOffset);
677 } while (i < m_rareData->m_stringSwitchJumpTables.size());
// Generator support: which callee locals are live across each yield point.
680 if (m_rareData && !m_rareData->m_liveCalleeLocalsAtYield.isEmpty()) {
681 out.printf("\nLive Callee Locals:\n");
684 const FastBitVector& liveness = m_rareData->m_liveCalleeLocalsAtYield[i];
685 out.printf("  live%1u = ", i);
689 } while (i < m_rareData->m_liveCalleeLocalsAtYield.size());
// Prints the exception handler table (start/end/target bytecode indices and
// handler type), when present. The do-while opener is elided in this excerpt.
695 void CodeBlock::dumpExceptionHandlers(PrintStream& out)
697 if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
698 out.printf("\nException Handlers:\n");
701 HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
702 out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] } %s\n",
703 i + 1, handler.start, handler.end, handler.target, handler.typeName());
705 } while (i < m_rareData->m_exceptionHandlers.size());
// Emits the separator before the first piece of profiling info on a dumped
// instruction line; subsequent pieces reuse the already-open section.
709 void CodeBlock::beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling)
711 if (hasPrintedProfiling) {
717 hasPrintedProfiling = true;
// Appends this instruction's ValueProfile summary; 'it' is expected to sit
// on the profile slot. Takes the CodeBlock lock for a consistent snapshot.
720 void CodeBlock::dumpValueProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
722 ConcurrentJITLocker locker(m_lock);
725 CString description = it->u.profile->briefDescription(locker);
726 if (!description.length())
728 beginDumpProfiling(out, hasPrintedProfiling);
729 out.print(description);
// Appends this instruction's ArrayProfile summary, if one is attached.
732 void CodeBlock::dumpArrayProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
734 ConcurrentJITLocker locker(m_lock);
737 if (!it->u.arrayProfile)
739 CString description = it->u.arrayProfile->briefDescription(locker, this);
740 if (!description.length())
742 beginDumpProfiling(out, hasPrintedProfiling);
743 out.print(description);
// Appends a named rare-case counter when it has fired at least once.
746 void CodeBlock::dumpRareCaseProfile(PrintStream& out, const char* name, RareCaseProfile* profile, bool& hasPrintedProfiling)
748 if (!profile || !profile->m_counter)
751 beginDumpProfiling(out, hasPrintedProfiling);
752 out.print(name, profile->m_counter);
// Appends an arithmetic ResultProfile summary (guard is elided here).
755 void CodeBlock::dumpResultProfile(PrintStream& out, ResultProfile* profile, bool& hasPrintedProfiling)
760 beginDumpProfiling(out, hasPrintedProfiling);
761 out.print("results: ", *profile);
764 void CodeBlock::printLocationAndOp(PrintStream& out, ExecState*, int location, const Instruction*&, const char* op)
766 out.printf("[%4d] %-17s ", location, op);
769 void CodeBlock::printLocationOpAndRegisterOperand(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, int operand)
771 printLocationAndOp(out, exec, location, it, op);
772 out.printf("%s", registerName(operand).data());
775 void CodeBlock::dumpBytecode(
776 PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it,
777 const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
779 int location = it - begin;
780 bool hasPrintedProfiling = false;
781 OpcodeID opcode = exec->interpreter()->getOpcodeID(it->u.opcode);
784 printLocationAndOp(out, exec, location, it, "enter");
788 int r0 = (++it)->u.operand;
789 printLocationOpAndRegisterOperand(out, exec, location, it, "get_scope", r0);
792 case op_create_direct_arguments: {
793 int r0 = (++it)->u.operand;
794 printLocationAndOp(out, exec, location, it, "create_direct_arguments");
795 out.printf("%s", registerName(r0).data());
798 case op_create_scoped_arguments: {
799 int r0 = (++it)->u.operand;
800 int r1 = (++it)->u.operand;
801 printLocationAndOp(out, exec, location, it, "create_scoped_arguments");
802 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
805 case op_create_cloned_arguments: {
806 int r0 = (++it)->u.operand;
807 printLocationAndOp(out, exec, location, it, "create_cloned_arguments");
808 out.printf("%s", registerName(r0).data());
812 int r0 = (++it)->u.operand;
813 int r1 = (++it)->u.operand;
814 unsigned argumentOffset = (++it)->u.unsignedValue;
815 printLocationAndOp(out, exec, location, it, "copy_rest");
816 out.printf("%s, %s, ", registerName(r0).data(), registerName(r1).data());
817 out.printf("ArgumentsOffset: %u", argumentOffset);
820 case op_get_rest_length: {
821 int r0 = (++it)->u.operand;
822 printLocationAndOp(out, exec, location, it, "get_rest_length");
823 out.printf("%s, ", registerName(r0).data());
824 unsigned argumentOffset = (++it)->u.unsignedValue;
825 out.printf("ArgumentsOffset: %u", argumentOffset);
828 case op_create_this: {
829 int r0 = (++it)->u.operand;
830 int r1 = (++it)->u.operand;
831 unsigned inferredInlineCapacity = (++it)->u.operand;
832 unsigned cachedFunction = (++it)->u.operand;
833 printLocationAndOp(out, exec, location, it, "create_this");
834 out.printf("%s, %s, %u, %u", registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity, cachedFunction);
838 int r0 = (++it)->u.operand;
839 printLocationOpAndRegisterOperand(out, exec, location, it, "to_this", r0);
840 Structure* structure = (++it)->u.structure.get();
842 out.print(", cache(struct = ", RawPointer(structure), ")");
843 out.print(", ", (++it)->u.toThisStatus);
847 int r0 = (++it)->u.operand;
848 printLocationOpAndRegisterOperand(out, exec, location, it, "op_check_tdz", r0);
851 case op_new_object: {
852 int r0 = (++it)->u.operand;
853 unsigned inferredInlineCapacity = (++it)->u.operand;
854 printLocationAndOp(out, exec, location, it, "new_object");
855 out.printf("%s, %u", registerName(r0).data(), inferredInlineCapacity);
856 ++it; // Skip object allocation profile.
860 int dst = (++it)->u.operand;
861 int argv = (++it)->u.operand;
862 int argc = (++it)->u.operand;
863 printLocationAndOp(out, exec, location, it, "new_array");
864 out.printf("%s, %s, %d", registerName(dst).data(), registerName(argv).data(), argc);
865 ++it; // Skip array allocation profile.
868 case op_new_array_with_size: {
869 int dst = (++it)->u.operand;
870 int length = (++it)->u.operand;
871 printLocationAndOp(out, exec, location, it, "new_array_with_size");
872 out.printf("%s, %s", registerName(dst).data(), registerName(length).data());
873 ++it; // Skip array allocation profile.
876 case op_new_array_buffer: {
877 int dst = (++it)->u.operand;
878 int argv = (++it)->u.operand;
879 int argc = (++it)->u.operand;
880 printLocationAndOp(out, exec, location, it, "new_array_buffer");
881 out.printf("%s, %d, %d", registerName(dst).data(), argv, argc);
882 ++it; // Skip array allocation profile.
885 case op_new_regexp: {
886 int r0 = (++it)->u.operand;
887 int re0 = (++it)->u.operand;
888 printLocationAndOp(out, exec, location, it, "new_regexp");
889 out.printf("%s, ", registerName(r0).data());
890 if (r0 >=0 && r0 < (int)m_unlinkedCode->numberOfRegExps())
891 out.printf("%s", regexpName(re0, regexp(re0)).data());
893 out.printf("bad_regexp(%d)", re0);
897 int r0 = (++it)->u.operand;
898 int r1 = (++it)->u.operand;
899 printLocationAndOp(out, exec, location, it, "mov");
900 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
903 case op_profile_type: {
904 int r0 = (++it)->u.operand;
909 printLocationAndOp(out, exec, location, it, "op_profile_type");
910 out.printf("%s", registerName(r0).data());
913 case op_profile_control_flow: {
914 BasicBlockLocation* basicBlockLocation = (++it)->u.basicBlockLocation;
915 printLocationAndOp(out, exec, location, it, "profile_control_flow");
916 out.printf("[%d, %d]", basicBlockLocation->startOffset(), basicBlockLocation->endOffset());
920 printUnaryOp(out, exec, location, it, "not");
924 printBinaryOp(out, exec, location, it, "eq");
928 printUnaryOp(out, exec, location, it, "eq_null");
932 printBinaryOp(out, exec, location, it, "neq");
936 printUnaryOp(out, exec, location, it, "neq_null");
940 printBinaryOp(out, exec, location, it, "stricteq");
944 printBinaryOp(out, exec, location, it, "nstricteq");
948 printBinaryOp(out, exec, location, it, "less");
952 printBinaryOp(out, exec, location, it, "lesseq");
956 printBinaryOp(out, exec, location, it, "greater");
960 printBinaryOp(out, exec, location, it, "greatereq");
964 int r0 = (++it)->u.operand;
965 printLocationOpAndRegisterOperand(out, exec, location, it, "inc", r0);
969 int r0 = (++it)->u.operand;
970 printLocationOpAndRegisterOperand(out, exec, location, it, "dec", r0);
974 printUnaryOp(out, exec, location, it, "to_number");
978 printUnaryOp(out, exec, location, it, "to_string");
982 printUnaryOp(out, exec, location, it, "negate");
986 printBinaryOp(out, exec, location, it, "add");
991 printBinaryOp(out, exec, location, it, "mul");
996 printBinaryOp(out, exec, location, it, "div");
1001 printBinaryOp(out, exec, location, it, "mod");
1005 printBinaryOp(out, exec, location, it, "sub");
1010 printBinaryOp(out, exec, location, it, "lshift");
1014 printBinaryOp(out, exec, location, it, "rshift");
1018 printBinaryOp(out, exec, location, it, "urshift");
1022 printBinaryOp(out, exec, location, it, "bitand");
1027 printBinaryOp(out, exec, location, it, "bitxor");
1032 printBinaryOp(out, exec, location, it, "bitor");
1036 case op_overrides_has_instance: {
1037 int r0 = (++it)->u.operand;
1038 int r1 = (++it)->u.operand;
1039 int r2 = (++it)->u.operand;
1040 printLocationAndOp(out, exec, location, it, "overrides_has_instance");
1041 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1044 case op_instanceof: {
1045 int r0 = (++it)->u.operand;
1046 int r1 = (++it)->u.operand;
1047 int r2 = (++it)->u.operand;
1048 printLocationAndOp(out, exec, location, it, "instanceof");
1049 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1052 case op_instanceof_custom: {
1053 int r0 = (++it)->u.operand;
1054 int r1 = (++it)->u.operand;
1055 int r2 = (++it)->u.operand;
1056 int r3 = (++it)->u.operand;
1057 printLocationAndOp(out, exec, location, it, "instanceof_custom");
1058 out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data());
1062 printUnaryOp(out, exec, location, it, "unsigned");
1066 printUnaryOp(out, exec, location, it, "typeof");
1070 printUnaryOp(out, exec, location, it, "is_empty");
1073 case op_is_undefined: {
1074 printUnaryOp(out, exec, location, it, "is_undefined");
1077 case op_is_boolean: {
1078 printUnaryOp(out, exec, location, it, "is_boolean");
1081 case op_is_number: {
1082 printUnaryOp(out, exec, location, it, "is_number");
1085 case op_is_string: {
1086 printUnaryOp(out, exec, location, it, "is_string");
1089 case op_is_object: {
1090 printUnaryOp(out, exec, location, it, "is_object");
1093 case op_is_object_or_null: {
1094 printUnaryOp(out, exec, location, it, "is_object_or_null");
1097 case op_is_function: {
1098 printUnaryOp(out, exec, location, it, "is_function");
1102 printBinaryOp(out, exec, location, it, "in");
1105 case op_try_get_by_id: {
1106 int r0 = (++it)->u.operand;
1107 int r1 = (++it)->u.operand;
1108 int id0 = (++it)->u.operand;
1109 printLocationAndOp(out, exec, location, it, "try_get_by_id");
1110 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
1114 case op_get_array_length: {
1115 printGetByIdOp(out, exec, location, it);
1116 printGetByIdCacheStatus(out, exec, location, stubInfos);
1117 dumpValueProfiling(out, it, hasPrintedProfiling);
1120 case op_put_by_id: {
1121 printPutByIdOp(out, exec, location, it, "put_by_id");
1122 printPutByIdCacheStatus(out, location, stubInfos);
1125 case op_put_getter_by_id: {
1126 int r0 = (++it)->u.operand;
1127 int id0 = (++it)->u.operand;
1128 int n0 = (++it)->u.operand;
1129 int r1 = (++it)->u.operand;
1130 printLocationAndOp(out, exec, location, it, "put_getter_by_id");
1131 out.printf("%s, %s, %d, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data());
1134 case op_put_setter_by_id: {
1135 int r0 = (++it)->u.operand;
1136 int id0 = (++it)->u.operand;
1137 int n0 = (++it)->u.operand;
1138 int r1 = (++it)->u.operand;
1139 printLocationAndOp(out, exec, location, it, "put_setter_by_id");
1140 out.printf("%s, %s, %d, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data());
1143 case op_put_getter_setter_by_id: {
1144 int r0 = (++it)->u.operand;
1145 int id0 = (++it)->u.operand;
1146 int n0 = (++it)->u.operand;
1147 int r1 = (++it)->u.operand;
1148 int r2 = (++it)->u.operand;
1149 printLocationAndOp(out, exec, location, it, "put_getter_setter_by_id");
1150 out.printf("%s, %s, %d, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data(), registerName(r2).data());
1153 case op_put_getter_by_val: {
1154 int r0 = (++it)->u.operand;
1155 int r1 = (++it)->u.operand;
1156 int n0 = (++it)->u.operand;
1157 int r2 = (++it)->u.operand;
1158 printLocationAndOp(out, exec, location, it, "put_getter_by_val");
1159 out.printf("%s, %s, %d, %s", registerName(r0).data(), registerName(r1).data(), n0, registerName(r2).data());
1162 case op_put_setter_by_val: {
1163 int r0 = (++it)->u.operand;
1164 int r1 = (++it)->u.operand;
1165 int n0 = (++it)->u.operand;
1166 int r2 = (++it)->u.operand;
1167 printLocationAndOp(out, exec, location, it, "put_setter_by_val");
1168 out.printf("%s, %s, %d, %s", registerName(r0).data(), registerName(r1).data(), n0, registerName(r2).data());
1171 case op_del_by_id: {
1172 int r0 = (++it)->u.operand;
1173 int r1 = (++it)->u.operand;
1174 int id0 = (++it)->u.operand;
1175 printLocationAndOp(out, exec, location, it, "del_by_id");
1176 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
1179 case op_get_by_val: {
1180 int r0 = (++it)->u.operand;
1181 int r1 = (++it)->u.operand;
1182 int r2 = (++it)->u.operand;
1183 printLocationAndOp(out, exec, location, it, "get_by_val");
1184 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1185 dumpArrayProfiling(out, it, hasPrintedProfiling);
1186 dumpValueProfiling(out, it, hasPrintedProfiling);
1189 case op_put_by_val: {
1190 int r0 = (++it)->u.operand;
1191 int r1 = (++it)->u.operand;
1192 int r2 = (++it)->u.operand;
1193 printLocationAndOp(out, exec, location, it, "put_by_val");
1194 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1195 dumpArrayProfiling(out, it, hasPrintedProfiling);
1198 case op_put_by_val_direct: {
1199 int r0 = (++it)->u.operand;
1200 int r1 = (++it)->u.operand;
1201 int r2 = (++it)->u.operand;
1202 printLocationAndOp(out, exec, location, it, "put_by_val_direct");
1203 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1204 dumpArrayProfiling(out, it, hasPrintedProfiling);
1207 case op_del_by_val: {
1208 int r0 = (++it)->u.operand;
1209 int r1 = (++it)->u.operand;
1210 int r2 = (++it)->u.operand;
1211 printLocationAndOp(out, exec, location, it, "del_by_val");
1212 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1215 case op_put_by_index: {
1216 int r0 = (++it)->u.operand;
1217 unsigned n0 = (++it)->u.operand;
1218 int r1 = (++it)->u.operand;
1219 printLocationAndOp(out, exec, location, it, "put_by_index");
1220 out.printf("%s, %u, %s", registerName(r0).data(), n0, registerName(r1).data());
1224 int offset = (++it)->u.operand;
1225 printLocationAndOp(out, exec, location, it, "jmp");
1226 out.printf("%d(->%d)", offset, location + offset);
1230 printConditionalJump(out, exec, begin, it, location, "jtrue");
1234 printConditionalJump(out, exec, begin, it, location, "jfalse");
1238 printConditionalJump(out, exec, begin, it, location, "jeq_null");
1241 case op_jneq_null: {
1242 printConditionalJump(out, exec, begin, it, location, "jneq_null");
1246 int r0 = (++it)->u.operand;
1247 Special::Pointer pointer = (++it)->u.specialPointer;
1248 int offset = (++it)->u.operand;
1249 printLocationAndOp(out, exec, location, it, "jneq_ptr");
1250 out.printf("%s, %d (%p), %d(->%d)", registerName(r0).data(), pointer, m_globalObject->actualPointerFor(pointer), offset, location + offset);
1254 int r0 = (++it)->u.operand;
1255 int r1 = (++it)->u.operand;
1256 int offset = (++it)->u.operand;
1257 printLocationAndOp(out, exec, location, it, "jless");
1258 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1262 int r0 = (++it)->u.operand;
1263 int r1 = (++it)->u.operand;
1264 int offset = (++it)->u.operand;
1265 printLocationAndOp(out, exec, location, it, "jlesseq");
1266 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1270 int r0 = (++it)->u.operand;
1271 int r1 = (++it)->u.operand;
1272 int offset = (++it)->u.operand;
1273 printLocationAndOp(out, exec, location, it, "jgreater");
1274 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1277 case op_jgreatereq: {
1278 int r0 = (++it)->u.operand;
1279 int r1 = (++it)->u.operand;
1280 int offset = (++it)->u.operand;
1281 printLocationAndOp(out, exec, location, it, "jgreatereq");
1282 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1286 int r0 = (++it)->u.operand;
1287 int r1 = (++it)->u.operand;
1288 int offset = (++it)->u.operand;
1289 printLocationAndOp(out, exec, location, it, "jnless");
1290 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1294 int r0 = (++it)->u.operand;
1295 int r1 = (++it)->u.operand;
1296 int offset = (++it)->u.operand;
1297 printLocationAndOp(out, exec, location, it, "jnlesseq");
1298 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1301 case op_jngreater: {
1302 int r0 = (++it)->u.operand;
1303 int r1 = (++it)->u.operand;
1304 int offset = (++it)->u.operand;
1305 printLocationAndOp(out, exec, location, it, "jngreater");
1306 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1309 case op_jngreatereq: {
1310 int r0 = (++it)->u.operand;
1311 int r1 = (++it)->u.operand;
1312 int offset = (++it)->u.operand;
1313 printLocationAndOp(out, exec, location, it, "jngreatereq");
1314 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1317 case op_loop_hint: {
1318 printLocationAndOp(out, exec, location, it, "loop_hint");
1322 printLocationAndOp(out, exec, location, it, "watchdog");
1325 case op_log_shadow_chicken_prologue: {
1326 printLocationAndOp(out, exec, location, it, "log_shadow_chicken_prologue");
1329 case op_log_shadow_chicken_tail: {
1330 printLocationAndOp(out, exec, location, it, "log_shadow_chicken_tail");
1333 case op_switch_imm: {
1334 int tableIndex = (++it)->u.operand;
1335 int defaultTarget = (++it)->u.operand;
1336 int scrutineeRegister = (++it)->u.operand;
1337 printLocationAndOp(out, exec, location, it, "switch_imm");
1338 out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1341 case op_switch_char: {
1342 int tableIndex = (++it)->u.operand;
1343 int defaultTarget = (++it)->u.operand;
1344 int scrutineeRegister = (++it)->u.operand;
1345 printLocationAndOp(out, exec, location, it, "switch_char");
1346 out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1349 case op_switch_string: {
1350 int tableIndex = (++it)->u.operand;
1351 int defaultTarget = (++it)->u.operand;
1352 int scrutineeRegister = (++it)->u.operand;
1353 printLocationAndOp(out, exec, location, it, "switch_string");
1354 out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1358 int r0 = (++it)->u.operand;
1359 int r1 = (++it)->u.operand;
1360 int f0 = (++it)->u.operand;
1361 printLocationAndOp(out, exec, location, it, "new_func");
1362 out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
1365 case op_new_generator_func: {
1366 int r0 = (++it)->u.operand;
1367 int r1 = (++it)->u.operand;
1368 int f0 = (++it)->u.operand;
1369 printLocationAndOp(out, exec, location, it, "new_generator_func");
1370 out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
1373 case op_new_arrow_func_exp: {
1374 int r0 = (++it)->u.operand;
1375 int r1 = (++it)->u.operand;
1376 int f0 = (++it)->u.operand;
1377 printLocationAndOp(out, exec, location, it, "op_new_arrow_func_exp");
1378 out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
1381 case op_new_func_exp: {
1382 int r0 = (++it)->u.operand;
1383 int r1 = (++it)->u.operand;
1384 int f0 = (++it)->u.operand;
1385 printLocationAndOp(out, exec, location, it, "new_func_exp");
1386 out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
1389 case op_new_generator_func_exp: {
1390 int r0 = (++it)->u.operand;
1391 int r1 = (++it)->u.operand;
1392 int f0 = (++it)->u.operand;
1393 printLocationAndOp(out, exec, location, it, "new_generator_func_exp");
1394 out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
1397 case op_set_function_name: {
1398 int funcReg = (++it)->u.operand;
1399 int nameReg = (++it)->u.operand;
1400 printLocationAndOp(out, exec, location, it, "set_function_name");
1401 out.printf("%s, %s", registerName(funcReg).data(), registerName(nameReg).data());
1405 printCallOp(out, exec, location, it, "call", DumpCaches, hasPrintedProfiling, callLinkInfos);
1408 case op_tail_call: {
1409 printCallOp(out, exec, location, it, "tail_call", DumpCaches, hasPrintedProfiling, callLinkInfos);
1412 case op_call_eval: {
1413 printCallOp(out, exec, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling, callLinkInfos);
1417 case op_construct_varargs:
1418 case op_call_varargs:
1419 case op_tail_call_varargs: {
1420 int result = (++it)->u.operand;
1421 int callee = (++it)->u.operand;
1422 int thisValue = (++it)->u.operand;
1423 int arguments = (++it)->u.operand;
1424 int firstFreeRegister = (++it)->u.operand;
1425 int varArgOffset = (++it)->u.operand;
1427 printLocationAndOp(out, exec, location, it, opcode == op_call_varargs ? "call_varargs" : opcode == op_construct_varargs ? "construct_varargs" : "tail_call_varargs");
1428 out.printf("%s, %s, %s, %s, %d, %d", registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister, varArgOffset);
1429 dumpValueProfiling(out, it, hasPrintedProfiling);
1434 int r0 = (++it)->u.operand;
1435 printLocationOpAndRegisterOperand(out, exec, location, it, "ret", r0);
1438 case op_construct: {
1439 printCallOp(out, exec, location, it, "construct", DumpCaches, hasPrintedProfiling, callLinkInfos);
1443 int r0 = (++it)->u.operand;
1444 int r1 = (++it)->u.operand;
1445 int count = (++it)->u.operand;
1446 printLocationAndOp(out, exec, location, it, "strcat");
1447 out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), count);
1450 case op_to_primitive: {
1451 int r0 = (++it)->u.operand;
1452 int r1 = (++it)->u.operand;
1453 printLocationAndOp(out, exec, location, it, "to_primitive");
1454 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
1457 case op_get_enumerable_length: {
1458 int dst = it[1].u.operand;
1459 int base = it[2].u.operand;
1460 printLocationAndOp(out, exec, location, it, "op_get_enumerable_length");
1461 out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
1462 it += OPCODE_LENGTH(op_get_enumerable_length) - 1;
1465 case op_has_indexed_property: {
1466 int dst = it[1].u.operand;
1467 int base = it[2].u.operand;
1468 int propertyName = it[3].u.operand;
1469 ArrayProfile* arrayProfile = it[4].u.arrayProfile;
1470 printLocationAndOp(out, exec, location, it, "op_has_indexed_property");
1471 out.printf("%s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), arrayProfile);
1472 it += OPCODE_LENGTH(op_has_indexed_property) - 1;
1475 case op_has_structure_property: {
1476 int dst = it[1].u.operand;
1477 int base = it[2].u.operand;
1478 int propertyName = it[3].u.operand;
1479 int enumerator = it[4].u.operand;
1480 printLocationAndOp(out, exec, location, it, "op_has_structure_property");
1481 out.printf("%s, %s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(enumerator).data());
1482 it += OPCODE_LENGTH(op_has_structure_property) - 1;
1485 case op_has_generic_property: {
1486 int dst = it[1].u.operand;
1487 int base = it[2].u.operand;
1488 int propertyName = it[3].u.operand;
1489 printLocationAndOp(out, exec, location, it, "op_has_generic_property");
1490 out.printf("%s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data());
1491 it += OPCODE_LENGTH(op_has_generic_property) - 1;
1494 case op_get_direct_pname: {
1495 int dst = it[1].u.operand;
1496 int base = it[2].u.operand;
1497 int propertyName = it[3].u.operand;
1498 int index = it[4].u.operand;
1499 int enumerator = it[5].u.operand;
1500 ValueProfile* profile = it[6].u.profile;
1501 printLocationAndOp(out, exec, location, it, "op_get_direct_pname");
1502 out.printf("%s, %s, %s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(index).data(), registerName(enumerator).data(), profile);
1503 it += OPCODE_LENGTH(op_get_direct_pname) - 1;
1507 case op_get_property_enumerator: {
1508 int dst = it[1].u.operand;
1509 int base = it[2].u.operand;
1510 printLocationAndOp(out, exec, location, it, "op_get_property_enumerator");
1511 out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
1512 it += OPCODE_LENGTH(op_get_property_enumerator) - 1;
1515 case op_enumerator_structure_pname: {
1516 int dst = it[1].u.operand;
1517 int enumerator = it[2].u.operand;
1518 int index = it[3].u.operand;
1519 printLocationAndOp(out, exec, location, it, "op_enumerator_structure_pname");
1520 out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data());
1521 it += OPCODE_LENGTH(op_enumerator_structure_pname) - 1;
1524 case op_enumerator_generic_pname: {
1525 int dst = it[1].u.operand;
1526 int enumerator = it[2].u.operand;
1527 int index = it[3].u.operand;
1528 printLocationAndOp(out, exec, location, it, "op_enumerator_generic_pname");
1529 out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data());
1530 it += OPCODE_LENGTH(op_enumerator_generic_pname) - 1;
1533 case op_to_index_string: {
1534 int dst = it[1].u.operand;
1535 int index = it[2].u.operand;
1536 printLocationAndOp(out, exec, location, it, "op_to_index_string");
1537 out.printf("%s, %s", registerName(dst).data(), registerName(index).data());
1538 it += OPCODE_LENGTH(op_to_index_string) - 1;
1541 case op_push_with_scope: {
1542 int dst = (++it)->u.operand;
1543 int newScope = (++it)->u.operand;
1544 int currentScope = (++it)->u.operand;
1545 printLocationAndOp(out, exec, location, it, "push_with_scope");
1546 out.printf("%s, %s, %s", registerName(dst).data(), registerName(newScope).data(), registerName(currentScope).data());
1549 case op_get_parent_scope: {
1550 int dst = (++it)->u.operand;
1551 int parentScope = (++it)->u.operand;
1552 printLocationAndOp(out, exec, location, it, "get_parent_scope");
1553 out.printf("%s, %s", registerName(dst).data(), registerName(parentScope).data());
1556 case op_create_lexical_environment: {
1557 int dst = (++it)->u.operand;
1558 int scope = (++it)->u.operand;
1559 int symbolTable = (++it)->u.operand;
1560 int initialValue = (++it)->u.operand;
1561 printLocationAndOp(out, exec, location, it, "create_lexical_environment");
1562 out.printf("%s, %s, %s, %s",
1563 registerName(dst).data(), registerName(scope).data(), registerName(symbolTable).data(), registerName(initialValue).data());
1567 int r0 = (++it)->u.operand;
1568 int r1 = (++it)->u.operand;
1569 printLocationAndOp(out, exec, location, it, "catch");
1570 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
1574 int r0 = (++it)->u.operand;
1575 printLocationOpAndRegisterOperand(out, exec, location, it, "throw", r0);
1578 case op_throw_static_error: {
1579 int k0 = (++it)->u.operand;
1580 int k1 = (++it)->u.operand;
1581 printLocationAndOp(out, exec, location, it, "throw_static_error");
1582 out.printf("%s, %s", constantName(k0).data(), k1 ? "true" : "false");
1586 int debugHookID = (++it)->u.operand;
1587 int hasBreakpointFlag = (++it)->u.operand;
1588 printLocationAndOp(out, exec, location, it, "debug");
1589 out.printf("%s, %d", debugHookName(debugHookID), hasBreakpointFlag);
1593 int generator = (++it)->u.operand;
1594 unsigned liveCalleeLocalsIndex = (++it)->u.unsignedValue;
1595 int offset = (++it)->u.operand;
1596 const FastBitVector& liveness = m_rareData->m_liveCalleeLocalsAtYield[liveCalleeLocalsIndex];
1597 printLocationAndOp(out, exec, location, it, "save");
1598 out.printf("%s, ", registerName(generator).data());
1600 out.printf("(@live%1u), %d(->%d)", liveCalleeLocalsIndex, offset, location + offset);
1604 int generator = (++it)->u.operand;
1605 unsigned liveCalleeLocalsIndex = (++it)->u.unsignedValue;
1606 const FastBitVector& liveness = m_rareData->m_liveCalleeLocalsAtYield[liveCalleeLocalsIndex];
1607 printLocationAndOp(out, exec, location, it, "resume");
1608 out.printf("%s, ", registerName(generator).data());
1610 out.printf("(@live%1u)", liveCalleeLocalsIndex);
1614 int condition = (++it)->u.operand;
1615 int line = (++it)->u.operand;
1616 printLocationAndOp(out, exec, location, it, "assert");
1617 out.printf("%s, %d", registerName(condition).data(), line);
1620 case op_profile_will_call: {
1621 int function = (++it)->u.operand;
1622 printLocationOpAndRegisterOperand(out, exec, location, it, "profile_will_call", function);
1625 case op_profile_did_call: {
1626 int function = (++it)->u.operand;
1627 printLocationOpAndRegisterOperand(out, exec, location, it, "profile_did_call", function);
1631 int r0 = (++it)->u.operand;
1632 printLocationOpAndRegisterOperand(out, exec, location, it, "end", r0);
1635 case op_resolve_scope: {
1636 int r0 = (++it)->u.operand;
1637 int scope = (++it)->u.operand;
1638 int id0 = (++it)->u.operand;
1639 ResolveType resolveType = static_cast<ResolveType>((++it)->u.operand);
1640 int depth = (++it)->u.operand;
1641 void* pointer = (++it)->u.pointer;
1642 printLocationAndOp(out, exec, location, it, "resolve_scope");
1643 out.printf("%s, %s, %s, <%s>, %d, %p", registerName(r0).data(), registerName(scope).data(), idName(id0, identifier(id0)).data(), resolveTypeName(resolveType), depth, pointer);
1646 case op_get_from_scope: {
1647 int r0 = (++it)->u.operand;
1648 int r1 = (++it)->u.operand;
1649 int id0 = (++it)->u.operand;
1650 GetPutInfo getPutInfo = GetPutInfo((++it)->u.operand);
1652 int operand = (++it)->u.operand; // Operand
1653 printLocationAndOp(out, exec, location, it, "get_from_scope");
1654 out.print(registerName(r0), ", ", registerName(r1));
1655 if (static_cast<unsigned>(id0) == UINT_MAX)
1656 out.print(", anonymous");
1658 out.print(", ", idName(id0, identifier(id0)));
1659 out.print(", ", getPutInfo.operand(), "<", resolveModeName(getPutInfo.resolveMode()), "|", resolveTypeName(getPutInfo.resolveType()), "|", initializationModeName(getPutInfo.initializationMode()), ">, ", operand);
1660 dumpValueProfiling(out, it, hasPrintedProfiling);
1663 case op_put_to_scope: {
1664 int r0 = (++it)->u.operand;
1665 int id0 = (++it)->u.operand;
1666 int r1 = (++it)->u.operand;
1667 GetPutInfo getPutInfo = GetPutInfo((++it)->u.operand);
1669 int operand = (++it)->u.operand; // Operand
1670 printLocationAndOp(out, exec, location, it, "put_to_scope");
1671 out.print(registerName(r0));
1672 if (static_cast<unsigned>(id0) == UINT_MAX)
1673 out.print(", anonymous");
1675 out.print(", ", idName(id0, identifier(id0)));
1676 out.print(", ", registerName(r1), ", ", getPutInfo.operand(), "<", resolveModeName(getPutInfo.resolveMode()), "|", resolveTypeName(getPutInfo.resolveType()), "|", initializationModeName(getPutInfo.initializationMode()), ">, <structure>, ", operand);
1679 case op_get_from_arguments: {
1680 int r0 = (++it)->u.operand;
1681 int r1 = (++it)->u.operand;
1682 int offset = (++it)->u.operand;
1683 printLocationAndOp(out, exec, location, it, "get_from_arguments");
1684 out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), offset);
1685 dumpValueProfiling(out, it, hasPrintedProfiling);
1688 case op_put_to_arguments: {
1689 int r0 = (++it)->u.operand;
1690 int offset = (++it)->u.operand;
1691 int r1 = (++it)->u.operand;
1692 printLocationAndOp(out, exec, location, it, "put_to_arguments");
1693 out.printf("%s, %d, %s", registerName(r0).data(), offset, registerName(r1).data());
1697 RELEASE_ASSERT_NOT_REACHED();
1700 dumpRareCaseProfile(out, "rare case: ", rareCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
1701 dumpResultProfile(out, resultProfileForBytecodeOffset(location), hasPrintedProfiling);
1704 Vector<DFG::FrequentExitSite> exitSites = exitProfile().exitSitesFor(location);
1705 if (!exitSites.isEmpty()) {
1706 out.print(" !! frequent exits: ");
1708 for (unsigned i = 0; i < exitSites.size(); ++i)
1709 out.print(comma, exitSites[i].kind(), " ", exitSites[i].jitType());
1711 #else // ENABLE(DFG_JIT)
1712 UNUSED_PARAM(location);
1713 #endif // ENABLE(DFG_JIT)
// Dumps the single bytecode instruction at `bytecodeOffset` to `out`.
// Resolves the ExecState from this block's global object and delegates to the
// per-opcode dump routine, passing along the stub-info and call-link-info maps
// so inline-cache status can be annotated alongside the disassembly.
1717 void CodeBlock::dumpBytecode(
1718 PrintStream& out, unsigned bytecodeOffset,
1719 const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
1721 ExecState* exec = m_globalObject->globalExec();
1722 const Instruction* it = instructions().begin() + bytecodeOffset;
1723 dumpBytecode(out, exec, instructions().begin(), it, stubInfos, callLinkInfos);
// X-macro: applies `macro` once per CodeBlock member vector, so that
// per-vector operations (e.g. size/statistics accounting) can be generated
// uniformly instead of being hand-written for each member.
1726 #define FOR_EACH_MEMBER_VECTOR(macro) \
1727 macro(instructions) \
1728 macro(callLinkInfos) \
1729 macro(linkedCallerList) \
1730 macro(identifiers) \
1731 macro(functionExpressions) \
1732 macro(constantRegisters)
// X-macro: companion to FOR_EACH_MEMBER_VECTOR for the vectors that live in
// CodeBlock's lazily-allocated RareData, applying `macro` once per member.
1734 #define FOR_EACH_MEMBER_VECTOR_RARE_DATA(macro) \
1737 macro(exceptionHandlers) \
1738 macro(switchJumpTables) \
1739 macro(stringSwitchJumpTables) \
1740 macro(evalCodeCache) \
1741 macro(expressionInfo) \
1743 macro(callReturnIndexVector)
// Approximates the heap footprint of a Vector's backing store: allocated
// capacity (not just the in-use size) times the element size. Note this
// excludes the Vector header itself and any out-of-line storage owned by
// the individual elements.
1745 template<typename T>
1746 static size_t sizeInBytes(const Vector<T>& vector)
1748 return vector.capacity() * sizeof(T);
// FireDetail subclass whose dump() labels a watchpoint fire as coming from
// linking a put_to_scope in a particular function, for the given identifier.
// NOTE(review): m_ident is a reference member — the Identifier passed to the
// constructor must outlive this detail object; confirm all construction sites
// are stack-scoped around the watchpoint fire.
1753 class PutToScopeFireDetail : public FireDetail {
1755 PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
1756 : m_codeBlock(codeBlock)
1761 void dump(PrintStream& out) const override
1763 out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
1767 CodeBlock* m_codeBlock;
1768 const Identifier& m_ident;
1771 } // anonymous namespace
// Copy constructor (CopyParsedBlockTag): builds a new CodeBlock that shares
// `other`'s parsed/linked state — instructions, constants, function
// declarations/expressions, source info — while resetting per-block dynamic
// state (compilation flags, breakpoint count, OSR-exit and optimization
// counters) to fresh values. The GC write-barriered members (m_unlinkedCode,
// m_ownerExecutable, m_globalObject) are re-targeted at `this` as owner.
1773 CodeBlock::CodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other)
1774 : JSCell(*vm, structure)
1775 , m_globalObject(other.m_globalObject)
1776 , m_numCalleeLocals(other.m_numCalleeLocals)
1777 , m_numVars(other.m_numVars)
1778 , m_shouldAlwaysBeInlined(true)
1780 , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
1782 , m_didFailFTLCompilation(false)
1783 , m_hasBeenCompiledWithFTL(false)
1784 , m_isConstructor(other.m_isConstructor)
1785 , m_isStrictMode(other.m_isStrictMode)
1786 , m_codeType(other.m_codeType)
1787 , m_unlinkedCode(*other.m_vm, this, other.m_unlinkedCode.get())
1788 , m_hasDebuggerStatement(false)
1789 , m_steppingMode(SteppingModeDisabled)
1790 , m_numBreakpoints(0)
1791 , m_ownerExecutable(*other.m_vm, this, other.m_ownerExecutable.get())
1793 , m_instructions(other.m_instructions)
1794 , m_thisRegister(other.m_thisRegister)
1795 , m_scopeRegister(other.m_scopeRegister)
1796 , m_hash(other.m_hash)
1797 , m_source(other.m_source)
1798 , m_sourceOffset(other.m_sourceOffset)
1799 , m_firstLineColumnOffset(other.m_firstLineColumnOffset)
1800 , m_constantRegisters(other.m_constantRegisters)
1801 , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
1802 , m_functionDecls(other.m_functionDecls)
1803 , m_functionExprs(other.m_functionExprs)
1804 , m_osrExitCounter(0)
1805 , m_optimizationDelayCounter(0)
1806 , m_reoptimizationRetryCounter(0)
1807 , m_creationTime(std::chrono::steady_clock::now())
// Relaxed ordering suffices here: the object is not yet published to other
// threads during construction.
1809 m_visitWeaklyHasBeenCalled.store(false, std::memory_order_relaxed);
// Construction must happen while GC is deferred, and the scope register must
// be a local (not an argument or constant).
1811 ASSERT(heap()->isDeferred());
1812 ASSERT(m_scopeRegister.isLocal());
1814 setNumParameters(other.numParameters());
// Second-phase initialization for the CopyParsedBlockTag constructor.
// Schedules warm-up optimization, deep-copies `other`'s RareData tables
// (exception handlers, constant buffers, switch jump tables, generator
// liveness bitvectors) when present, and registers this block with the
// heap's CodeBlock set so the GC can track it.
1817 void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other)
1819 Base::finishCreation(vm);
1821 optimizeAfterWarmUp();
1824 if (other.m_rareData) {
1825 createRareDataIfNecessary();
1827 m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
1828 m_rareData->m_constantBuffers = other.m_rareData->m_constantBuffers;
1829 m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
1830 m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
1831 m_rareData->m_liveCalleeLocalsAtYield = other.m_rareData->m_liveCalleeLocalsAtYield;
1834 heap()->m_codeBlocks.add(this);
// Main constructor: begins linking an UnlinkedCodeBlock into an executable
// CodeBlock for the given owner executable and scope. Structural properties
// (constructor-ness, strictness, code type, this/scope registers) are copied
// from the unlinked block; dynamic state (breakpoints, counters, compilation
// flags) starts fresh. `sourceProvider`/`sourceOffset`/`firstLineColumnOffset`
// locate this block within its source. The bulk of linking happens later in
// finishCreation(). (PassRefPtr is the legacy WTF transfer-of-ownership
// smart pointer used by this file.)
1837 CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
1838 JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
1839 : JSCell(*vm, structure)
1840 , m_globalObject(scope->globalObject()->vm(), this, scope->globalObject())
1841 , m_numCalleeLocals(unlinkedCodeBlock->m_numCalleeLocals)
1842 , m_numVars(unlinkedCodeBlock->m_numVars)
1843 , m_shouldAlwaysBeInlined(true)
1845 , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
1847 , m_didFailFTLCompilation(false)
1848 , m_hasBeenCompiledWithFTL(false)
1849 , m_isConstructor(unlinkedCodeBlock->isConstructor())
1850 , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
1851 , m_codeType(unlinkedCodeBlock->codeType())
1852 , m_unlinkedCode(m_globalObject->vm(), this, unlinkedCodeBlock)
1853 , m_hasDebuggerStatement(false)
1854 , m_steppingMode(SteppingModeDisabled)
1855 , m_numBreakpoints(0)
1856 , m_ownerExecutable(m_globalObject->vm(), this, ownerExecutable)
1857 , m_vm(unlinkedCodeBlock->vm())
1858 , m_thisRegister(unlinkedCodeBlock->thisRegister())
1859 , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
1860 , m_source(sourceProvider)
1861 , m_sourceOffset(sourceOffset)
1862 , m_firstLineColumnOffset(firstLineColumnOffset)
1863 , m_osrExitCounter(0)
1864 , m_optimizationDelayCounter(0)
1865 , m_reoptimizationRetryCounter(0)
1866 , m_creationTime(std::chrono::steady_clock::now())
// Relaxed ordering suffices: the block is not yet visible to other threads.
1868 m_visitWeaklyHasBeenCalled.store(false, std::memory_order_relaxed);
// Construction must happen while GC is deferred; the scope register must be
// a local.
1870 ASSERT(heap()->isDeferred());
1871 ASSERT(m_scopeRegister.isLocal());
1874 setNumParameters(unlinkedCodeBlock->numParameters());
1877 void CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
1880 Base::finishCreation(vm);
1882 if (vm.typeProfiler() || vm.controlFlowProfiler())
1883 vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(), ownerExecutable->typeProfilingEndOffset());
1885 setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation());
1886 if (unlinkedCodeBlock->usesGlobalObject())
1887 m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(*m_vm, this, m_globalObject.get());
1889 for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
1890 LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
1891 if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
1892 m_constantRegisters[registerIndex].set(*m_vm, this, m_globalObject->jsCellForLinkTimeConstant(type));
1895 // We already have the cloned symbol table for the module environment since we need to instantiate
1896 // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
1897 if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(unlinkedCodeBlock)) {
1898 SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
1899 if (m_vm->typeProfiler()) {
1900 ConcurrentJITLocker locker(clonedSymbolTable->m_lock);
1901 clonedSymbolTable->prepareForTypeProfiling(locker);
1903 replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
1906 bool shouldUpdateFunctionHasExecutedCache = vm.typeProfiler() || vm.controlFlowProfiler();
1907 m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls());
1908 for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
1909 UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
1910 if (shouldUpdateFunctionHasExecutedCache)
1911 vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
1912 m_functionDecls[i].set(*m_vm, this, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
1915 m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs());
1916 for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
1917 UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
1918 if (shouldUpdateFunctionHasExecutedCache)
1919 vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
1920 m_functionExprs[i].set(*m_vm, this, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
1923 if (unlinkedCodeBlock->hasRareData()) {
1924 createRareDataIfNecessary();
1925 if (size_t count = unlinkedCodeBlock->constantBufferCount()) {
1926 m_rareData->m_constantBuffers.grow(count);
1927 for (size_t i = 0; i < count; i++) {
1928 const UnlinkedCodeBlock::ConstantBuffer& buffer = unlinkedCodeBlock->constantBuffer(i);
1929 m_rareData->m_constantBuffers[i] = buffer;
1932 if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
1933 m_rareData->m_exceptionHandlers.resizeToFit(count);
1934 for (size_t i = 0; i < count; i++) {
1935 const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
1936 HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
1938 handler.initialize(unlinkedHandler, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(op_catch))));
1940 handler.initialize(unlinkedHandler);
1945 if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
1946 m_rareData->m_stringSwitchJumpTables.grow(count);
1947 for (size_t i = 0; i < count; i++) {
1948 UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
1949 UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
1950 for (; ptr != end; ++ptr) {
1951 OffsetLocation offset;
1952 offset.branchOffset = ptr->value;
1953 m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
1958 if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
1959 m_rareData->m_switchJumpTables.grow(count);
1960 for (size_t i = 0; i < count; i++) {
1961 UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
1962 SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
1963 destTable.branchOffsets = sourceTable.branchOffsets;
1964 destTable.min = sourceTable.min;
1969 // Allocate metadata buffers for the bytecode
1970 if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
1971 m_llintCallLinkInfos = RefCountedArray<LLIntCallLinkInfo>(size);
1972 if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
1973 m_arrayProfiles.grow(size);
1974 if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
1975 m_arrayAllocationProfiles = RefCountedArray<ArrayAllocationProfile>(size);
1976 if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
1977 m_valueProfiles = RefCountedArray<ValueProfile>(size);
1978 if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
1979 m_objectAllocationProfiles = RefCountedArray<ObjectAllocationProfile>(size);
1982 setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters());
1985 // Copy and translate the UnlinkedInstructions
1986 unsigned instructionCount = unlinkedCodeBlock->instructions().count();
1987 UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions());
1989 // Bookkeep the strongly referenced module environments.
1990 HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;
1992 // Bookkeep the merge point bytecode offsets.
1993 Vector<size_t> mergePointBytecodeOffsets;
1995 RefCountedArray<Instruction> instructions(instructionCount);
1997 for (unsigned i = 0; !instructionReader.atEnd(); ) {
1998 const UnlinkedInstruction* pc = instructionReader.next();
2000 unsigned opLength = opcodeLength(pc[0].u.opcode);
2002 instructions[i] = vm.interpreter->getOpcode(pc[0].u.opcode);
2003 for (size_t j = 1; j < opLength; ++j) {
2004 if (sizeof(int32_t) != sizeof(intptr_t))
2005 instructions[i + j].u.pointer = 0;
2006 instructions[i + j].u.operand = pc[j].u.operand;
2008 switch (pc[0].u.opcode) {
2009 case op_has_indexed_property: {
2010 int arrayProfileIndex = pc[opLength - 1].u.operand;
2011 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
2013 instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
2016 case op_call_varargs:
2017 case op_tail_call_varargs:
2018 case op_construct_varargs:
2019 case op_get_by_val: {
2020 int arrayProfileIndex = pc[opLength - 2].u.operand;
2021 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
2023 instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
2026 case op_get_direct_pname:
2028 case op_get_from_arguments: {
2029 ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
2030 ASSERT(profile->m_bytecodeOffset == -1);
2031 profile->m_bytecodeOffset = i;
2032 instructions[i + opLength - 1] = profile;
2035 case op_put_by_val: {
2036 int arrayProfileIndex = pc[opLength - 1].u.operand;
2037 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
2038 instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
2041 case op_put_by_val_direct: {
2042 int arrayProfileIndex = pc[opLength - 1].u.operand;
2043 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
2044 instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
2049 case op_new_array_buffer:
2050 case op_new_array_with_size: {
2051 int arrayAllocationProfileIndex = pc[opLength - 1].u.operand;
2052 instructions[i + opLength - 1] = &m_arrayAllocationProfiles[arrayAllocationProfileIndex];
2055 case op_new_object: {
2056 int objectAllocationProfileIndex = pc[opLength - 1].u.operand;
2057 ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex];
2058 int inferredInlineCapacity = pc[opLength - 2].u.operand;
2060 instructions[i + opLength - 1] = objectAllocationProfile;
2061 objectAllocationProfile->initialize(vm,
2062 this, m_globalObject->objectPrototype(), inferredInlineCapacity);
2068 case op_call_eval: {
2069 ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
2070 ASSERT(profile->m_bytecodeOffset == -1);
2071 profile->m_bytecodeOffset = i;
2072 instructions[i + opLength - 1] = profile;
2073 int arrayProfileIndex = pc[opLength - 2].u.operand;
2074 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
2075 instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
2076 instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
2079 case op_construct: {
2080 instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
2081 ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
2082 ASSERT(profile->m_bytecodeOffset == -1);
2083 profile->m_bytecodeOffset = i;
2084 instructions[i + opLength - 1] = profile;
2087 case op_get_array_length:
2090 case op_resolve_scope: {
2091 const Identifier& ident = identifier(pc[3].u.operand);
2092 ResolveType type = static_cast<ResolveType>(pc[4].u.operand);
2093 RELEASE_ASSERT(type != LocalClosureVar);
2094 int localScopeDepth = pc[5].u.operand;
2096 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);
2097 instructions[i + 4].u.operand = op.type;
2098 instructions[i + 5].u.operand = op.depth;
2099 if (op.lexicalEnvironment) {
2100 if (op.type == ModuleVar) {
2101 // Keep the linked module environment strongly referenced.
2102 if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry)
2103 addConstant(op.lexicalEnvironment);
2104 instructions[i + 6].u.jsCell.set(vm, this, op.lexicalEnvironment);
2106 instructions[i + 6].u.symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());
2107 } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this))
2108 instructions[i + 6].u.jsCell.set(vm, this, constantScope);
2110 instructions[i + 6].u.pointer = nullptr;
2114 case op_get_from_scope: {
2115 ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
2116 ASSERT(profile->m_bytecodeOffset == -1);
2117 profile->m_bytecodeOffset = i;
2118 instructions[i + opLength - 1] = profile;
2120 // get_from_scope dst, scope, id, GetPutInfo, Structure, Operand
2122 int localScopeDepth = pc[5].u.operand;
2123 instructions[i + 5].u.pointer = nullptr;
2125 GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
2126 ASSERT(!isInitialization(getPutInfo.initializationMode()));
2127 if (getPutInfo.resolveType() == LocalClosureVar) {
2128 instructions[i + 4] = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
2132 const Identifier& ident = identifier(pc[3].u.operand);
2133 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, getPutInfo.resolveType(), InitializationMode::NotInitialization);
2135 instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
2136 if (op.type == ModuleVar)
2137 instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
2138 if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
2139 instructions[i + 5].u.watchpointSet = op.watchpointSet;
2140 else if (op.structure)
2141 instructions[i + 5].u.structure.set(vm, this, op.structure);
2142 instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
2146 case op_put_to_scope: {
2147 // put_to_scope scope, id, value, GetPutInfo, Structure, Operand
2148 GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
2149 if (getPutInfo.resolveType() == LocalClosureVar) {
2150 // Only do watching if the property we're putting to is not anonymous.
2151 if (static_cast<unsigned>(pc[2].u.operand) != UINT_MAX) {
2152 int symbolTableIndex = pc[5].u.operand;
2153 SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
2154 const Identifier& ident = identifier(pc[2].u.operand);
2155 ConcurrentJITLocker locker(symbolTable->m_lock);
2156 auto iter = symbolTable->find(locker, ident.impl());
2157 ASSERT(iter != symbolTable->end(locker));
2158 iter->value.prepareToWatch();
2159 instructions[i + 5].u.watchpointSet = iter->value.watchpointSet();
2161 instructions[i + 5].u.watchpointSet = nullptr;
2165 const Identifier& ident = identifier(pc[2].u.operand);
2166 int localScopeDepth = pc[5].u.operand;
2167 instructions[i + 5].u.pointer = nullptr;
2168 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Put, getPutInfo.resolveType(), getPutInfo.initializationMode());
2170 instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
2171 if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
2172 instructions[i + 5].u.watchpointSet = op.watchpointSet;
2173 else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
2174 if (op.watchpointSet)
2175 op.watchpointSet->invalidate(PutToScopeFireDetail(this, ident));
2176 } else if (op.structure)
2177 instructions[i + 5].u.structure.set(vm, this, op.structure);
2178 instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
2183 case op_profile_type: {
2184 RELEASE_ASSERT(vm.typeProfiler());
2185 // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType?
2186 size_t instructionOffset = i + opLength - 1;
2187 unsigned divotStart, divotEnd;
2188 GlobalVariableID globalVariableID = 0;
2189 RefPtr<TypeSet> globalTypeSet;
2190 bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
2191 VirtualRegister profileRegister(pc[1].u.operand);
2192 ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand);
2193 SymbolTable* symbolTable = nullptr;
2196 case ProfileTypeBytecodeClosureVar: {
2197 const Identifier& ident = identifier(pc[4].u.operand);
2198 int localScopeDepth = pc[2].u.operand;
2199 ResolveType type = static_cast<ResolveType>(pc[5].u.operand);
2200 // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
2201 // we're abstractly "read"ing from a JSScope.
2202 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);
2204 if (op.type == ClosureVar || op.type == ModuleVar)
2205 symbolTable = op.lexicalEnvironment->symbolTable();
2206 else if (op.type == GlobalVar)
2207 symbolTable = m_globalObject.get()->symbolTable();
2209 UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl();
2211 ConcurrentJITLocker locker(symbolTable->m_lock);
2212 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
2213 symbolTable->prepareForTypeProfiling(locker);
2214 globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm);
2215 globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm);
2217 globalVariableID = TypeProfilerNoGlobalIDExists;
2221 case ProfileTypeBytecodeLocallyResolved: {
2222 int symbolTableIndex = pc[2].u.operand;
2223 SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
2224 const Identifier& ident = identifier(pc[4].u.operand);
2225 ConcurrentJITLocker locker(symbolTable->m_lock);
2226 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
2227 globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm);
2228 globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm);
2232 case ProfileTypeBytecodeDoesNotHaveGlobalID:
2233 case ProfileTypeBytecodeFunctionArgument: {
2234 globalVariableID = TypeProfilerNoGlobalIDExists;
2237 case ProfileTypeBytecodeFunctionReturnStatement: {
2238 RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
2239 globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
2240 globalVariableID = TypeProfilerReturnStatement;
2241 if (!shouldAnalyze) {
2242 // Because a return statement can be added implicitly to return undefined at the end of a function,
2243 // and these nodes don't emit expression ranges because they aren't in the actual source text of
2244 // the user's program, give the type profiler some range to identify these return statements.
2245 // Currently, the text offset that is used as identification is "f" in the function keyword
2246 // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
2247 divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset();
2248 shouldAnalyze = true;
2254 std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
2255 ownerExecutable->sourceID(), divotStart, divotEnd, globalTypeSet, &vm);
2256 TypeLocation* location = locationPair.first;
2257 bool isNewLocation = locationPair.second;
2259 if (flag == ProfileTypeBytecodeFunctionReturnStatement)
2260 location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset();
2262 if (shouldAnalyze && isNewLocation)
2263 vm.typeProfiler()->insertNewLocation(location);
2265 instructions[i + 2].u.location = location;
2270 if (pc[1].u.index == DidReachBreakpoint)
2271 m_hasDebuggerStatement = true;
2276 unsigned liveCalleeLocalsIndex = pc[2].u.index;
2277 int offset = pc[3].u.operand;
2278 if (liveCalleeLocalsIndex >= mergePointBytecodeOffsets.size())
2279 mergePointBytecodeOffsets.resize(liveCalleeLocalsIndex + 1);
2280 mergePointBytecodeOffsets[liveCalleeLocalsIndex] = i + offset;
2290 if (vm.controlFlowProfiler())
2291 insertBasicBlockBoundariesForControlFlowProfiler(instructions);
2293 m_instructions = WTFMove(instructions);
2295 // Perform bytecode liveness analysis to determine which locals are live and should be resumed when executing op_resume.
2296 if (unlinkedCodeBlock->parseMode() == SourceParseMode::GeneratorBodyMode) {
2297 if (size_t count = mergePointBytecodeOffsets.size()) {
2298 createRareDataIfNecessary();
2299 BytecodeLivenessAnalysis liveness(this);
2300 m_rareData->m_liveCalleeLocalsAtYield.grow(count);
2301 size_t liveCalleeLocalsIndex = 0;
2302 for (size_t bytecodeOffset : mergePointBytecodeOffsets) {
2303 m_rareData->m_liveCalleeLocalsAtYield[liveCalleeLocalsIndex] = liveness.getLivenessInfoAtBytecodeOffset(bytecodeOffset);
2304 ++liveCalleeLocalsIndex;
2309 // Set optimization thresholds only after m_instructions is initialized, since these
2310 // rely on the instruction count (and are in theory permitted to also inspect the
2311 // instruction stream to more accurate assess the cost of tier-up).
2312 optimizeAfterWarmUp();
2315 // If the concurrent thread will want the code block's hash, then compute it here
2317 if (Options::alwaysComputeHash())
2320 if (Options::dumpGeneratedBytecodes())
2323 heap()->m_codeBlocks.add(this);
2324 heap()->reportExtraMemoryAllocated(m_instructions.size() * sizeof(Instruction));
2327 #if ENABLE(WEBASSEMBLY)
// Constructor for a CodeBlock backed by a WebAssemblyExecutable. The visible
// initializers set all JS-tiering state to "off" (CannotCompile, no FTL, not a
// constructor, not strict mode) and record creation time for age-based policies.
// NOTE(review): some initializers are elided in this listing — confirm against
// the full source before relying on member ordering here.
2328 CodeBlock::CodeBlock(VM* vm, Structure* structure, WebAssemblyExecutable* ownerExecutable, JSGlobalObject* globalObject)
2329 : JSCell(*vm, structure)
2330 , m_globalObject(globalObject->vm(), this, globalObject)
2331 , m_numCalleeLocals(0)
2333 , m_shouldAlwaysBeInlined(false)
2335 , m_capabilityLevelState(DFG::CannotCompile)
2337 , m_didFailFTLCompilation(false)
2338 , m_hasBeenCompiledWithFTL(false)
2339 , m_isConstructor(false)
2340 , m_isStrictMode(false)
2341 , m_codeType(FunctionCode)
2342 , m_hasDebuggerStatement(false)
2343 , m_steppingMode(SteppingModeDisabled)
2344 , m_numBreakpoints(0)
2345 , m_ownerExecutable(m_globalObject->vm(), this, ownerExecutable)
2347 , m_osrExitCounter(0)
2348 , m_optimizationDelayCounter(0)
2349 , m_reoptimizationRetryCounter(0)
2350 , m_creationTime(std::chrono::steady_clock::now())
// Construction must happen while the heap defers collection.
2352 ASSERT(heap()->isDeferred());
// Second-phase initialization for WebAssembly code blocks: run the base class's
// finishCreation, then register this block in the heap's code-block set so the
// GC knows about it.
2355 void CodeBlock::finishCreation(VM& vm, WebAssemblyExecutable*, JSGlobalObject*)
2357 Base::finishCreation(vm);
2359 heap()->m_codeBlocks.add(this);
// Destructor: notifies the per-bytecode profiler (if any) that this block is
// going away, optionally dumps value profiles, severs incoming call links, and
// (under ENABLE(JIT)) walks the structure stub infos — the loop body is elided
// in this listing.
2363 CodeBlock::~CodeBlock()
2365 if (m_vm->m_perBytecodeProfiler)
2366 m_vm->m_perBytecodeProfiler->notifyDestruction(this);
2368 #if ENABLE(VERBOSE_VALUE_PROFILE)
2369 dumpValueProfiles();
2372 // We may be destroyed before any CodeBlocks that refer to us are destroyed.
2373 // Consider that two CodeBlocks become unreachable at the same time. There
2374 // is no guarantee about the order in which the CodeBlocks are destroyed.
2375 // So, if we don't remove incoming calls, and get destroyed before the
2376 // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
2377 // destructor will try to remove nodes from our (no longer valid) linked list.
2378 unlinkIncomingCalls();
2380 // Note that our outgoing calls will be removed from other CodeBlocks'
2381 // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
2385 for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
2386 StructureStubInfo* stub = *iter;
2390 #endif // ENABLE(JIT)
// Copies the unlinked constant pool into this block's constant registers.
// Any SymbolTable constant is first prepared for type profiling (when a type
// profiler is active) and then replaced by a clone of its scope part, so the
// linked block does not share mutable symbol-table state with the unlinked one.
2393 void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation)
2395 ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
2396 size_t count = constants.size();
2397 m_constantRegisters.resizeToFit(count);
2398 bool hasTypeProfiler = !!m_vm->typeProfiler();
2399 for (size_t i = 0; i < count; i++) {
2400 JSValue constant = constants[i].get();
2402 if (!constant.isEmpty()) {
2403 if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(constant)) {
2404 if (hasTypeProfiler) {
// The symbol table is shared with concurrent compiler threads; lock it
// before mutating profiling state.
2405 ConcurrentJITLocker locker(symbolTable->m_lock);
2406 symbolTable->prepareForTypeProfiling(locker);
2408 constant = symbolTable->cloneScopePart(*m_vm);
// Write barrier: record the constant with this block as the owner cell.
2412 m_constantRegisters[i].set(*m_vm, this, constant);
2415 m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
// Installs the alternative (lower-tier) code block via a write barrier owned by
// this block.
2418 void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative)
2420 m_alternative.set(vm, this, alternative);
// Updates the parameter count and resizes the per-argument value-profile array
// to match, discarding any previously collected argument profiles.
2423 void CodeBlock::setNumParameters(int newValue)
2425 m_numParameters = newValue;
2427 m_argumentValueProfiles = RefCountedArray<ValueProfile>(newValue);
// GC hook: appends every cached eval entry to the visitor so cached eval code
// stays alive while this cache does.
2430 void EvalCodeCache::visitAggregate(SlotVisitor& visitor)
2432 EvalCacheMap::iterator end = m_cacheMap.end();
2433 for (EvalCacheMap::iterator ptr = m_cacheMap.begin(); ptr != end; ++ptr)
2434 visitor.append(&ptr->value);
// Returns the FTL OSR entry block for a DFG-compiled code block, or null for
// any other tier. (The non-FTL fallback return is elided in this listing.)
2437 CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
2440 if (jitType() != JITCode::DFGJIT)
2442 DFG::JITCode* jitCode = m_jitCode->dfg();
2443 return jitCode->osrEntryBlock();
2444 #else // ENABLE(FTL_JIT)
2446 #endif // ENABLE(FTL_JIT)
// Weak visit entry point for the GC. A compare-and-swap on
// m_visitWeaklyHasBeenCalled guards against repeated work — presumably once
// per collection cycle; confirm where the flag is reset. Blocks that are
// already marked or that shouldVisitStrongly() are marked strongly; optimizing
// (DFG/FTL) blocks instead start the weak-reference fixpoint that may later
// decide to jettison them.
2449 void CodeBlock::visitWeakly(SlotVisitor& visitor)
2451 bool setByMe = m_visitWeaklyHasBeenCalled.compareExchangeStrong(false, true);
2455 if (Heap::isMarked(this))
2458 if (shouldVisitStrongly()) {
2459 visitor.appendUnbarrieredReadOnlyPointer(this);
2463 // There are two things that may use unconditional finalizers: inline cache clearing
2464 // and jettisoning. The probability of us wanting to do at least one of those things
2465 // is probably quite close to 1. So we add one no matter what and when it runs, it
2466 // figures out whether it has any work to do.
2467 visitor.addUnconditionalFinalizer(&m_unconditionalFinalizer)
2469 if (!JITCode::isOptimizingJIT(jitType()))
2472 // If we jettison ourselves we'll install our alternative, so make sure that it
2473 // survives GC even if we don't.
2474 visitor.append(&m_alternative);
2476 // There are two things that we use weak reference harvesters for: DFG fixpoint for
2477 // jettisoning, and trying to find structures that would be live based on some
2478 // inline cache. So it makes sense to register them regardless.
2479 visitor.addWeakReferenceHarvester(&m_weakReferenceHarvester);
2482 // We get here if we're live in the sense that our owner executable is live,
2483 // but we're not yet live for sure in another sense: we may yet decide that this
2484 // code block should be jettisoned based on its outgoing weak references being
2485 // stale. Set a flag to indicate that we're still assuming that we're dead, and
2486 // perform one round of determining if we're live. The GC may determine, based on
2487 // either us marking additional objects, or by other objects being marked for
2488 // other reasons, that this iteration should run again; it will notify us of this
2489 // decision by calling harvestWeakReferences().
2491 m_allTransitionsHaveBeenMarked = false;
2492 propagateTransitions(visitor);
2494 m_jitCode->dfgCommon()->livenessHasBeenProved = false;
2495 determineLiveness(visitor);
2496 #endif // ENABLE(DFG_JIT)
// Static size estimator used by the GC heuristics: base cell size plus the
// instruction stream and (if present) the JIT code footprint.
2499 size_t CodeBlock::estimatedSize(JSCell* cell)
2501 CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
2502 size_t extraMemoryAllocated = thisObject->m_instructions.size() * sizeof(Instruction);
2503 if (thisObject->m_jitCode)
2504 extraMemoryAllocated += thisObject->m_jitCode->size();
2505 return Base::estimatedSize(cell) + extraMemoryAllocated;
// Static GC trampoline: downcast the cell, visit base-class children, then
// delegate to the instance overload below.
2508 void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
2510 CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
2511 ASSERT_GC_OBJECT_INHERITS(thisObject, info());
2512 JSCell::visitChildren(thisObject, visitor);
2513 thisObject->visitChildren(visitor);
// Strong visit of this block's children: registers the unconditional
// finalizer, keeps any special OSR entry block alive, reports extra memory for
// JIT code and the (ref-shared) instruction stream, visits strong and weak
// references, and restarts transition propagation.
2516 void CodeBlock::visitChildren(SlotVisitor& visitor)
2518 // There are two things that may use unconditional finalizers: inline cache clearing
2519 // and jettisoning. The probability of us wanting to do at least one of those things
2520 // is probably quite close to 1. So we add one no matter what and when it runs, it
2521 // figures out whether it has any work to do.
2522 visitor.addUnconditionalFinalizer(&m_unconditionalFinalizer);
2524 if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
2525 visitor.appendUnbarrieredReadOnlyPointer(otherBlock);
2528 visitor.reportExtraMemoryVisited(m_jitCode->size());
2529 if (m_instructions.size())
// Instructions are shared via refcount, so attribute only our share.
2530 visitor.reportExtraMemoryVisited(m_instructions.size() * sizeof(Instruction) / m_instructions.refCount());
2532 stronglyVisitStrongReferences(visitor);
2533 stronglyVisitWeakReferences(visitor);
2535 m_allTransitionsHaveBeenMarked = false;
2536 propagateTransitions(visitor);
// Policy: whether this block should be kept alive strongly during GC.
// Forced liveness wins; old-age jettison candidates are not visited strongly;
// non-optimizing (LLInt/Baseline) blocks are. (Return statements for each
// branch are elided in this listing.)
2539 bool CodeBlock::shouldVisitStrongly()
2541 if (Options::forceCodeBlockLiveness())
2544 if (shouldJettisonDueToOldAge())
2547 // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
2548 // their weak references go stale. So if a basline JIT CodeBlock gets
2549 // scanned, we can assume that this means that it's live.
2550 if (!JITCode::isOptimizingJIT(jitType()))
// Only optimizing-JIT blocks are jettisoned for stale weak references; such a
// block is jettisoned exactly when it was not marked this cycle.
2556 bool CodeBlock::shouldJettisonDueToWeakReference()
2558 if (!JITCode::isOptimizingJIT(jitType()))
2560 return !Heap::isMarked(this);
// Age-based jettison policy. Body is elided in this listing — presumably
// compares against m_creationTime; confirm against the full source.
2563 bool CodeBlock::shouldJettisonDueToOldAge()
// A DFG weak-reference transition's target may be marked only when both its
// code origin (if any) and its source structure are already marked.
// (The return statements are elided in this listing.)
2569 static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
2571 if (transition.m_codeOrigin && !Heap::isMarked(transition.m_codeOrigin.get()))
2574 if (!Heap::isMarked(transition.m_from.get()))
2579 #endif // ENABLE(DFG_JIT)
// One pass of the GC's transition-propagation fixpoint. Scans structure
// transitions recorded in LLInt put_by_id metadata, JIT polymorphic access
// stubs, and DFG common data; marks each transition target whose
// preconditions are marked, and records whether every transition has now been
// handled so later passes can early-out via m_allTransitionsHaveBeenMarked.
2581 void CodeBlock::propagateTransitions(SlotVisitor& visitor)
2583 UNUSED_PARAM(visitor);
2585 if (m_allTransitionsHaveBeenMarked)
2588 bool allAreMarkedSoFar = true;
2590 Interpreter* interpreter = m_vm->interpreter;
2591 if (jitType() == JITCode::InterpreterThunk) {
2592 const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
2593 for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
2594 Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
2595 switch (interpreter->getOpcodeID(instruction[0].u.opcode)) {
2596 case op_put_by_id: {
// put_by_id caches a structure transition in operands 4 (old) and 6 (new).
2597 StructureID oldStructureID = instruction[4].u.structureID;
2598 StructureID newStructureID = instruction[6].u.structureID;
2599 if (!oldStructureID || !newStructureID)
2601 Structure* oldStructure =
2602 m_vm->heap.structureIDTable().get(oldStructureID);
2603 Structure* newStructure =
2604 m_vm->heap.structureIDTable().get(newStructureID);
2605 if (Heap::isMarked(oldStructure))
2606 visitor.appendUnbarrieredReadOnlyPointer(newStructure);
2608 allAreMarkedSoFar = false;
2618 if (JITCode::isJIT(jitType())) {
2619 for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
2620 StructureStubInfo& stubInfo = **iter;
2621 if (stubInfo.cacheType != CacheType::Stub)
2623 PolymorphicAccess* list = stubInfo.u.stub;
2624 JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
2625 if (origin && !Heap::isMarked(origin)) {
2626 allAreMarkedSoFar = false;
2629 for (unsigned j = list->size(); j--;) {
2630 const AccessCase& access = list->at(j);
2631 if (access.type() != AccessCase::Transition)
2633 if (Heap::isMarked(access.structure()))
2634 visitor.appendUnbarrieredReadOnlyPointer(access.newStructure());
2636 allAreMarkedSoFar = false;
2640 #endif // ENABLE(JIT)
2643 if (JITCode::isOptimizingJIT(jitType())) {
2644 DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2646 for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
2647 if (shouldMarkTransition(dfgCommon->transitions[i])) {
2648 // If the following three things are live, then the target of the
2649 // transition is also live:
2651 // - This code block. We know it's live already because otherwise
2652 // we wouldn't be scanning ourselves.
2654 // - The code origin of the transition. Transitions may arise from
2655 // code that was inlined. They are not relevant if the user's
2656 // object that is required for the inlinee to run is no longer
2659 // - The source of the transition. The transition checks if some
2660 // heap location holds the source, and if so, stores the target.
2661 // Hence the source must be live for the transition to be live.
2663 // We also short-circuit the liveness if the structure is harmless
2664 // to mark (i.e. its global object and prototype are both already
2667 visitor.append(&dfgCommon->transitions[i].m_to);
2669 allAreMarkedSoFar = false;
2672 #endif // ENABLE(DFG_JIT)
2674 if (allAreMarkedSoFar)
2675 m_allTransitionsHaveBeenMarked = true;
// DFG liveness fixpoint step: if every weak reference (and weak structure
// reference) this optimized block holds is marked, the block proves itself
// live, records that fact, and marks itself strongly; otherwise it waits for
// another harvesting round or ends up jettisoned.
2678 void CodeBlock::determineLiveness(SlotVisitor& visitor)
2680 UNUSED_PARAM(visitor);
2683 // Check if we have any remaining work to do.
2684 DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2685 if (dfgCommon->livenessHasBeenProved)
2688 // Now check all of our weak references. If all of them are live, then we
2689 // have proved liveness and so we scan our strong references. If at end of
2690 // GC we still have not proved liveness, then this code block is toast.
2691 bool allAreLiveSoFar = true;
2692 for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
2693 if (!Heap::isMarked(dfgCommon->weakReferences[i].get())) {
2694 allAreLiveSoFar = false;
2698 if (allAreLiveSoFar) {
2699 for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
2700 if (!Heap::isMarked(dfgCommon->weakStructureReferences[i].get())) {
2701 allAreLiveSoFar = false;
2707 // If some weak references are dead, then this fixpoint iteration was
2709 if (!allAreLiveSoFar)
2712 // All weak references are live. Record this information so we don't
2713 // come back here again, and scan the strong references.
2714 dfgCommon->livenessHasBeenProved = true;
2715 visitor.appendUnbarrieredReadOnlyPointer(this);
2716 #endif // ENABLE(DFG_JIT)
// Harvester callback: recovers the owning CodeBlock from the embedded
// m_weakReferenceHarvester member via OBJECT_OFFSETOF, then runs one more
// round of transition propagation and liveness determination.
2719 void CodeBlock::WeakReferenceHarvester::visitWeakReferences(SlotVisitor& visitor)
2721 CodeBlock* codeBlock =
2722 bitwise_cast<CodeBlock*>(
2723 bitwise_cast<char*>(this) - OBJECT_OFFSETOF(CodeBlock, m_weakReferenceHarvester));
2725 codeBlock->propagateTransitions(visitor);
2726 codeBlock->determineLiveness(visitor);
// Clears LLInt inline caches whose cached cells (structures, callees, symbol
// tables, structure chains) died in the last GC cycle, so the interpreter
// never dereferences a dead cell. Also unlinks LLInt call-link infos with dead
// callees. WebAssembly blocks have no LLInt metadata and bail out early.
2729 void CodeBlock::finalizeLLIntInlineCaches()
2731 #if ENABLE(WEBASSEMBLY)
2732 if (m_ownerExecutable->isWebAssemblyExecutable())
2736 Interpreter* interpreter = m_vm->interpreter;
2737 const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
2738 for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
2739 Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
2740 switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) {
2741 case op_get_by_id: {
2742 StructureID oldStructureID = curInstruction[4].u.structureID;
2743 if (!oldStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(oldStructureID)))
2745 if (Options::verboseOSR())
2746 dataLogF("Clearing LLInt property access.\n");
2747 curInstruction[4].u.structureID = 0;
2748 curInstruction[5].u.operand = 0;
2751 case op_put_by_id: {
// A put transition is only kept if old structure, new structure, AND the
// structure chain (if any) all survived the GC.
2752 StructureID oldStructureID = curInstruction[4].u.structureID;
2753 StructureID newStructureID = curInstruction[6].u.structureID;
2754 StructureChain* chain = curInstruction[7].u.structureChain.get();
2755 if ((!oldStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(oldStructureID))) &&
2756 (!newStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(newStructureID))) &&
2757 (!chain || Heap::isMarked(chain)))
2759 if (Options::verboseOSR())
2760 dataLogF("Clearing LLInt put transition.\n");
2761 curInstruction[4].u.structureID = 0;
2762 curInstruction[5].u.operand = 0;
2763 curInstruction[6].u.structureID = 0;
2764 curInstruction[7].u.structureChain.clear();
2767 case op_get_array_length:
2770 if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
2772 if (Options::verboseOSR())
2773 dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
2774 curInstruction[2].u.structure.clear();
2775 curInstruction[3].u.toThisStatus = merge(
2776 curInstruction[3].u.toThisStatus, ToThisClearedByGC);
2778 case op_create_this: {
// create_this caches a single callee; the seenMultipleCalleeObjects sentinel
// is not a real cell and must not be cleared.
2779 auto& cacheWriteBarrier = curInstruction[4].u.jsCell;
2780 if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
2782 JSCell* cachedFunction = cacheWriteBarrier.get();
2783 if (Heap::isMarked(cachedFunction))
2785 if (Options::verboseOSR())
2786 dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
2787 cacheWriteBarrier.clear();
2790 case op_resolve_scope: {
2791 // Right now this isn't strictly necessary. Any symbol tables that this will refer to
2792 // are for outer functions, and we refer to those functions strongly, and they refer
2793 // to the symbol table strongly. But it's nice to be on the safe side.
2794 WriteBarrierBase<SymbolTable>& symbolTable = curInstruction[6].u.symbolTable;
2795 if (!symbolTable || Heap::isMarked(symbolTable.get()))
2797 if (Options::verboseOSR())
2798 dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
2799 symbolTable.clear();
2802 case op_get_from_scope:
2803 case op_put_to_scope: {
// Var-style resolve types store a watchpoint set (not a cell) in operand 5,
// so there is nothing to clear for them.
2804 GetPutInfo getPutInfo = GetPutInfo(curInstruction[4].u.operand);
2805 if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks
2806 || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
2808 WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
2809 if (!structure || Heap::isMarked(structure.get()))
2811 if (Options::verboseOSR())
2812 dataLogF("Clearing scope access with structure %p.\n", structure.get());
2817 OpcodeID opcodeID = interpreter->getOpcodeID(curInstruction[0].u.opcode);
2818 ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
// Unlink LLInt call caches whose callee died; also drop dead lastSeenCallee.
2822 for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
2823 if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
2824 if (Options::verboseOSR())
2825 dataLog("Clearing LLInt call from ", *this, "\n");
2826 m_llintCallLinkInfos[i].unlink();
2828 if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
2829 m_llintCallLinkInfos[i].lastSeenCallee.clear();
// Lets every Baseline JIT inline cache drop dead weak references: call-link
// infos via visitWeak, structure stub infos via visitWeakReferences.
2833 void CodeBlock::finalizeBaselineJITInlineCaches()
2836 for (auto iter = callLinkInfosBegin(); !!iter; ++iter)
2837 (*iter)->visitWeak(*vm());
2839 for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
2840 StructureStubInfo& stubInfo = **iter;
2841 stubInfo.visitWeakReferences(this);
// Post-GC finalizer: recovers the owning CodeBlock via OBJECT_OFFSETOF, then
// either jettisons it (stale weak references under DFG, or old age) or
// finalizes its LLInt and Baseline inline caches.
2846 void CodeBlock::UnconditionalFinalizer::finalizeUnconditionally()
2848 CodeBlock* codeBlock = bitwise_cast<CodeBlock*>(
2849 bitwise_cast<char*>(this) - OBJECT_OFFSETOF(CodeBlock, m_unconditionalFinalizer));
2852 if (codeBlock->shouldJettisonDueToWeakReference()) {
2853 codeBlock->jettison(Profiler::JettisonDueToWeakReference);
2856 #endif // ENABLE(DFG_JIT)
2858 if (codeBlock->shouldJettisonDueToOldAge()) {
2859 codeBlock->jettison(Profiler::JettisonDueToOldAge);
2863 if (JITCode::couldBeInterpreted(codeBlock->jitType()))
2864 codeBlock->finalizeLLIntInlineCaches();
2867 if (!!codeBlock->jitCode())
2868 codeBlock->finalizeBaselineJITInlineCaches();
// Builds a CodeOrigin -> StructureStubInfo* map; caller must hold m_lock.
// NOTE(review): the #if ENABLE(JIT) / #else split around these two lines is
// elided in this listing — UNUSED_PARAM is the no-JIT branch.
2872 void CodeBlock::getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result)
2875 toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
2877 UNUSED_PARAM(result);
// Convenience overload: takes m_lock, then delegates to the locked variant.
2881 void CodeBlock::getStubInfoMap(StubInfoMap& result)
2883 ConcurrentJITLocker locker(m_lock);
2884 getStubInfoMap(locker, result);
// Builds a CodeOrigin -> CallLinkInfo* map; caller must hold m_lock.
// NOTE(review): the #if ENABLE(JIT) / #else split is elided in this listing.
2887 void CodeBlock::getCallLinkInfoMap(const ConcurrentJITLocker&, CallLinkInfoMap& result)
2890 toHashMap(m_callLinkInfos, getCallLinkInfoCodeOrigin, result);
2892 UNUSED_PARAM(result);
// Convenience overload: takes m_lock, then delegates to the locked variant.
2896 void CodeBlock::getCallLinkInfoMap(CallLinkInfoMap& result)
2898 ConcurrentJITLocker locker(m_lock);
2899 getCallLinkInfoMap(locker, result);
// Builds a CodeOrigin -> ByValInfo* map; caller must hold m_lock.
// NOTE(review): the #if ENABLE(JIT) / #else split is elided in this listing.
2902 void CodeBlock::getByValInfoMap(const ConcurrentJITLocker&, ByValInfoMap& result)
2905 for (auto* byValInfo : m_byValInfos)
2906 result.add(CodeOrigin(byValInfo->bytecodeIndex), byValInfo);
2908 UNUSED_PARAM(result);
// Convenience overload: takes m_lock, then delegates to the locked variant.
2912 void CodeBlock::getByValInfoMap(ByValInfoMap& result)
2914 ConcurrentJITLocker locker(m_lock);
2915 getByValInfoMap(locker, result);
// Allocates a new property-access stub info in the bag. Taken under m_lock so
// concurrent readers (e.g. a compiler thread building a StubInfoMap) never see
// a partially-added entry.
2919 StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
2921 ConcurrentJITLocker locker(m_lock);
2922 return m_stubInfos.add(accessType);
// Linear search of the stub-info bag for the entry with a matching code origin.
// NOTE(review): the match-return and trailing not-found return are elided from
// this listing.
2925 StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
2927 for (StructureStubInfo* stubInfo : m_stubInfos) {
2928 if (stubInfo->codeOrigin == codeOrigin)
// Allocates a new by-val access info under m_lock (see addStubInfo).
2934 ByValInfo* CodeBlock::addByValInfo()
2936 ConcurrentJITLocker locker(m_lock);
2937 return m_byValInfos.add();
// Allocates a new call link info under m_lock (see addStubInfo).
2940 CallLinkInfo* CodeBlock::addCallLinkInfo()
2942 ConcurrentJITLocker locker(m_lock);
2943 return m_callLinkInfos.add();
// Linear scan for the CallLinkInfo whose code origin is exactly this bytecode
// index. NOTE(review): match-return and not-found return are elided here.
2946 CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
2948 for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
2949 if ((*iter)->codeOrigin() == CodeOrigin(index))
// Strongly marks every CodeBlock this optimized block could OSR-exit into:
// the direct alternative plus the baseline block of each inlined call frame.
2956 void CodeBlock::visitOSRExitTargets(SlotVisitor& visitor)
2958 // We strongly visit OSR exits targets because we don't want to deal with
2959 // the complexity of generating an exit target CodeBlock on demand and
2960 // guaranteeing that it matches the details of the CodeBlock we compiled
2961 // the OSR exit against.
2963 visitor.append(&m_alternative);
// Inlined frames exit to their own baseline CodeBlocks; keep those alive too.
2966 DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2967 if (dfgCommon->inlineCallFrames) {
2968 for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) {
2969 ASSERT(inlineCallFrame->baselineCodeBlock);
2970 visitor.append(&inlineCallFrame->baselineCodeBlock);
// GC marking of everything this CodeBlock strongly owns: global object, owner
// executable, unlinked code, constants, function expressions/declarations, and
// object-allocation profiles. Optimized code additionally keeps its OSR exit
// targets alive.
2976 void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
2978 visitor.append(&m_globalObject);
2979 visitor.append(&m_ownerExecutable);
2980 visitor.append(&m_unlinkedCode);
// NOTE(review): the `if (m_rareData)` guard for this line is elided here.
2982 m_rareData->m_evalCodeCache.visitAggregate(visitor);
2983 visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
2984 for (size_t i = 0; i < m_functionExprs.size(); ++i)
2985 visitor.append(&m_functionExprs[i]);
2986 for (size_t i = 0; i < m_functionDecls.size(); ++i)
2987 visitor.append(&m_functionDecls[i]);
2988 for (unsigned i = 0; i < m_objectAllocationProfiles.size(); ++i)
2989 m_objectAllocationProfiles[i].visitAggregate(visitor);
// DFG_JIT-only: exit targets must stay alive as long as this optimized block.
2992 if (JITCode::isOptimizingJIT(jitType()))
2993 visitOSRExitTargets(visitor);
// Marking is a convenient point to refresh value-profile predictions.
2996 updateAllPredictions();
// Forces all of an optimized block's normally-weak references (transition
// structures, weak cells) to be marked strongly, then records that liveness
// has been proved so jettison-on-weak-death is skipped. No-op below DFG tier.
2999 void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor)
3001 UNUSED_PARAM(visitor);
3004 if (!JITCode::isOptimizingJIT(jitType()))
3007 DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
3009 for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
3010 if (!!dfgCommon->transitions[i].m_codeOrigin)
3011 visitor.append(&dfgCommon->transitions[i].m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
3012 visitor.append(&dfgCommon->transitions[i].m_from);
3013 visitor.append(&dfgCommon->transitions[i].m_to);
3016 for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i)
3017 visitor.append(&dfgCommon->weakReferences[i]);
3019 for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i)
3020 visitor.append(&dfgCommon->weakStructureReferences[i]);
// Everything weak was just marked strongly, so liveness is proved by fiat.
3022 dfgCommon->livenessHasBeenProved = true;
// Walks the alternative() chain to its end, which must be either baseline code
// or a not-yet-compiled (JITCode::None) block; asserts that invariant.
3026 CodeBlock* CodeBlock::baselineAlternative()
3029 CodeBlock* result = this;
3030 while (result->alternative())
3031 result = result->alternative();
3032 RELEASE_ASSERT(result);
3033 RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
// Returns the baseline CodeBlock for this executable: self when already
// baseline, otherwise the replacement's baseline alternative. NOTE(review):
// several return statements and the null-replacement branch body are elided
// from this listing.
3040 CodeBlock* CodeBlock::baselineVersion()
3043 if (JITCode::isBaselineCode(jitType()))
3045 CodeBlock* result = replacement();
3047 // This can happen if we're creating the original CodeBlock for an executable.
3048 // Assume that we're the baseline CodeBlock.
3049 RELEASE_ASSERT(jitType() == JITCode::None);
3052 result = result->baselineAlternative();
// True when the installed replacement runs at a higher JIT tier than the
// given one.
3060 bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
3062 return JITCode::isHigherTier(replacement()->jitType(), typeToReplace);
// Convenience overload: compares against this block's own JIT tier.
3065 bool CodeBlock::hasOptimizedReplacement()
3067 return hasOptimizedReplacement(jitType());
// Exception handler lookup keyed by bytecode offset; validates the offset then
// reuses the generic index-based lookup.
3071 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
3073 RELEASE_ASSERT(bytecodeOffset < instructions().size());
3074 return handlerForIndex(bytecodeOffset, requiredHandler);
// Finds the innermost exception handler covering `index` (a bytecode offset or
// CallSiteIndex), optionally restricted to catch handlers. NOTE(review): the
// no-rare-data early-out, the matching return, and the final nullptr return
// are elided from this listing.
3077 HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
3082 Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
3083 for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
3084 HandlerInfo& handler = exceptionHandlers[i];
// Skip non-catch handlers (e.g. finally) when only catch handlers qualify.
3085 if ((requiredHandler == RequiredHandler::CatchHandler) && !handler.isCatchHandler())
3088 // Handlers are ordered innermost first, so the first handler we encounter
3089 // that contains the source address is the correct handler to use.
3090 // This index used is either the BytecodeOffset or a CallSiteIndex.
3091 if (handler.start <= index && handler.end > index)
// DFG/FTL-only: mints a fresh unique call-site index sharing the code origin
// of an existing, handler-covered call site. Unreachable in non-JIT builds.
3098 CallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
3101 RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
3102 RELEASE_ASSERT(canGetCodeOrigin(originalCallSite));
// The original site must already be covered by an exception handler.
3103 ASSERT(!!handlerForIndex(originalCallSite.bits()));
3104 CodeOrigin originalOrigin = codeOrigin(originalCallSite);
3105 return m_jitCode->dfgCommon()->addUniqueCallSiteIndex(originalOrigin);
3107 // We never create new on-the-fly exception handling
3108 // call sites outside the DFG/FTL inline caches.
3109 UNUSED_PARAM(originalCallSite);
3110 RELEASE_ASSERT_NOT_REACHED();
3111 return CallSiteIndex(0u);
// Removes the (single) exception handler covering the given call site; the
// handler must exist, otherwise this crashes deliberately.
3115 void CodeBlock::removeExceptionHandlerForCallSite(CallSiteIndex callSiteIndex)
3117 RELEASE_ASSERT(m_rareData);
3118 Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
3119 unsigned index = callSiteIndex.bits();
3120 for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
3121 HandlerInfo& handler = exceptionHandlers[i];
// Same half-open [start, end) containment test as handlerForIndex.
3122 if (handler.start <= index && handler.end > index) {
3123 exceptionHandlers.remove(i);
// Reached only if no handler covered the call site — a caller bug.
3128 RELEASE_ASSERT_NOT_REACHED();
// Absolute source line for a bytecode offset: executable's first line plus the
// unlinked code's relative line.
3131 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
3133 RELEASE_ASSERT(bytecodeOffset < instructions().size());
3134 return ownerScriptExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
// Column for a bytecode offset, derived from the full expression range.
// NOTE(review): the local declarations (divot/offsets/line/column) and the
// `return column;` are elided from this listing.
3137 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
3144 expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
// Maps a bytecode offset to its source expression range, translating the
// unlinked code's relative values into absolute source coordinates.
3148 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column)
3150 m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
3151 divot += m_sourceOffset;
// On the first line the column is offset by where the code starts in the line;
// subsequent lines are 1-based.
3152 column += line ? 1 : firstLineColumnOffset();
3153 line += ownerScriptExecutable()->firstLine();
// Scans the instruction stream for an op_debug whose source position matches
// the given line (and column, unless the column is unspecified). NOTE(review):
// the `int unused;` declaration, the matching `return true;`, and the final
// `return false;` are elided from this listing.
3156 bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
3158 Interpreter* interpreter = vm()->interpreter;
3159 const Instruction* begin = instructions().begin();
3160 const Instruction* end = instructions().end();
3161 for (const Instruction* it = begin; it != end;) {
3162 OpcodeID opcodeID = interpreter->getOpcodeID(it->u.opcode);
3163 if (opcodeID == op_debug) {
3164 unsigned bytecodeOffset = it - begin;
3166 unsigned opDebugLine;
3167 unsigned opDebugColumn;
3168 expressionRangeForBytecodeOffset(bytecodeOffset, unused, unused, unused, opDebugLine, opDebugColumn);
3169 if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
// Advance by the opcode's operand count to reach the next instruction.
3172 it += opcodeLengths[opcodeID];
// Releases excess vector capacity. EarlyShrink additionally compacts tables
// that are safe to move only before anything has taken pointers into them.
3177 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
3179 m_rareCaseProfiles.shrinkToFit();
3180 m_resultProfiles.shrinkToFit();
3182 if (shrinkMode == EarlyShrink) {
3183 m_constantRegisters.shrinkToFit();
3184 m_constantsSourceCodeRepresentation.shrinkToFit();
// NOTE(review): the `if (m_rareData)` guard for these lines is elided here.
3187 m_rareData->m_switchJumpTables.shrinkToFit();
3188 m_rareData->m_stringSwitchJumpTables.shrinkToFit();
3189 m_rareData->m_liveCalleeLocalsAtYield.shrinkToFit();
3191 } // else don't shrink these, because we would have already pointed pointers into these tables.
// Registers a JIT call site that now targets this block, first letting
// noticeIncomingCall update should-always-be-inlined heuristics.
3195 void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
3197 noticeIncomingCall(callerFrame);
3198 m_incomingCalls.push(incoming);
// Same as linkIncomingCall, but for polymorphic call stub nodes (JIT-only).
3201 void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
3203 noticeIncomingCall(callerFrame);
3204 m_incomingPolymorphicCalls.push(incoming);
3206 #endif // ENABLE(JIT)
// Unlinks every call site (LLInt, JIT, polymorphic) currently targeting this
// block, so future calls go back through the slow path / relink machinery.
// Each unlink() removes the entry from its list, hence the begin()-pop loops.
3208 void CodeBlock::unlinkIncomingCalls()
3210 while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
3211 m_incomingLLIntCalls.begin()->unlink();
// NOTE(review): the #if ENABLE(JIT) guard for the two loops below is elided.
3213 while (m_incomingCalls.begin() != m_incomingCalls.end())
3214 m_incomingCalls.begin()->unlink(*vm());
3215 while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
3216 m_incomingPolymorphicCalls.begin()->unlink(*vm());
3217 #endif // ENABLE(JIT)
// LLInt flavor of linkIncomingCall: registers an interpreter call-link record.
3220 void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
3222 noticeIncomingCall(callerFrame);
3223 m_incomingLLIntCalls.push(incoming);
// Asks the owner executable to create a replacement CodeBlock for the same
// specialization kind (call vs construct).
3226 CodeBlock* CodeBlock::newReplacement()
3228 return ownerScriptExecutable()->newReplacementCodeBlockFor(specializationKind());
// Returns the CodeBlock currently installed on the owner executable for this
// block's kind, dispatching on the concrete CodeBlock subclass via ClassInfo.
3232 CodeBlock* CodeBlock::replacement()
3234 const ClassInfo* classInfo = this->classInfo();
3236 if (classInfo == FunctionCodeBlock::info())
3237 return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
3239 if (classInfo == EvalCodeBlock::info())
3240 return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
3242 if (classInfo == ProgramCodeBlock::info())
3243 return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
3245 if (classInfo == ModuleProgramCodeBlock::info())
3246 return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock();
3248 #if ENABLE(WEBASSEMBLY)
// NOTE(review): the WebAssembly branch body and #endif are elided here.
3249 if (classInfo == WebAssemblyCodeBlock::info())
// Unknown CodeBlock subclass — a programming error.
3253 RELEASE_ASSERT_NOT_REACHED();
// Computes how far the DFG can take this block (compile and/or inline),
// dispatching on the concrete CodeBlock subclass. WebAssembly blocks are
// never DFG-compiled.
3257 DFG::CapabilityLevel CodeBlock::computeCapabilityLevel()
3259 const ClassInfo* classInfo = this->classInfo();
3261 if (classInfo == FunctionCodeBlock::info()) {
// Call and construct entry points have separate capability analyses.
3262 if (m_isConstructor)
3263 return DFG::functionForConstructCapabilityLevel(this);
3264 return DFG::functionForCallCapabilityLevel(this);
3267 if (classInfo == EvalCodeBlock::info())
3268 return DFG::evalCapabilityLevel(this);
3270 if (classInfo == ProgramCodeBlock::info())
3271 return DFG::programCapabilityLevel(this);
// Module code reuses the program capability analysis.
3273 if (classInfo == ModuleProgramCodeBlock::info())
3274 return DFG::programCapabilityLevel(this);
3276 #if ENABLE(WEBASSEMBLY)
3277 if (classInfo == WebAssemblyCodeBlock::info())
3278 return DFG::CannotCompile;
// Unknown CodeBlock subclass — a programming error.
3281 RELEASE_ASSERT_NOT_REACHED();
3282 return DFG::CannotCompile;
3285 #endif // ENABLE(JIT)
// Discards this (typically optimized) CodeBlock: invalidates its machine code
// so on-stack frames OSR-exit on return, optionally counts a reoptimization,
// and reinstalls the baseline alternative on the owner executable.
// NOTE(review): this listing elides several #if ENABLE(DFG_JIT) guards,
// UNUSED_PARAM lines, braces, and early returns within this function.
3287 void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
3289 #if !ENABLE(DFG_JIT)
3291 UNUSED_PARAM(detail);
3294 RELEASE_ASSERT(reason != Profiler::NotJettisoned);
// Diagnostic logging of what is being jettisoned and why.
3297 if (DFG::shouldDumpDisassembly()) {
3298 dataLog("Jettisoning ", *this);
3299 if (mode == CountReoptimization)
3300 dataLog(" and counting reoptimization");
3301 dataLog(" due to ", reason);
3303 dataLog(", ", *detail);
// For weak-reference jettison, dump which references actually died.
3307 if (reason == Profiler::JettisonDueToWeakReference) {
3308 if (DFG::shouldDumpDisassembly()) {
3309 dataLog(*this, " will be jettisoned because of the following dead references:\n");
3310 DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
3311 for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
3312 DFG::WeakReferenceTransition& transition = dfgCommon->transitions[i];
3313 JSCell* origin = transition.m_codeOrigin.get();
3314 JSCell* from = transition.m_from.get();
3315 JSCell* to = transition.m_to.get();
// Skip transitions that are still fully alive; only dead ones get reported.
3316 if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
3318 dataLog(" Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
3320 for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
3321 JSCell* weak = dfgCommon->weakReferences[i].get();
3322 if (Heap::isMarked(weak))
3324 dataLog(" Weak reference ", RawPointer(weak), ".\n");
3328 #endif // ENABLE(DFG_JIT)
// Jettisoning during GC must not trigger a nested collection.
3330 DeferGCForAWhile deferGC(*heap());
3332 // We want to accomplish two things here:
3333 // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
3334 // we should OSR exit at the top of the next bytecode instruction after the return.
3335 // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
3338 if (reason != Profiler::JettisonDueToOldAge) {
3339 if (Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get())
3340 compilation->setJettisonReason(reason, detail);
3342 // This accomplishes (1), and does its own book-keeping about whether it has already happened.
3343 if (!jitCode()->dfgCommon()->invalidate()) {
3344 // We've already been invalidated.
3345 RELEASE_ASSERT(this != replacement());
3350 if (DFG::shouldDumpDisassembly())
3351 dataLog(" Did invalidate ", *this, "\n");
3353 // Count the reoptimization if that's what the user wanted.
3354 if (mode == CountReoptimization) {
3355 // FIXME: Maybe this should call alternative().
3356 // https://bugs.webkit.org/show_bug.cgi?id=123677
3357 baselineAlternative()->countReoptimization();
3358 if (DFG::shouldDumpDisassembly())
3359 dataLog(" Did count reoptimization for ", *this, "\n");
3362 if (this != replacement()) {
3363 // This means that we were never the entrypoint. This can happen for OSR entry code
// Give the baseline alternative another chance to tier up later.
3369 alternative()->optimizeAfterWarmUp();
3371 if (reason != Profiler::JettisonDueToOldAge)
3372 tallyFrequentExitSites();
3373 #endif // ENABLE(DFG_JIT)
3375 // This accomplishes (2).
3376 ownerScriptExecutable()->installCode(
3377 m_globalObject->vm(), alternative(), codeType(), specializationKind());
3380 if (DFG::shouldDumpDisassembly())
3381 dataLog(" Did install baseline version of ", *this, "\n")
3382 #endif // ENABLE(DFG_JIT)
// Resolves the global object for a code origin: this block's own global unless
// the origin lies inside an inlined frame, which may belong to another global.
3385 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
3387 if (!codeOrigin.inlineCallFrame)
3388 return globalObject();
3389 return codeOrigin.inlineCallFrame->baselineCodeBlock->globalObject();
// Stack-walking functor used by noticeIncomingCall: starting from a given call
// frame, detects whether `codeBlock` already appears within the next
// `depthToCheck` frames (i.e. the call would be recursive). Members are
// mutable because StackVisitor invokes operator() as const.
3392 class RecursionCheckFunctor {
3394 RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
3395 : m_startCallFrame(startCallFrame)
3396 , m_codeBlock(codeBlock)
3397 , m_depthToCheck(depthToCheck)
3398 , m_foundStartCallFrame(false)
3399 , m_didRecurse(false)
3402 StackVisitor::Status operator()(StackVisitor& visitor) const
3404 CallFrame* currentCallFrame = visitor->callFrame();
// Ignore frames above the start frame; begin checking once it is reached.
3406 if (currentCallFrame == m_startCallFrame)
3407 m_foundStartCallFrame = true;
3409 if (m_foundStartCallFrame) {
// Same CodeBlock on the stack again => recursion detected; stop walking.
3410 if (visitor->callFrame()->codeBlock() == m_codeBlock) {
3411 m_didRecurse = true;
3412 return StackVisitor::Done;
// Give up (no recursion found) once the depth budget is exhausted.
3415 if (!m_depthToCheck--)
3416 return StackVisitor::Done;
3419 return StackVisitor::Continue;
3422 bool didRecurse() const { return m_didRecurse; }
3425 CallFrame* m_startCallFrame;
3426 CodeBlock* m_codeBlock;
3427 mutable unsigned m_depthToCheck;
3428 mutable bool m_foundStartCallFrame;
3429 mutable bool m_didRecurse;
// Called whenever a new call site links to this block. Maintains the
// "should always be inlined" (SABI) flag: clears it when any caller makes
// inlining unlikely — native caller, too-large caller, interpreter caller,
// already-optimized caller, non-function caller, recursion, or a caller that
// is not even a DFG candidate. NOTE(review): many early `return` statements
// between these checks are elided from this listing.
3432 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
3434 CodeBlock* callerCodeBlock = callerFrame->codeBlock();
3436 if (Options::verboseCallLink())
3437 dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n")
// Nothing to do once SABI has already been cleared.
3440 if (!m_shouldAlwaysBeInlined)
3443 if (!callerCodeBlock) {
3444 m_shouldAlwaysBeInlined = false;
3445 if (Options::verboseCallLink())
3446 dataLog(" Clearing SABI because caller is native.\n");
// Without baseline profiling we cannot make an inlining judgement yet.
3450 if (!hasBaselineJITProfiling())
3453 if (!DFG::mightInlineFunction(this))
3456 if (!canInline(capabilityLevelState()))
3459 if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
3460 m_shouldAlwaysBeInlined = false;
3461 if (Options::verboseCallLink())
3462 dataLog(" Clearing SABI because caller is too large.\n");
3466 if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
3467 // If the caller is still in the interpreter, then we can't expect inlining to
3468 // happen anytime soon. Assume it's profitable to optimize it separately. This
3469 // ensures that a function is SABI only if it is called no more frequently than
3470 // any of its callers.
3471 m_shouldAlwaysBeInlined = false;
3472 if (Options::verboseCallLink())
3473 dataLog(" Clearing SABI because caller is in LLInt.\n");
3477 if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
3478 m_shouldAlwaysBeInlined = false;
3479 if (Options::verboseCallLink())
// TODO(review): log message typo "bcause" — runtime string, fix upstream.
3480 dataLog(" Clearing SABI bcause caller was already optimized.\n");
3484 if (callerCodeBlock->codeType() != FunctionCode) {
3485 // If the caller is either eval or global code, assume that that won't be
3486 // optimized anytime soon. For eval code this is particularly true since we
3487 // delay eval optimization by a *lot*.
3488 m_shouldAlwaysBeInlined = false;
3489 if (Options::verboseCallLink())
3490 dataLog(" Clearing SABI because caller is not a function.\n");
3494 // Recursive calls won't be inlined.
3495 RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
3496 vm()->topCallFrame->iterate(functor);
3498 if (functor.didRecurse()) {
3499 if (Options::verboseCallLink())
3500 dataLog(" Clearing SABI because recursion was detected.\n");
3501 m_shouldAlwaysBeInlined = false;
// Caller's DFG capability should be known by now; log loudly if not.
3505 if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) {
3506 dataLog("In call from ", *callerCodeBlock, " ", callerFrame->codeOrigin(), " to ", *this, ": caller's DFG capability level is not set.\n");
3510 if (canCompile(callerCodeBlock->capabilityLevelState()))
3513 if (Options::verboseCallLink())
3514 dataLog(" Clearing SABI because the caller is not a DFG candidate.\n");
3516 m_shouldAlwaysBeInlined = false;
// Accessor for the reoptimization retry counter (JIT builds); clamped to the
// configured maximum by countReoptimization.
3520 unsigned CodeBlock::reoptimizationRetryCounter() const
3523 ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
3524 return m_reoptimizationRetryCounter;
3527 #endif // ENABLE(JIT)
// Records which callee-save registers this code uses, from a RegisterSet.
3531 void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters)
3533 m_calleeSaveRegisters = std::make_unique<RegisterAtOffsetList>(calleeSaveRegisters);
// Takes ownership of an already-built register/offset list.
3536 void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList)
3538 m_calleeSaveRegisters = WTFMove(registerAtOffsetList);
// Converts a count of CPU callee-save registers into the number of virtual
// Register slots needed, rounding the byte size up to a whole Register.
3541 static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters)
3543 static const unsigned cpuRegisterSize = sizeof(void*);
3544 return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * cpuRegisterSize) / sizeof(Register));
// Virtual-register slots reserved for LLInt/baseline callee-saves.
3548 size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()
3550 return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters());
// Virtual-register slots reserved for this block's own callee-save list.
3553 size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters()
3555 return roundCalleeSaveSpaceAsVirtualRegisters(m_calleeSaveRegisters->size());
// Bumps the reoptimization retry counter, saturating at the configured max so
// threshold scaling (1 << counter) stays bounded.
3558 void CodeBlock::countReoptimization()
3560 m_reoptimizationRetryCounter++;
3561 if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
3562 m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
// Estimates how many times this (baseline) block has been DFG-compiled:
// one if an optimized replacement exists, plus past reoptimizations. Under
// testTheFTL, FTL compilation status is used instead. NOTE(review): the
// early-return value inside the failed-FTL branch is elided from this listing.
3565 unsigned CodeBlock::numberOfDFGCompiles()
3567 ASSERT(JITCode::isBaselineCode(jitType()));
3568 if (Options::testTheFTL()) {
3569 if (m_didFailFTLCompilation)
3571 return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
3573 return (JITCode::isOptimizingJIT(replacement()->jitType()) ? 1 : 0) + m_reoptimizationRetryCounter;
// Optimization thresholds are multiplied by this per code type; eval code gets
// a larger multiplier to delay its optimization. NOTE(review): the default
// `return 1;` for non-eval code is elided from this listing.
3576 int32_t CodeBlock::codeTypeThresholdMultiplier() const
3578 if (codeType() == EvalCode)
3579 return Options::evalThresholdMultiplier();
// Scales the optimization threshold by instruction count via a fitted
// a*sqrt(x+b) + |c*x| + d curve (see derivation in the comments below).
3584 double CodeBlock::optimizationThresholdScalingFactor()
3586 // This expression arises from doing a least-squares fit of
3588 // F[x_] =: a * Sqrt[x + b] + Abs[c * x] + d
3590 // against the data points:
3593 // 10 0.9 (smallest reasonable code block)
3594 // 200 1.0 (typical small-ish code block)
3595 // 320 1.2 (something I saw in 3d-cube that I wanted to optimize)
3596 // 1268 5.0 (something I saw in 3d-cube that I didn't want to optimize)
3597 // 4000 5.5 (random large size, used to cause the function to converge to a shallow curve of some sort)
3598 // 10000 6.0 (similar to above)
3600 // I achieve the minimization using the following Mathematica code:
3602 // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
3604 // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
3607 // Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
3608 // {a, b, c, d}][[2]]
3610 // And the code below (to initialize a, b, c, d) is generated by:
3612 // Print["const double " <> ToString[#[[1]]] <> " = " <>
3613 // If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
3615 // We've long known the following to be true:
3616 // - Small code blocks are cheap to optimize and so we should do it sooner rather
3618 // - Large code blocks are expensive to optimize and so we should postpone doing so,
3619 // and sometimes have a large enough threshold that we never optimize them.
3620 // - The difference in cost is not totally linear because (a) just invoking the
3621 // DFG incurs some base cost and (b) for large code blocks there is enough slop
3622 // in the correlation between instruction count and the actual compilation cost
3623 // that for those large blocks, the instruction count should not have a strong
3624 // influence on our threshold.
3626 // I knew the goals but I didn't know how to achieve them; so I picked an interesting
3627 // example where the heuristics were right (code block in 3d-cube with instruction
3628 // count 320, which got compiled early as it should have been) and one where they were
3629 // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
3630 // to compile and didn't run often enough to warrant compilation in my opinion), and
3631 // then threw in additional data points that represented my own guess of what our
3632 // heuristics should do for some round-numbered examples.
3634 // The expression to which I decided to fit the data arose because I started with an
3635 // affine function, and then did two things: put the linear part in an Abs to ensure
3636 // that the fit didn't end up choosing a negative value of c (which would result in
3637 // the function turning over and going negative for large x) and I threw in a Sqrt
3638 // term because Sqrt represents my intuition that the function should be more sensitive
3639 // to small changes in small values of x, but less sensitive when x gets large.
3641 // Note that the current fit essentially eliminates the linear portion of the
3642 // expression (c == 0.0).
3643 const double a = 0.061504;
3644 const double b = 1.02406;
3645 const double c = 0.0;
3646 const double d = 0.825914;
3648 double instructionCount = this->instructionCount();
3650 ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
3652 double result = d + a * sqrt(instructionCount + b) + c * instructionCount;
3654 result *= codeTypeThresholdMultiplier();
3656 if (Options::verboseOSR()) {
3658 *this, ": instruction count is ", instructionCount,
3659 ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
// Clamps a floating-point threshold into the valid int32 counter range.
// NOTE(review): the `return 1;` for the below-1.0 branch is elided here.
3665 static int32_t clipThreshold(double threshold)
3667 if (threshold < 1.0)
3670 if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
3671 return std::numeric_limits<int32_t>::max();
3673 return static_cast<int32_t>(threshold);
// Scales a desired threshold by size (optimizationThresholdScalingFactor) and
// by past reoptimizations (doubling per retry), then clamps to int32.
3676 int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
3678 return clipThreshold(
3679 static_cast<double>(desiredThreshold) *
3680 optimizationThresholdScalingFactor() *
3681 (1 << reoptimizationRetryCounter()));
// Decides whether optimization should trigger now. If a concurrent DFG compile
// of this block has finished, arrange to use it on the next invocation;
// otherwise consult the execution counter. NOTE(review): the DFG_JIT guard and
// the return after optimizeNextInvocation() are elided from this listing.
3684 bool CodeBlock::checkIfOptimizationThresholdReached()
3687 if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
3688 if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
3689 == DFG::Worklist::Compiled) {
3690 optimizeNextInvocation();
3696 return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
// Zero threshold: the very next invocation takes the optimization slow path.
3699 void CodeBlock::optimizeNextInvocation()
3701 if (Options::verboseOSR())
3702 dataLog(*this, ": Optimizing next invocation.\n");
3703 m_jitExecuteCounter.setNewThreshold(0, this);
// Defers the execution counter indefinitely, suppressing optimization.
3706 void CodeBlock::dontOptimizeAnytimeSoon()
3708 if (Options::verboseOSR())
3709 dataLog(*this, ": Not optimizing anytime soon.\n");
3710 m_jitExecuteCounter.deferIndefinitely();
// Sets the counter to the standard warm-up threshold, size/retry adjusted.
3713 void CodeBlock::optimizeAfterWarmUp()
3715 if (Options::verboseOSR())
3716 dataLog(*this, ": Optimizing after warm-up.\n");
3718 m_jitExecuteCounter.setNewThreshold(
3719 adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
// Like optimizeAfterWarmUp, but with the longer warm-up threshold.
3723 void CodeBlock::optimizeAfterLongWarmUp()
3725 if (Options::verboseOSR())
3726 dataLog(*this, ": Optimizing after long warm-up.\n");
3728 m_jitExecuteCounter.setNewThreshold(
3729 adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
// Sets the counter to the short "soon" threshold, size/retry adjusted.
3733 void CodeBlock::optimizeSoon()
3735 if (Options::verboseOSR())
3736 dataLog(*this, ": Optimizing soon.\n");
3738 m_jitExecuteCounter.setNewThreshold(
3739 adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
// Called from another thread to force the next execution onto the counter's
// slow path (e.g. to pick up a finished concurrent compile).
3743 void CodeBlock::forceOptimizationSlowPathConcurrently()
3745 if (Options::verboseOSR())
3746 dataLog(*this, ": Forcing slow path concurrently.\n");
3747 m_jitExecuteCounter.forceSlowPathConcurrently();
3751 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
3753 JITCode::JITType type = jitType();
3754 if (type != JITCode::BaselineJIT) {
3755 dataLog(*this, ": expected to have baseline code but have ", type, "\n");
3756 RELEASE_ASSERT_NOT_REACHED();
3759 CodeBlock* theReplacement = replacement();
3760 if ((result == CompilationSuccessful) != (theReplacement != this)) {
3761 dataLog(*this, ": we have result = ", result, " but ");
3762 if (theReplacement == this)
3763 dataLog("we are our own replacement.\n");
3765 dataLog("our replacement is ", pointerDump(theReplacement), "\n");
3766 RELEASE_ASSERT_NOT_REACHED();
3770 case CompilationSuccessful:
3771 RELEASE_ASSERT(JITCode::isOptimizingJIT(replacement()->jitType()));
3772 optimizeNextInvocation();