2 * Copyright (C) 2008-2010, 2012-2015 Apple Inc. All rights reserved.
3 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
15 * its contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "CodeBlock.h"
33 #include "BasicBlockLocation.h"
34 #include "BytecodeGenerator.h"
35 #include "BytecodeUseDef.h"
36 #include "CallLinkStatus.h"
37 #include "DFGCapabilities.h"
38 #include "DFGCommon.h"
39 #include "DFGDriver.h"
40 #include "DFGJITCode.h"
41 #include "DFGWorklist.h"
43 #include "FunctionExecutableDump.h"
44 #include "GetPutInfo.h"
45 #include "InlineCallFrame.h"
46 #include "Interpreter.h"
49 #include "JSCJSValue.h"
50 #include "JSFunction.h"
51 #include "JSLexicalEnvironment.h"
52 #include "JSModuleEnvironment.h"
53 #include "LLIntEntrypoint.h"
54 #include "LowLevelInterpreter.h"
55 #include "JSCInlines.h"
56 #include "PolymorphicAccess.h"
57 #include "ProfilerDatabase.h"
58 #include "ReduceWhitespace.h"
60 #include "SlotVisitorInlines.h"
61 #include "StackVisitor.h"
62 #include "TypeLocationCache.h"
63 #include "TypeProfiler.h"
64 #include "UnlinkedInstructionStream.h"
65 #include <wtf/BagToHashMap.h>
66 #include <wtf/CommaPrinter.h>
67 #include <wtf/StringExtras.h>
68 #include <wtf/StringPrintStream.h>
69 #include <wtf/text/UniquedStringImpl.h>
72 #include "RegisterAtOffsetList.h"
76 #include "DFGOperations.h"
80 #include "FTLJITCode.h"
85 CString CodeBlock::inferredName() const
93 return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
98 return CString("", 0);
102 bool CodeBlock::hasHash() const
107 bool CodeBlock::isSafeToComputeHash() const
109 return !isCompilationThread();
112 CodeBlockHash CodeBlock::hash() const
115 RELEASE_ASSERT(isSafeToComputeHash());
116 m_hash = CodeBlockHash(ownerScriptExecutable()->source(), specializationKind());
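// Returns the source text that tools should show for this block. For function
// code, the unlinked executable's offsets are rebased against the linked
// source's start offset so the returned range spans from the function's name
// through the end of its body.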
121 CString CodeBlock::sourceCodeForTools() const
123 if (codeType() != FunctionCode)
124 return ownerScriptExecutable()->source().toUTF8();
126 SourceProvider* provider = source();
127 FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
128 UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
129 unsigned unlinkedStartOffset = unlinked->startOffset();
130 unsigned linkedStartOffset = executable->source().startOffset();
131 int delta = linkedStartOffset - unlinkedStartOffset;
132 unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
133 unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
136 provider->source().impl()->utf8ForRange(rangeStart, rangeEnd - rangeStart));
139 CString CodeBlock::sourceCodeOnOneLine() const
141 return reduceWhitespace(sourceCodeForTools());
144 CString CodeBlock::hashAsStringIfPossible() const
146 if (hasHash() || isSafeToComputeHash())
147 return toCString(hash());
151 void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
153 out.print(inferredName(), "#", hashAsStringIfPossible());
154 out.print(":[", RawPointer(this), "->");
156 out.print(RawPointer(m_alternative.get()), "->");
157 out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());
159 if (codeType() == FunctionCode)
160 out.print(specializationKind());
161 out.print(", ", instructionCount());
162 if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
163 out.print(" (ShouldAlwaysBeInlined)");
164 if (ownerScriptExecutable()->neverInline())
165 out.print(" (NeverInline)");
166 if (ownerScriptExecutable()->neverOptimize())
167 out.print(" (NeverOptimize)");
168 if (ownerScriptExecutable()->didTryToEnterInLoop())
169 out.print(" (DidTryToEnterInLoop)");
170 if (ownerScriptExecutable()->isStrictMode())
171 out.print(" (StrictMode)");
172 if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
173 out.print(" (FTLFail)");
174 if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
175 out.print(" (HadFTLReplacement)");
179 void CodeBlock::dump(PrintStream& out) const
181 dumpAssumingJITType(out, jitType());
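// Operand-naming helpers: registerName() renders a virtual register,
// constantName() shows a constant's value together with its register, and
// idName() tags an identifier with its table index so a dump can be
// cross-referenced with the Identifiers and Constants sections printed at the
// end of dumpBytecode().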
184 static CString idName(int id0, const Identifier& ident)
186 return toCString(ident.impl(), "(@id", id0, ")");
189 CString CodeBlock::registerName(int r) const
191 if (isConstantRegisterIndex(r))
192 return constantName(r);
194 return toCString(VirtualRegister(r));
197 CString CodeBlock::constantName(int index) const
199 JSValue value = getConstant(index);
200 return toCString(value, "(", VirtualRegister(index), ")");
203 static CString regexpToSourceString(RegExp* regExp)
205 char postfix[5] = { '/', 0, 0, 0, 0 };
206 int index = 1;
207 if (regExp->global())
208 postfix[index++] = 'g';
209 if (regExp->ignoreCase())
210 postfix[index++] = 'i';
211 if (regExp->multiline())
212 postfix[index] = 'm';
214 return toCString("/", regExp->pattern().impl(), postfix);
217 static CString regexpName(int re, RegExp* regexp)
219 return toCString(regexpToSourceString(regexp), "(@re", re, ")");
222 NEVER_INLINE static const char* debugHookName(int debugHookID)
224 switch (static_cast<DebugHookID>(debugHookID)) {
225 case DidEnterCallFrame:
226 return "didEnterCallFrame";
227 case WillLeaveCallFrame:
228 return "willLeaveCallFrame";
229 case WillExecuteStatement:
230 return "willExecuteStatement";
231 case WillExecuteProgram:
232 return "willExecuteProgram";
233 case DidExecuteProgram:
234 return "didExecuteProgram";
235 case DidReachBreakpoint:
236 return "didReachBreakpoint";
239 RELEASE_ASSERT_NOT_REACHED();
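// The print*Op helpers below consume their operands directly from the
// instruction stream: each "(++it)->u.operand" advances the shared cursor, so
// when a helper returns, |it| points at the last operand it printed and the
// caller can continue from there (e.g. to dump trailing profiling slots).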
243 void CodeBlock::printUnaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
245 int r0 = (++it)->u.operand;
246 int r1 = (++it)->u.operand;
248 printLocationAndOp(out, exec, location, it, op);
249 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
252 void CodeBlock::printBinaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
254 int r0 = (++it)->u.operand;
255 int r1 = (++it)->u.operand;
256 int r2 = (++it)->u.operand;
257 printLocationAndOp(out, exec, location, it, op);
258 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
261 void CodeBlock::printConditionalJump(PrintStream& out, ExecState* exec, const Instruction*, const Instruction*& it, int location, const char* op)
263 int r0 = (++it)->u.operand;
264 int offset = (++it)->u.operand;
265 printLocationAndOp(out, exec, location, it, op);
266 out.printf("%s, %d(->%d)", registerName(r0).data(), offset, location + offset);
269 void CodeBlock::printGetByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it)
272 switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
276 case op_get_array_length:
280 RELEASE_ASSERT_NOT_REACHED();
281 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
285 int r0 = (++it)->u.operand;
286 int r1 = (++it)->u.operand;
287 int id0 = (++it)->u.operand;
288 printLocationAndOp(out, exec, location, it, op);
289 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
290 it += 4; // Increment up to the value profiler.
293 static void dumpStructure(PrintStream& out, const char* name, Structure* structure, const Identifier& ident)
298 out.printf("%s = %p", name, structure);
300 PropertyOffset offset = structure->getConcurrently(ident.impl());
301 if (offset != invalidOffset)
302 out.printf(" (offset = %d)", offset);
305 static void dumpChain(PrintStream& out, StructureChain* chain, const Identifier& ident)
307 out.printf("chain = %p: [", chain);
309 for (WriteBarrier<Structure>* currentStructure = chain->head();
311 ++currentStructure) {
316 dumpStructure(out, "struct", currentStructure->get(), ident);
321 void CodeBlock::printGetByIdCacheStatus(PrintStream& out, ExecState* exec, int location, const StubInfoMap& map)
323 Instruction* instruction = instructions().begin() + location;
325 const Identifier& ident = identifier(instruction[3].u.operand);
327 UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.
329 if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_array_length)
330 out.printf(" llint(array_length)");
331 else if (StructureID structureID = instruction[4].u.structureID) {
332 Structure* structure = m_vm->heap.structureIDTable().get(structureID);
333 out.printf(" llint(");
334 dumpStructure(out, "struct", structure, ident);
339 if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
340 StructureStubInfo& stubInfo = *stubPtr;
341 if (stubInfo.resetByGC)
342 out.print(" (Reset By GC)");
347 Structure* baseStructure = nullptr;
348 PolymorphicAccess* stub = nullptr;
350 switch (stubInfo.cacheType) {
351 case CacheType::GetByIdSelf:
353 baseStructure = stubInfo.u.byIdSelf.baseObjectStructure.get();
355 case CacheType::Stub:
357 stub = stubInfo.u.stub;
359 case CacheType::Unset:
363 RELEASE_ASSERT_NOT_REACHED();
369 dumpStructure(out, "struct", baseStructure, ident);
373 out.print(", ", *stub);
383 void CodeBlock::printPutByIdCacheStatus(PrintStream& out, int location, const StubInfoMap& map)
385 Instruction* instruction = instructions().begin() + location;
387 const Identifier& ident = identifier(instruction[2].u.operand);
389 UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.
391 out.print(", ", instruction[8].u.putByIdFlags);
393 if (StructureID structureID = instruction[4].u.structureID) {
394 Structure* structure = m_vm->heap.structureIDTable().get(structureID);
395 out.print(" llint(");
396 if (StructureID newStructureID = instruction[6].u.structureID) {
397 Structure* newStructure = m_vm->heap.structureIDTable().get(newStructureID);
398 dumpStructure(out, "prev", structure, ident);
400 dumpStructure(out, "next", newStructure, ident);
401 if (StructureChain* chain = instruction[7].u.structureChain.get()) {
403 dumpChain(out, chain, ident);
406 dumpStructure(out, "struct", structure, ident);
411 if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
412 StructureStubInfo& stubInfo = *stubPtr;
413 if (stubInfo.resetByGC)
414 out.print(" (Reset By GC)");
419 switch (stubInfo.cacheType) {
420 case CacheType::PutByIdReplace:
421 out.print("replace, ");
422 dumpStructure(out, "struct", stubInfo.u.byIdSelf.baseObjectStructure.get(), ident);
424 case CacheType::Stub: {
425 out.print("stub, ", *stubInfo.u.stub);
428 case CacheType::Unset:
432 RELEASE_ASSERT_NOT_REACHED();
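// printCallOp() dumps a call-like opcode's operands followed by what the call
// caches know: the last-seen callee recorded by the LLInt, the callee recorded
// by the JIT's CallLinkInfo, and (except for FTL code blocks) the
// CallLinkStatus computed for this call site.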
443 void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap& map)
445 int dst = (++it)->u.operand;
446 int func = (++it)->u.operand;
447 int argCount = (++it)->u.operand;
448 int registerOffset = (++it)->u.operand;
449 printLocationAndOp(out, exec, location, it, op);
450 out.printf("%s, %s, %d, %d", registerName(dst).data(), registerName(func).data(), argCount, registerOffset);
451 if (cacheDumpMode == DumpCaches) {
452 LLIntCallLinkInfo* callLinkInfo = it[1].u.callLinkInfo;
453 if (callLinkInfo->lastSeenCallee) {
455 " llint(%p, exec %p)",
456 callLinkInfo->lastSeenCallee.get(),
457 callLinkInfo->lastSeenCallee->executable());
460 if (CallLinkInfo* info = map.get(CodeOrigin(location))) {
461 JSFunction* target = info->lastSeenCallee();
463 out.printf(" jit(%p, exec %p)", target, target->executable());
466 if (jitType() != JITCode::FTLJIT)
467 out.print(" status(", CallLinkStatus::computeFor(this, location, map), ")");
474 dumpArrayProfiling(out, it, hasPrintedProfiling);
475 dumpValueProfiling(out, it, hasPrintedProfiling);
478 void CodeBlock::printPutByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
480 int r0 = (++it)->u.operand;
481 int id0 = (++it)->u.operand;
482 int r1 = (++it)->u.operand;
483 printLocationAndOp(out, exec, location, it, op);
484 out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data());
488 void CodeBlock::dumpSource()
490 dumpSource(WTF::dataFile());
493 void CodeBlock::dumpSource(PrintStream& out)
495 ScriptExecutable* executable = ownerScriptExecutable();
496 if (executable->isFunctionExecutable()) {
497 FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
498 String source = functionExecutable->source().provider()->getRange(
499 functionExecutable->parametersStartOffset(),
500 functionExecutable->typeProfilingEndOffset() + 1); // Type profiling end offset is the character before the '}'.
502 out.print("function ", inferredName(), source);
505 out.print(executable->source().toString());
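// dumpBytecode() prints a header with the instruction and register counts,
// then one line per instruction, and finally the identifier, constant, regexp,
// exception-handler, and switch-table sections. An instruction line looks
// roughly like this (illustrative, not verbatim output):
//   [  12] get_by_id         loc5, loc4, foo(@id0)  ...cache and profiling notes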
508 void CodeBlock::dumpBytecode()
510 dumpBytecode(WTF::dataFile());
513 void CodeBlock::dumpBytecode(PrintStream& out)
515 // We only use the ExecState* for things that don't actually lead to JS execution,
516 // like converting a JSString to a String. Hence the globalExec is appropriate.
517 ExecState* exec = m_globalObject->globalExec();
519 size_t instructionCount = 0;
521 for (size_t i = 0; i < instructions().size(); i += opcodeLengths[exec->interpreter()->getOpcodeID(instructions()[i].u.opcode)])
526 ": %lu m_instructions; %lu bytes; %d parameter(s); %d callee register(s); %d variable(s)",
527 static_cast<unsigned long>(instructions().size()),
528 static_cast<unsigned long>(instructions().size() * sizeof(Instruction)),
529 m_numParameters, m_numCalleeRegisters, m_numVars);
530 if (needsActivation() && codeType() == FunctionCode)
531 out.printf("; lexical environment in r%d", activationRegister().offset());
534 StubInfoMap stubInfos;
535 CallLinkInfoMap callLinkInfos;
536 getStubInfoMap(stubInfos);
537 getCallLinkInfoMap(callLinkInfos);
539 const Instruction* begin = instructions().begin();
540 const Instruction* end = instructions().end();
541 for (const Instruction* it = begin; it != end; ++it)
542 dumpBytecode(out, exec, begin, it, stubInfos, callLinkInfos);
544 if (numberOfIdentifiers()) {
545 out.printf("\nIdentifiers:\n");
548 out.printf(" id%u = %s\n", static_cast<unsigned>(i), identifier(i).string().utf8().data());
550 } while (i != numberOfIdentifiers());
553 if (!m_constantRegisters.isEmpty()) {
554 out.printf("\nConstants:\n");
557 const char* sourceCodeRepresentationDescription = nullptr;
558 switch (m_constantsSourceCodeRepresentation[i]) {
559 case SourceCodeRepresentation::Double:
560 sourceCodeRepresentationDescription = ": in source as double";
562 case SourceCodeRepresentation::Integer:
563 sourceCodeRepresentationDescription = ": in source as integer";
565 case SourceCodeRepresentation::Other:
566 sourceCodeRepresentationDescription = "";
569 out.printf(" k%u = %s%s\n", static_cast<unsigned>(i), toCString(m_constantRegisters[i].get()).data(), sourceCodeRepresentationDescription);
571 } while (i < m_constantRegisters.size());
574 if (size_t count = m_unlinkedCode->numberOfRegExps()) {
575 out.printf("\nm_regexps:\n");
578 out.printf(" re%u = %s\n", static_cast<unsigned>(i), regexpToSourceString(m_unlinkedCode->regexp(i)).data());
583 if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
584 out.printf("\nException Handlers:\n");
587 HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
588 out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] } %s\n",
589 i + 1, handler.start, handler.end, handler.target, handler.typeName());
591 } while (i < m_rareData->m_exceptionHandlers.size());
594 if (m_rareData && !m_rareData->m_switchJumpTables.isEmpty()) {
595 out.printf("Switch Jump Tables:\n");
598 out.printf(" %1d = {\n", i);
600 Vector<int32_t>::const_iterator end = m_rareData->m_switchJumpTables[i].branchOffsets.end();
601 for (Vector<int32_t>::const_iterator iter = m_rareData->m_switchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) {
604 out.printf("\t\t%4d => %04d\n", entry + m_rareData->m_switchJumpTables[i].min, *iter);
608 } while (i < m_rareData->m_switchJumpTables.size());
611 if (m_rareData && !m_rareData->m_stringSwitchJumpTables.isEmpty()) {
612 out.printf("\nString Switch Jump Tables:\n");
615 out.printf(" %1d = {\n", i);
616 StringJumpTable::StringOffsetTable::const_iterator end = m_rareData->m_stringSwitchJumpTables[i].offsetTable.end();
617 for (StringJumpTable::StringOffsetTable::const_iterator iter = m_rareData->m_stringSwitchJumpTables[i].offsetTable.begin(); iter != end; ++iter)
618 out.printf("\t\t\"%s\" => %04d\n", iter->key->utf8().data(), iter->value.branchOffset);
621 } while (i < m_rareData->m_stringSwitchJumpTables.size());
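// The helpers below append profiling annotations to an instruction line.
// beginDumpProfiling() manages the separator between the instruction text and
// its annotations; the dump*Profiling() functions take the ConcurrentJITLocker
// so they can safely read value and array profiles that compilation threads
// may be updating concurrently.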
627 void CodeBlock::beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling)
629 if (hasPrintedProfiling) {
635 hasPrintedProfiling = true;
638 void CodeBlock::dumpValueProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
640 ConcurrentJITLocker locker(m_lock);
643 CString description = it->u.profile->briefDescription(locker);
644 if (!description.length())
646 beginDumpProfiling(out, hasPrintedProfiling);
647 out.print(description);
650 void CodeBlock::dumpArrayProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
652 ConcurrentJITLocker locker(m_lock);
655 if (!it->u.arrayProfile)
657 CString description = it->u.arrayProfile->briefDescription(locker, this);
658 if (!description.length())
660 beginDumpProfiling(out, hasPrintedProfiling);
661 out.print(description);
664 void CodeBlock::dumpRareCaseProfile(PrintStream& out, const char* name, RareCaseProfile* profile, bool& hasPrintedProfiling)
666 if (!profile || !profile->m_counter)
669 beginDumpProfiling(out, hasPrintedProfiling);
670 out.print(name, profile->m_counter);
673 void CodeBlock::printLocationAndOp(PrintStream& out, ExecState*, int location, const Instruction*&, const char* op)
675 out.printf("[%4d] %-17s ", location, op);
678 void CodeBlock::printLocationOpAndRegisterOperand(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, int operand)
680 printLocationAndOp(out, exec, location, it, op);
681 out.printf("%s", registerName(operand).data());
684 void CodeBlock::dumpBytecode(
685 PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it,
686 const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
688 int location = it - begin;
689 bool hasPrintedProfiling = false;
690 OpcodeID opcode = exec->interpreter()->getOpcodeID(it->u.opcode);
693 printLocationAndOp(out, exec, location, it, "enter");
697 int r0 = (++it)->u.operand;
698 printLocationOpAndRegisterOperand(out, exec, location, it, "get_scope", r0);
701 case op_load_arrowfunction_this: {
702 int r0 = (++it)->u.operand;
703 printLocationOpAndRegisterOperand(out, exec, location, it, "load_arrowfunction_this", r0);
706 case op_create_direct_arguments: {
707 int r0 = (++it)->u.operand;
708 printLocationAndOp(out, exec, location, it, "create_direct_arguments");
709 out.printf("%s", registerName(r0).data());
712 case op_create_scoped_arguments: {
713 int r0 = (++it)->u.operand;
714 int r1 = (++it)->u.operand;
715 printLocationAndOp(out, exec, location, it, "create_scoped_arguments");
716 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
719 case op_create_out_of_band_arguments: {
720 int r0 = (++it)->u.operand;
721 printLocationAndOp(out, exec, location, it, "create_out_of_band_arguments");
722 out.printf("%s", registerName(r0).data());
725 case op_create_this: {
726 int r0 = (++it)->u.operand;
727 int r1 = (++it)->u.operand;
728 unsigned inferredInlineCapacity = (++it)->u.operand;
729 unsigned cachedFunction = (++it)->u.operand;
730 printLocationAndOp(out, exec, location, it, "create_this");
731 out.printf("%s, %s, %u, %u", registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity, cachedFunction);
735 int r0 = (++it)->u.operand;
736 printLocationOpAndRegisterOperand(out, exec, location, it, "to_this", r0);
737 Structure* structure = (++it)->u.structure.get();
739 out.print(", cache(struct = ", RawPointer(structure), ")");
740 out.print(", ", (++it)->u.toThisStatus);
744 int r0 = (++it)->u.operand;
745 printLocationOpAndRegisterOperand(out, exec, location, it, "op_check_tdz", r0);
748 case op_new_object: {
749 int r0 = (++it)->u.operand;
750 unsigned inferredInlineCapacity = (++it)->u.operand;
751 printLocationAndOp(out, exec, location, it, "new_object");
752 out.printf("%s, %u", registerName(r0).data(), inferredInlineCapacity);
753 ++it; // Skip object allocation profile.
757 int dst = (++it)->u.operand;
758 int argv = (++it)->u.operand;
759 int argc = (++it)->u.operand;
760 printLocationAndOp(out, exec, location, it, "new_array");
761 out.printf("%s, %s, %d", registerName(dst).data(), registerName(argv).data(), argc);
762 ++it; // Skip array allocation profile.
765 case op_new_array_with_size: {
766 int dst = (++it)->u.operand;
767 int length = (++it)->u.operand;
768 printLocationAndOp(out, exec, location, it, "new_array_with_size");
769 out.printf("%s, %s", registerName(dst).data(), registerName(length).data());
770 ++it; // Skip array allocation profile.
773 case op_new_array_buffer: {
774 int dst = (++it)->u.operand;
775 int argv = (++it)->u.operand;
776 int argc = (++it)->u.operand;
777 printLocationAndOp(out, exec, location, it, "new_array_buffer");
778 out.printf("%s, %d, %d", registerName(dst).data(), argv, argc);
779 ++it; // Skip array allocation profile.
782 case op_new_regexp: {
783 int r0 = (++it)->u.operand;
784 int re0 = (++it)->u.operand;
785 printLocationAndOp(out, exec, location, it, "new_regexp");
786 out.printf("%s, ", registerName(r0).data());
787 if (re0 >= 0 && re0 < (int)m_unlinkedCode->numberOfRegExps())
788 out.printf("%s", regexpName(re0, regexp(re0)).data());
790 out.printf("bad_regexp(%d)", re0);
794 int r0 = (++it)->u.operand;
795 int r1 = (++it)->u.operand;
796 printLocationAndOp(out, exec, location, it, "mov");
797 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
800 case op_profile_type: {
801 int r0 = (++it)->u.operand;
806 printLocationAndOp(out, exec, location, it, "op_profile_type");
807 out.printf("%s", registerName(r0).data());
810 case op_profile_control_flow: {
811 BasicBlockLocation* basicBlockLocation = (++it)->u.basicBlockLocation;
812 printLocationAndOp(out, exec, location, it, "profile_control_flow");
813 out.printf("[%d, %d]", basicBlockLocation->startOffset(), basicBlockLocation->endOffset());
817 printUnaryOp(out, exec, location, it, "not");
821 printBinaryOp(out, exec, location, it, "eq");
825 printUnaryOp(out, exec, location, it, "eq_null");
829 printBinaryOp(out, exec, location, it, "neq");
833 printUnaryOp(out, exec, location, it, "neq_null");
837 printBinaryOp(out, exec, location, it, "stricteq");
841 printBinaryOp(out, exec, location, it, "nstricteq");
845 printBinaryOp(out, exec, location, it, "less");
849 printBinaryOp(out, exec, location, it, "lesseq");
853 printBinaryOp(out, exec, location, it, "greater");
857 printBinaryOp(out, exec, location, it, "greatereq");
861 int r0 = (++it)->u.operand;
862 printLocationOpAndRegisterOperand(out, exec, location, it, "inc", r0);
866 int r0 = (++it)->u.operand;
867 printLocationOpAndRegisterOperand(out, exec, location, it, "dec", r0);
871 printUnaryOp(out, exec, location, it, "to_number");
875 printUnaryOp(out, exec, location, it, "to_string");
879 printUnaryOp(out, exec, location, it, "negate");
883 printBinaryOp(out, exec, location, it, "add");
888 printBinaryOp(out, exec, location, it, "mul");
893 printBinaryOp(out, exec, location, it, "div");
898 printBinaryOp(out, exec, location, it, "mod");
902 printBinaryOp(out, exec, location, it, "sub");
907 printBinaryOp(out, exec, location, it, "lshift");
911 printBinaryOp(out, exec, location, it, "rshift");
915 printBinaryOp(out, exec, location, it, "urshift");
919 printBinaryOp(out, exec, location, it, "bitand");
924 printBinaryOp(out, exec, location, it, "bitxor");
929 printBinaryOp(out, exec, location, it, "bitor");
933 case op_check_has_instance: {
934 int r0 = (++it)->u.operand;
935 int r1 = (++it)->u.operand;
936 int r2 = (++it)->u.operand;
937 int offset = (++it)->u.operand;
938 printLocationAndOp(out, exec, location, it, "check_has_instance");
939 out.printf("%s, %s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), offset, location + offset);
942 case op_instanceof: {
943 int r0 = (++it)->u.operand;
944 int r1 = (++it)->u.operand;
945 int r2 = (++it)->u.operand;
946 printLocationAndOp(out, exec, location, it, "instanceof");
947 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
951 printUnaryOp(out, exec, location, it, "unsigned");
955 printUnaryOp(out, exec, location, it, "typeof");
958 case op_is_undefined: {
959 printUnaryOp(out, exec, location, it, "is_undefined");
962 case op_is_boolean: {
963 printUnaryOp(out, exec, location, it, "is_boolean");
967 printUnaryOp(out, exec, location, it, "is_number");
971 printUnaryOp(out, exec, location, it, "is_string");
975 printUnaryOp(out, exec, location, it, "is_object");
978 case op_is_object_or_null: {
979 printUnaryOp(out, exec, location, it, "is_object_or_null");
982 case op_is_function: {
983 printUnaryOp(out, exec, location, it, "is_function");
987 printBinaryOp(out, exec, location, it, "in");
991 case op_get_array_length: {
992 printGetByIdOp(out, exec, location, it);
993 printGetByIdCacheStatus(out, exec, location, stubInfos);
994 dumpValueProfiling(out, it, hasPrintedProfiling);
998 printPutByIdOp(out, exec, location, it, "put_by_id");
999 printPutByIdCacheStatus(out, location, stubInfos);
1002 case op_put_getter_by_id: {
1003 int r0 = (++it)->u.operand;
1004 int id0 = (++it)->u.operand;
1005 int n0 = (++it)->u.operand;
1006 int r1 = (++it)->u.operand;
1007 printLocationAndOp(out, exec, location, it, "put_getter_by_id");
1008 out.printf("%s, %s, %d, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data());
1011 case op_put_setter_by_id: {
1012 int r0 = (++it)->u.operand;
1013 int id0 = (++it)->u.operand;
1014 int n0 = (++it)->u.operand;
1015 int r1 = (++it)->u.operand;
1016 printLocationAndOp(out, exec, location, it, "put_setter_by_id");
1017 out.printf("%s, %s, %d, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data());
1020 case op_put_getter_setter: {
1021 int r0 = (++it)->u.operand;
1022 int id0 = (++it)->u.operand;
1023 int n0 = (++it)->u.operand;
1024 int r1 = (++it)->u.operand;
1025 int r2 = (++it)->u.operand;
1026 printLocationAndOp(out, exec, location, it, "put_getter_setter");
1027 out.printf("%s, %s, %d, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data(), registerName(r2).data());
1030 case op_put_getter_by_val: {
1031 int r0 = (++it)->u.operand;
1032 int r1 = (++it)->u.operand;
1033 int n0 = (++it)->u.operand;
1034 int r2 = (++it)->u.operand;
1035 printLocationAndOp(out, exec, location, it, "put_getter_by_val");
1036 out.printf("%s, %s, %d, %s", registerName(r0).data(), registerName(r1).data(), n0, registerName(r2).data());
1039 case op_put_setter_by_val: {
1040 int r0 = (++it)->u.operand;
1041 int r1 = (++it)->u.operand;
1042 int n0 = (++it)->u.operand;
1043 int r2 = (++it)->u.operand;
1044 printLocationAndOp(out, exec, location, it, "put_setter_by_val");
1045 out.printf("%s, %s, %d, %s", registerName(r0).data(), registerName(r1).data(), n0, registerName(r2).data());
1048 case op_del_by_id: {
1049 int r0 = (++it)->u.operand;
1050 int r1 = (++it)->u.operand;
1051 int id0 = (++it)->u.operand;
1052 printLocationAndOp(out, exec, location, it, "del_by_id");
1053 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
1056 case op_get_by_val: {
1057 int r0 = (++it)->u.operand;
1058 int r1 = (++it)->u.operand;
1059 int r2 = (++it)->u.operand;
1060 printLocationAndOp(out, exec, location, it, "get_by_val");
1061 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1062 dumpArrayProfiling(out, it, hasPrintedProfiling);
1063 dumpValueProfiling(out, it, hasPrintedProfiling);
1066 case op_put_by_val: {
1067 int r0 = (++it)->u.operand;
1068 int r1 = (++it)->u.operand;
1069 int r2 = (++it)->u.operand;
1070 printLocationAndOp(out, exec, location, it, "put_by_val");
1071 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1072 dumpArrayProfiling(out, it, hasPrintedProfiling);
1075 case op_put_by_val_direct: {
1076 int r0 = (++it)->u.operand;
1077 int r1 = (++it)->u.operand;
1078 int r2 = (++it)->u.operand;
1079 printLocationAndOp(out, exec, location, it, "put_by_val_direct");
1080 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1081 dumpArrayProfiling(out, it, hasPrintedProfiling);
1084 case op_del_by_val: {
1085 int r0 = (++it)->u.operand;
1086 int r1 = (++it)->u.operand;
1087 int r2 = (++it)->u.operand;
1088 printLocationAndOp(out, exec, location, it, "del_by_val");
1089 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1092 case op_put_by_index: {
1093 int r0 = (++it)->u.operand;
1094 unsigned n0 = (++it)->u.operand;
1095 int r1 = (++it)->u.operand;
1096 printLocationAndOp(out, exec, location, it, "put_by_index");
1097 out.printf("%s, %u, %s", registerName(r0).data(), n0, registerName(r1).data());
1101 int offset = (++it)->u.operand;
1102 printLocationAndOp(out, exec, location, it, "jmp");
1103 out.printf("%d(->%d)", offset, location + offset);
1107 printConditionalJump(out, exec, begin, it, location, "jtrue");
1111 printConditionalJump(out, exec, begin, it, location, "jfalse");
1115 printConditionalJump(out, exec, begin, it, location, "jeq_null");
1118 case op_jneq_null: {
1119 printConditionalJump(out, exec, begin, it, location, "jneq_null");
1123 int r0 = (++it)->u.operand;
1124 Special::Pointer pointer = (++it)->u.specialPointer;
1125 int offset = (++it)->u.operand;
1126 printLocationAndOp(out, exec, location, it, "jneq_ptr");
1127 out.printf("%s, %d (%p), %d(->%d)", registerName(r0).data(), pointer, m_globalObject->actualPointerFor(pointer), offset, location + offset);
1131 int r0 = (++it)->u.operand;
1132 int r1 = (++it)->u.operand;
1133 int offset = (++it)->u.operand;
1134 printLocationAndOp(out, exec, location, it, "jless");
1135 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1139 int r0 = (++it)->u.operand;
1140 int r1 = (++it)->u.operand;
1141 int offset = (++it)->u.operand;
1142 printLocationAndOp(out, exec, location, it, "jlesseq");
1143 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1147 int r0 = (++it)->u.operand;
1148 int r1 = (++it)->u.operand;
1149 int offset = (++it)->u.operand;
1150 printLocationAndOp(out, exec, location, it, "jgreater");
1151 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1154 case op_jgreatereq: {
1155 int r0 = (++it)->u.operand;
1156 int r1 = (++it)->u.operand;
1157 int offset = (++it)->u.operand;
1158 printLocationAndOp(out, exec, location, it, "jgreatereq");
1159 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1163 int r0 = (++it)->u.operand;
1164 int r1 = (++it)->u.operand;
1165 int offset = (++it)->u.operand;
1166 printLocationAndOp(out, exec, location, it, "jnless");
1167 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1171 int r0 = (++it)->u.operand;
1172 int r1 = (++it)->u.operand;
1173 int offset = (++it)->u.operand;
1174 printLocationAndOp(out, exec, location, it, "jnlesseq");
1175 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1178 case op_jngreater: {
1179 int r0 = (++it)->u.operand;
1180 int r1 = (++it)->u.operand;
1181 int offset = (++it)->u.operand;
1182 printLocationAndOp(out, exec, location, it, "jngreater");
1183 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1186 case op_jngreatereq: {
1187 int r0 = (++it)->u.operand;
1188 int r1 = (++it)->u.operand;
1189 int offset = (++it)->u.operand;
1190 printLocationAndOp(out, exec, location, it, "jngreatereq");
1191 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1194 case op_loop_hint: {
1195 printLocationAndOp(out, exec, location, it, "loop_hint");
1198 case op_switch_imm: {
1199 int tableIndex = (++it)->u.operand;
1200 int defaultTarget = (++it)->u.operand;
1201 int scrutineeRegister = (++it)->u.operand;
1202 printLocationAndOp(out, exec, location, it, "switch_imm");
1203 out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1206 case op_switch_char: {
1207 int tableIndex = (++it)->u.operand;
1208 int defaultTarget = (++it)->u.operand;
1209 int scrutineeRegister = (++it)->u.operand;
1210 printLocationAndOp(out, exec, location, it, "switch_char");
1211 out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1214 case op_switch_string: {
1215 int tableIndex = (++it)->u.operand;
1216 int defaultTarget = (++it)->u.operand;
1217 int scrutineeRegister = (++it)->u.operand;
1218 printLocationAndOp(out, exec, location, it, "switch_string");
1219 out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1223 int r0 = (++it)->u.operand;
1224 int r1 = (++it)->u.operand;
1225 int f0 = (++it)->u.operand;
1226 printLocationAndOp(out, exec, location, it, "new_func");
1227 out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
1230 case op_new_arrow_func_exp: {
1231 int r0 = (++it)->u.operand;
1232 int r1 = (++it)->u.operand;
1233 int f0 = (++it)->u.operand;
1234 int r2 = (++it)->u.operand;
1235 printLocationAndOp(out, exec, location, it, "op_new_arrow_func_exp");
1236 out.printf("%s, %s, f%d, %s", registerName(r0).data(), registerName(r1).data(), f0, registerName(r2).data());
1239 case op_new_func_exp: {
1240 int r0 = (++it)->u.operand;
1241 int r1 = (++it)->u.operand;
1242 int f0 = (++it)->u.operand;
1243 printLocationAndOp(out, exec, location, it, "new_func_exp");
1244 out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
1248 printCallOp(out, exec, location, it, "call", DumpCaches, hasPrintedProfiling, callLinkInfos);
1251 case op_tail_call: {
1252 printCallOp(out, exec, location, it, "tail_call", DumpCaches, hasPrintedProfiling, callLinkInfos);
1255 case op_call_eval: {
1256 printCallOp(out, exec, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling, callLinkInfos);
1260 case op_construct_varargs:
1261 case op_call_varargs:
1262 case op_tail_call_varargs: {
1263 int result = (++it)->u.operand;
1264 int callee = (++it)->u.operand;
1265 int thisValue = (++it)->u.operand;
1266 int arguments = (++it)->u.operand;
1267 int firstFreeRegister = (++it)->u.operand;
1268 int varArgOffset = (++it)->u.operand;
1270 printLocationAndOp(out, exec, location, it, opcode == op_call_varargs ? "call_varargs" : opcode == op_construct_varargs ? "construct_varargs" : "tail_call_varargs");
1271 out.printf("%s, %s, %s, %s, %d, %d", registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister, varArgOffset);
1272 dumpValueProfiling(out, it, hasPrintedProfiling);
1277 int r0 = (++it)->u.operand;
1278 printLocationOpAndRegisterOperand(out, exec, location, it, "ret", r0);
1281 case op_construct: {
1282 printCallOp(out, exec, location, it, "construct", DumpCaches, hasPrintedProfiling, callLinkInfos);
1286 int r0 = (++it)->u.operand;
1287 int r1 = (++it)->u.operand;
1288 int count = (++it)->u.operand;
1289 printLocationAndOp(out, exec, location, it, "strcat");
1290 out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), count);
1293 case op_to_primitive: {
1294 int r0 = (++it)->u.operand;
1295 int r1 = (++it)->u.operand;
1296 printLocationAndOp(out, exec, location, it, "to_primitive");
1297 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
1300 case op_get_enumerable_length: {
1301 int dst = it[1].u.operand;
1302 int base = it[2].u.operand;
1303 printLocationAndOp(out, exec, location, it, "op_get_enumerable_length");
1304 out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
1305 it += OPCODE_LENGTH(op_get_enumerable_length) - 1;
1308 case op_has_indexed_property: {
1309 int dst = it[1].u.operand;
1310 int base = it[2].u.operand;
1311 int propertyName = it[3].u.operand;
1312 ArrayProfile* arrayProfile = it[4].u.arrayProfile;
1313 printLocationAndOp(out, exec, location, it, "op_has_indexed_property");
1314 out.printf("%s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), arrayProfile);
1315 it += OPCODE_LENGTH(op_has_indexed_property) - 1;
1318 case op_has_structure_property: {
1319 int dst = it[1].u.operand;
1320 int base = it[2].u.operand;
1321 int propertyName = it[3].u.operand;
1322 int enumerator = it[4].u.operand;
1323 printLocationAndOp(out, exec, location, it, "op_has_structure_property");
1324 out.printf("%s, %s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(enumerator).data());
1325 it += OPCODE_LENGTH(op_has_structure_property) - 1;
1328 case op_has_generic_property: {
1329 int dst = it[1].u.operand;
1330 int base = it[2].u.operand;
1331 int propertyName = it[3].u.operand;
1332 printLocationAndOp(out, exec, location, it, "op_has_generic_property");
1333 out.printf("%s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data());
1334 it += OPCODE_LENGTH(op_has_generic_property) - 1;
1337 case op_get_direct_pname: {
1338 int dst = it[1].u.operand;
1339 int base = it[2].u.operand;
1340 int propertyName = it[3].u.operand;
1341 int index = it[4].u.operand;
1342 int enumerator = it[5].u.operand;
1343 ValueProfile* profile = it[6].u.profile;
1344 printLocationAndOp(out, exec, location, it, "op_get_direct_pname");
1345 out.printf("%s, %s, %s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(index).data(), registerName(enumerator).data(), profile);
1346 it += OPCODE_LENGTH(op_get_direct_pname) - 1;
1350 case op_get_property_enumerator: {
1351 int dst = it[1].u.operand;
1352 int base = it[2].u.operand;
1353 printLocationAndOp(out, exec, location, it, "op_get_property_enumerator");
1354 out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
1355 it += OPCODE_LENGTH(op_get_property_enumerator) - 1;
1358 case op_enumerator_structure_pname: {
1359 int dst = it[1].u.operand;
1360 int enumerator = it[2].u.operand;
1361 int index = it[3].u.operand;
1362 printLocationAndOp(out, exec, location, it, "op_enumerator_structure_pname");
1363 out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data());
1364 it += OPCODE_LENGTH(op_enumerator_structure_pname) - 1;
1367 case op_enumerator_generic_pname: {
1368 int dst = it[1].u.operand;
1369 int enumerator = it[2].u.operand;
1370 int index = it[3].u.operand;
1371 printLocationAndOp(out, exec, location, it, "op_enumerator_generic_pname");
1372 out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data());
1373 it += OPCODE_LENGTH(op_enumerator_generic_pname) - 1;
1376 case op_to_index_string: {
1377 int dst = it[1].u.operand;
1378 int index = it[2].u.operand;
1379 printLocationAndOp(out, exec, location, it, "op_to_index_string");
1380 out.printf("%s, %s", registerName(dst).data(), registerName(index).data());
1381 it += OPCODE_LENGTH(op_to_index_string) - 1;
1384 case op_push_with_scope: {
1385 int dst = (++it)->u.operand;
1386 int newScope = (++it)->u.operand;
1387 int currentScope = (++it)->u.operand;
1388 printLocationAndOp(out, exec, location, it, "push_with_scope");
1389 out.printf("%s, %s, %s", registerName(dst).data(), registerName(newScope).data(), registerName(currentScope).data());
1392 case op_get_parent_scope: {
1393 int dst = (++it)->u.operand;
1394 int parentScope = (++it)->u.operand;
1395 printLocationAndOp(out, exec, location, it, "get_parent_scope");
1396 out.printf("%s, %s", registerName(dst).data(), registerName(parentScope).data());
1399 case op_create_lexical_environment: {
1400 int dst = (++it)->u.operand;
1401 int scope = (++it)->u.operand;
1402 int symbolTable = (++it)->u.operand;
1403 int initialValue = (++it)->u.operand;
1404 printLocationAndOp(out, exec, location, it, "create_lexical_environment");
1405 out.printf("%s, %s, %s, %s",
1406 registerName(dst).data(), registerName(scope).data(), registerName(symbolTable).data(), registerName(initialValue).data());
1410 int r0 = (++it)->u.operand;
1411 int r1 = (++it)->u.operand;
1412 printLocationAndOp(out, exec, location, it, "catch");
1413 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
1417 int r0 = (++it)->u.operand;
1418 printLocationOpAndRegisterOperand(out, exec, location, it, "throw", r0);
1421 case op_throw_static_error: {
1422 int k0 = (++it)->u.operand;
1423 int k1 = (++it)->u.operand;
1424 printLocationAndOp(out, exec, location, it, "throw_static_error");
1425 out.printf("%s, %s", constantName(k0).data(), k1 ? "true" : "false");
1429 int debugHookID = (++it)->u.operand;
1430 int hasBreakpointFlag = (++it)->u.operand;
1431 printLocationAndOp(out, exec, location, it, "debug");
1432 out.printf("%s %d", debugHookName(debugHookID), hasBreakpointFlag);
1435 case op_profile_will_call: {
1436 int function = (++it)->u.operand;
1437 printLocationOpAndRegisterOperand(out, exec, location, it, "profile_will_call", function);
1440 case op_profile_did_call: {
1441 int function = (++it)->u.operand;
1442 printLocationOpAndRegisterOperand(out, exec, location, it, "profile_did_call", function);
1446 int r0 = (++it)->u.operand;
1447 printLocationOpAndRegisterOperand(out, exec, location, it, "end", r0);
1450 case op_resolve_scope: {
1451 int r0 = (++it)->u.operand;
1452 int scope = (++it)->u.operand;
1453 int id0 = (++it)->u.operand;
1454 ResolveType resolveType = static_cast<ResolveType>((++it)->u.operand);
1455 int depth = (++it)->u.operand;
1456 void* pointer = (++it)->u.pointer;
1457 printLocationAndOp(out, exec, location, it, "resolve_scope");
1458 out.printf("%s, %s, %s, <%s>, %d, %p", registerName(r0).data(), registerName(scope).data(), idName(id0, identifier(id0)).data(), resolveTypeName(resolveType), depth, pointer);
1461 case op_get_from_scope: {
1462 int r0 = (++it)->u.operand;
1463 int r1 = (++it)->u.operand;
1464 int id0 = (++it)->u.operand;
1465 GetPutInfo getPutInfo = GetPutInfo((++it)->u.operand);
1467 int operand = (++it)->u.operand; // Operand
1468 printLocationAndOp(out, exec, location, it, "get_from_scope");
1469 out.print(registerName(r0), ", ", registerName(r1));
1470 if (static_cast<unsigned>(id0) == UINT_MAX)
1471 out.print(", anonymous");
1473 out.print(", ", idName(id0, identifier(id0)));
1474 out.print(", ", getPutInfo.operand(), "<", resolveModeName(getPutInfo.resolveMode()), "|", resolveTypeName(getPutInfo.resolveType()), "|", initializationModeName(getPutInfo.initializationMode()), ">, ", operand);
1475 dumpValueProfiling(out, it, hasPrintedProfiling);
1478 case op_put_to_scope: {
1479 int r0 = (++it)->u.operand;
1480 int id0 = (++it)->u.operand;
1481 int r1 = (++it)->u.operand;
1482 GetPutInfo getPutInfo = GetPutInfo((++it)->u.operand);
1484 int operand = (++it)->u.operand; // Operand
1485 printLocationAndOp(out, exec, location, it, "put_to_scope");
1486 out.print(registerName(r0));
1487 if (static_cast<unsigned>(id0) == UINT_MAX)
1488 out.print(", anonymous");
1490 out.print(", ", idName(id0, identifier(id0)));
1491 out.print(", ", registerName(r1), ", ", getPutInfo.operand(), "<", resolveModeName(getPutInfo.resolveMode()), "|", resolveTypeName(getPutInfo.resolveType()), "|", initializationModeName(getPutInfo.initializationMode()), ">, <structure>, ", operand);
1494 case op_get_from_arguments: {
1495 int r0 = (++it)->u.operand;
1496 int r1 = (++it)->u.operand;
1497 int offset = (++it)->u.operand;
1498 printLocationAndOp(out, exec, location, it, "get_from_arguments");
1499 out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), offset);
1500 dumpValueProfiling(out, it, hasPrintedProfiling);
1503 case op_put_to_arguments: {
1504 int r0 = (++it)->u.operand;
1505 int offset = (++it)->u.operand;
1506 int r1 = (++it)->u.operand;
1507 printLocationAndOp(out, exec, location, it, "put_to_arguments");
1508 out.printf("%s, %d, %s", registerName(r0).data(), offset, registerName(r1).data());
1512 RELEASE_ASSERT_NOT_REACHED();
1515 dumpRareCaseProfile(out, "rare case: ", rareCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
1516 dumpRareCaseProfile(out, "special fast case: ", specialFastCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
1519 Vector<DFG::FrequentExitSite> exitSites = exitProfile().exitSitesFor(location);
1520 if (!exitSites.isEmpty()) {
1521 out.print(" !! frequent exits: ");
1523 for (unsigned i = 0; i < exitSites.size(); ++i)
1524 out.print(comma, exitSites[i].kind(), " ", exitSites[i].jitType());
1526 #else // ENABLE(DFG_JIT)
1527 UNUSED_PARAM(location);
1528 #endif // ENABLE(DFG_JIT)
1532 void CodeBlock::dumpBytecode(
1533 PrintStream& out, unsigned bytecodeOffset,
1534 const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
1536 ExecState* exec = m_globalObject->globalExec();
1537 const Instruction* it = instructions().begin() + bytecodeOffset;
1538 dumpBytecode(out, exec, instructions().begin(), it, stubInfos, callLinkInfos);
1541 #define FOR_EACH_MEMBER_VECTOR(macro) \
1542 macro(instructions) \
1543 macro(callLinkInfos) \
1544 macro(linkedCallerList) \
1545 macro(identifiers) \
1546 macro(functionExpressions) \
1547 macro(constantRegisters)
1549 #define FOR_EACH_MEMBER_VECTOR_RARE_DATA(macro) \
1552 macro(exceptionHandlers) \
1553 macro(switchJumpTables) \
1554 macro(stringSwitchJumpTables) \
1555 macro(evalCodeCache) \
1556 macro(expressionInfo) \
1558 macro(callReturnIndexVector)
1560 template<typename T>
1561 static size_t sizeInBytes(const Vector<T>& vector)
1563 return vector.capacity() * sizeof(T);
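// PutToScopeFireDetail is the FireDetail used when linking a put_to_scope
// causes a watchpoint to fire: its dump() names the owning executable and the
// identifier being stored so watchpoint logging can explain the fire. (This
// description is inferred from the class body; treat it as a sketch rather
// than authoritative documentation of FireDetail's use elsewhere.)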
1568 class PutToScopeFireDetail : public FireDetail {
1570 PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
1571 : m_codeBlock(codeBlock)
1576 virtual void dump(PrintStream& out) const override
1578 out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
1582 CodeBlock* m_codeBlock;
1583 const Identifier& m_ident;
1586 } // anonymous namespace
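// The CopyParsedBlockTag constructor duplicates an already-linked CodeBlock:
// it takes its instruction stream, constants, and function executables from
// |other| and copies its rare data, rather than re-linking from the
// UnlinkedCodeBlock.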
1588 CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
1589 : m_globalObject(other.m_globalObject)
1590 , m_heap(other.m_heap)
1591 , m_numCalleeRegisters(other.m_numCalleeRegisters)
1592 , m_numVars(other.m_numVars)
1593 , m_isConstructor(other.m_isConstructor)
1594 , m_shouldAlwaysBeInlined(true)
1595 , m_didFailFTLCompilation(false)
1596 , m_hasBeenCompiledWithFTL(false)
1597 , m_unlinkedCode(*other.m_vm, other.m_ownerExecutable.get(), other.m_unlinkedCode.get())
1598 , m_hasDebuggerStatement(false)
1599 , m_steppingMode(SteppingModeDisabled)
1600 , m_numBreakpoints(0)
1601 , m_ownerExecutable(*other.m_vm, other.m_ownerExecutable.get(), other.m_ownerExecutable.get())
1603 , m_instructions(other.m_instructions)
1604 , m_thisRegister(other.m_thisRegister)
1605 , m_scopeRegister(other.m_scopeRegister)
1606 , m_lexicalEnvironmentRegister(other.m_lexicalEnvironmentRegister)
1607 , m_isStrictMode(other.m_isStrictMode)
1608 , m_needsActivation(other.m_needsActivation)
1609 , m_source(other.m_source)
1610 , m_sourceOffset(other.m_sourceOffset)
1611 , m_firstLineColumnOffset(other.m_firstLineColumnOffset)
1612 , m_codeType(other.m_codeType)
1613 , m_constantRegisters(other.m_constantRegisters)
1614 , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
1615 , m_functionDecls(other.m_functionDecls)
1616 , m_functionExprs(other.m_functionExprs)
1617 , m_osrExitCounter(0)
1618 , m_optimizationDelayCounter(0)
1619 , m_reoptimizationRetryCounter(0)
1620 , m_creationTime(std::chrono::steady_clock::now())
1621 , m_hash(other.m_hash)
1623 , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
1626 m_visitStronglyHasBeenCalled.store(false, std::memory_order_relaxed);
1627 m_visitAggregateHasBeenCalled.store(false, std::memory_order_relaxed);
1629 ASSERT(m_heap->isDeferred());
1630 ASSERT(m_scopeRegister.isLocal());
1632 setNumParameters(other.numParameters());
1633 optimizeAfterWarmUp();
1636 if (other.m_rareData) {
1637 createRareDataIfNecessary();
1639 m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
1640 m_rareData->m_constantBuffers = other.m_rareData->m_constantBuffers;
1641 m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
1642 m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
1645 m_heap->m_codeBlocks.add(this);
1646 m_heap->reportExtraMemoryAllocated(sizeof(CodeBlock));
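// The main constructor links an UnlinkedCodeBlock against a concrete scope:
// it copies and patches the constant registers (cloning symbol tables), links
// function declarations and expressions, copies rare data such as exception
// handlers and switch jump tables, allocates the profiling metadata the
// bytecode expects, and then translates the unlinked instruction stream into
// linked Instructions that point at that metadata.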
1649 CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
1650 : m_globalObject(scope->globalObject()->vm(), ownerExecutable, scope->globalObject())
1651 , m_heap(&m_globalObject->vm().heap)
1652 , m_numCalleeRegisters(unlinkedCodeBlock->m_numCalleeRegisters)
1653 , m_numVars(unlinkedCodeBlock->m_numVars)
1654 , m_isConstructor(unlinkedCodeBlock->isConstructor())
1655 , m_shouldAlwaysBeInlined(true)
1656 , m_didFailFTLCompilation(false)
1657 , m_hasBeenCompiledWithFTL(false)
1658 , m_unlinkedCode(m_globalObject->vm(), ownerExecutable, unlinkedCodeBlock)
1659 , m_hasDebuggerStatement(false)
1660 , m_steppingMode(SteppingModeDisabled)
1661 , m_numBreakpoints(0)
1662 , m_ownerExecutable(m_globalObject->vm(), ownerExecutable, ownerExecutable)
1663 , m_vm(unlinkedCodeBlock->vm())
1664 , m_thisRegister(unlinkedCodeBlock->thisRegister())
1665 , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
1666 , m_lexicalEnvironmentRegister(unlinkedCodeBlock->activationRegister())
1667 , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
1668 , m_needsActivation(unlinkedCodeBlock->hasActivationRegister() && unlinkedCodeBlock->codeType() == FunctionCode)
1669 , m_source(sourceProvider)
1670 , m_sourceOffset(sourceOffset)
1671 , m_firstLineColumnOffset(firstLineColumnOffset)
1672 , m_codeType(unlinkedCodeBlock->codeType())
1673 , m_osrExitCounter(0)
1674 , m_optimizationDelayCounter(0)
1675 , m_reoptimizationRetryCounter(0)
1676 , m_creationTime(std::chrono::steady_clock::now())
1678 , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
1681 m_visitStronglyHasBeenCalled.store(false, std::memory_order_relaxed);
1682 m_visitAggregateHasBeenCalled.store(false, std::memory_order_relaxed);
1684 ASSERT(m_heap->isDeferred());
1685 ASSERT(m_scopeRegister.isLocal());
1688 setNumParameters(unlinkedCodeBlock->numParameters());
1690 if (vm()->typeProfiler() || vm()->controlFlowProfiler())
1691 vm()->functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(), ownerExecutable->typeProfilingEndOffset());
1693 setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation());
1694 if (unlinkedCodeBlock->usesGlobalObject())
1695 m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(*m_vm, ownerExecutable, m_globalObject.get());
1697 for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
1698 LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
1699 if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
1700 m_constantRegisters[registerIndex].set(*m_vm, ownerExecutable, m_globalObject->jsCellForLinkTimeConstant(type));
1703 HashSet<int, WTF::IntHash<int>, WTF::UnsignedWithZeroKeyHashTraits<int>> clonedConstantSymbolTables;
1705 HashSet<SymbolTable*> clonedSymbolTables;
1706 for (unsigned i = 0; i < m_constantRegisters.size(); i++) {
1707 if (m_constantRegisters[i].get().isEmpty())
1709 if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(m_constantRegisters[i].get())) {
1710 RELEASE_ASSERT(clonedSymbolTables.add(symbolTable).isNewEntry);
1711 if (m_vm->typeProfiler()) {
1712 ConcurrentJITLocker locker(symbolTable->m_lock);
1713 symbolTable->prepareForTypeProfiling(locker);
1715 m_constantRegisters[i].set(*m_vm, ownerExecutable, symbolTable->cloneScopePart(*m_vm));
1716 clonedConstantSymbolTables.add(i + FirstConstantRegisterIndex);
1721 // We already have the cloned symbol table for the module environment since we need to instantiate
1722 // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
1723 if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(unlinkedCodeBlock)) {
1724 SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
1725 if (m_vm->typeProfiler()) {
1726 ConcurrentJITLocker locker(clonedSymbolTable->m_lock);
1727 clonedSymbolTable->prepareForTypeProfiling(locker);
1729 replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
1732 m_functionDecls.resizeToFit(unlinkedCodeBlock->numberOfFunctionDecls());
1733 for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
1734 UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
1735 if (vm()->typeProfiler() || vm()->controlFlowProfiler())
1736 vm()->functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
1737 m_functionDecls[i].set(*m_vm, ownerExecutable, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
1740 m_functionExprs.resizeToFit(unlinkedCodeBlock->numberOfFunctionExprs());
1741 for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
1742 UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
1743 if (vm()->typeProfiler() || vm()->controlFlowProfiler())
1744 vm()->functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
1745 m_functionExprs[i].set(*m_vm, ownerExecutable, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
1748 if (unlinkedCodeBlock->hasRareData()) {
1749 createRareDataIfNecessary();
1750 if (size_t count = unlinkedCodeBlock->constantBufferCount()) {
1751 m_rareData->m_constantBuffers.grow(count);
1752 for (size_t i = 0; i < count; i++) {
1753 const UnlinkedCodeBlock::ConstantBuffer& buffer = unlinkedCodeBlock->constantBuffer(i);
1754 m_rareData->m_constantBuffers[i] = buffer;
1757 if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
1758 m_rareData->m_exceptionHandlers.resizeToFit(count);
1759 for (size_t i = 0; i < count; i++) {
1760 const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
1761 HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
1762 #if ENABLE(JIT)
1763 handler.initialize(unlinkedHandler, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(op_catch))));
1764 #else
1765 handler.initialize(unlinkedHandler);
1766 #endif
1770 if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
1771 m_rareData->m_stringSwitchJumpTables.grow(count);
1772 for (size_t i = 0; i < count; i++) {
1773 UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
1774 UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
1775 for (; ptr != end; ++ptr) {
1776 OffsetLocation offset;
1777 offset.branchOffset = ptr->value;
1778 m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
1783 if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
1784 m_rareData->m_switchJumpTables.grow(count);
1785 for (size_t i = 0; i < count; i++) {
1786 UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
1787 SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
1788 destTable.branchOffsets = sourceTable.branchOffsets;
1789 destTable.min = sourceTable.min;
1794 // Allocate metadata buffers for the bytecode
1795 if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
1796 m_llintCallLinkInfos.resizeToFit(size);
1797 if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
1798 m_arrayProfiles.grow(size);
1799 if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
1800 m_arrayAllocationProfiles.resizeToFit(size);
1801 if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
1802 m_valueProfiles.resizeToFit(size);
1803 if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
1804 m_objectAllocationProfiles.resizeToFit(size);
1807 setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters());
1810 // Copy and translate the UnlinkedInstructions
1811 unsigned instructionCount = unlinkedCodeBlock->instructions().count();
1812 UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions());
1814 // Keep track of the strongly referenced module environments.
1815 HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;
1817 Vector<Instruction, 0, UnsafeVectorOverflow> instructions(instructionCount);
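// Translate each unlinked instruction in place: the opcode is swapped for the interpreter's opcode
// (which may be a threaded-code label pointer when computed gotos are in use), operands are copied
// through, and the switch below wires per-opcode metadata - array/value profiles, LLInt call link
// infos, and resolved scope information - directly into the operand slots.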
1819 for (unsigned i = 0; !instructionReader.atEnd(); ) {
1820 const UnlinkedInstruction* pc = instructionReader.next();
1822 unsigned opLength = opcodeLength(pc[0].u.opcode);
1824 instructions[i] = vm()->interpreter->getOpcode(pc[0].u.opcode);
1825 for (size_t j = 1; j < opLength; ++j) {
1826 if (sizeof(int32_t) != sizeof(intptr_t))
1827 instructions[i + j].u.pointer = 0;
1828 instructions[i + j].u.operand = pc[j].u.operand;
1830 switch (pc[0].u.opcode) {
1831 case op_has_indexed_property: {
1832 int arrayProfileIndex = pc[opLength - 1].u.operand;
1833 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1835 instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
1838 case op_call_varargs:
1839 case op_tail_call_varargs:
1840 case op_construct_varargs:
1841 case op_get_by_val: {
1842 int arrayProfileIndex = pc[opLength - 2].u.operand;
1843 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1845 instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
1848 case op_get_direct_pname:
1850 case op_get_from_arguments: {
1851 ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
1852 ASSERT(profile->m_bytecodeOffset == -1);
1853 profile->m_bytecodeOffset = i;
1854 instructions[i + opLength - 1] = profile;
1857 case op_put_by_val: {
1858 int arrayProfileIndex = pc[opLength - 1].u.operand;
1859 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1860 instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
1863 case op_put_by_val_direct: {
1864 int arrayProfileIndex = pc[opLength - 1].u.operand;
1865 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1866 instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
1871 case op_new_array_buffer:
1872 case op_new_array_with_size: {
1873 int arrayAllocationProfileIndex = pc[opLength - 1].u.operand;
1874 instructions[i + opLength - 1] = &m_arrayAllocationProfiles[arrayAllocationProfileIndex];
1877 case op_new_object: {
1878 int objectAllocationProfileIndex = pc[opLength - 1].u.operand;
1879 ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex];
1880 int inferredInlineCapacity = pc[opLength - 2].u.operand;
1882 instructions[i + opLength - 1] = objectAllocationProfile;
1883 objectAllocationProfile->initialize(*vm(),
1884 ownerExecutable, m_globalObject->objectPrototype(), inferredInlineCapacity);
1890 case op_call_eval: {
1891 ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
1892 ASSERT(profile->m_bytecodeOffset == -1);
1893 profile->m_bytecodeOffset = i;
1894 instructions[i + opLength - 1] = profile;
1895 int arrayProfileIndex = pc[opLength - 2].u.operand;
1896 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1897 instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
1898 instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
1901 case op_construct: {
1902 instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
1903 ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
1904 ASSERT(profile->m_bytecodeOffset == -1);
1905 profile->m_bytecodeOffset = i;
1906 instructions[i + opLength - 1] = profile;
1909 case op_get_array_length:
1912 case op_create_lexical_environment: {
1913 int symbolTableIndex = pc[3].u.operand;
1914 RELEASE_ASSERT(clonedConstantSymbolTables.contains(symbolTableIndex));
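// resolve_scope, get_from_scope, and put_to_scope are partially resolved now, at link time:
// JSScope::abstractResolve determines what kind of variable is being accessed so the LLInt can take
// a fast path, and global accesses get their watchpoint sets stored right in the instruction stream.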
1918 case op_resolve_scope: {
1919 const Identifier& ident = identifier(pc[3].u.operand);
1920 ResolveType type = static_cast<ResolveType>(pc[4].u.operand);
1921 RELEASE_ASSERT(type != LocalClosureVar);
1922 int localScopeDepth = pc[5].u.operand;
1924 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, NotInitialization);
1925 instructions[i + 4].u.operand = op.type;
1926 instructions[i + 5].u.operand = op.depth;
1927 if (op.lexicalEnvironment) {
1928 if (op.type == ModuleVar) {
1929 // Keep the linked module environment strongly referenced.
1930 if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry)
1931 addConstant(op.lexicalEnvironment);
1932 instructions[i + 6].u.jsCell.set(*vm(), ownerExecutable, op.lexicalEnvironment);
1934 instructions[i + 6].u.symbolTable.set(*vm(), ownerExecutable, op.lexicalEnvironment->symbolTable());
1935 } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this))
1936 instructions[i + 6].u.jsCell.set(*vm(), ownerExecutable, constantScope);
1938 instructions[i + 6].u.pointer = nullptr;
1942 case op_get_from_scope: {
1943 ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
1944 ASSERT(profile->m_bytecodeOffset == -1);
1945 profile->m_bytecodeOffset = i;
1946 instructions[i + opLength - 1] = profile;
1948 // get_from_scope dst, scope, id, GetPutInfo, Structure, Operand
1950 int localScopeDepth = pc[5].u.operand;
1951 instructions[i + 5].u.pointer = nullptr;
1953 GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
1954 ASSERT(getPutInfo.initializationMode() == NotInitialization);
1955 if (getPutInfo.resolveType() == LocalClosureVar) {
1956 instructions[i + 4] = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
1960 const Identifier& ident = identifier(pc[3].u.operand);
1961 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, getPutInfo.resolveType(), NotInitialization);
1963 instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
1964 if (op.type == ModuleVar)
1965 instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();
1966 if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
1967 instructions[i + 5].u.watchpointSet = op.watchpointSet;
1968 else if (op.structure)
1969 instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
1970 instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
1974 case op_put_to_scope: {
1975 // put_to_scope scope, id, value, GetPutInfo, Structure, Operand
1976 GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
1977 if (getPutInfo.resolveType() == LocalClosureVar) {
1978 // Only do watching if the property we're putting to is not anonymous.
1979 if (static_cast<unsigned>(pc[2].u.operand) != UINT_MAX) {
1980 int symbolTableIndex = pc[5].u.operand;
1981 RELEASE_ASSERT(clonedConstantSymbolTables.contains(symbolTableIndex));
1982 SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
1983 const Identifier& ident = identifier(pc[2].u.operand);
1984 ConcurrentJITLocker locker(symbolTable->m_lock);
1985 auto iter = symbolTable->find(locker, ident.impl());
1986 RELEASE_ASSERT(iter != symbolTable->end(locker));
1987 iter->value.prepareToWatch();
1988 instructions[i + 5].u.watchpointSet = iter->value.watchpointSet();
1990 instructions[i + 5].u.watchpointSet = nullptr;
1994 const Identifier& ident = identifier(pc[2].u.operand);
1995 int localScopeDepth = pc[5].u.operand;
1996 instructions[i + 5].u.pointer = nullptr;
1997 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Put, getPutInfo.resolveType(), getPutInfo.initializationMode());
1999 instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();
2000 if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
2001 instructions[i + 5].u.watchpointSet = op.watchpointSet;
2002 else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
2003 if (op.watchpointSet)
2004 op.watchpointSet->invalidate(PutToScopeFireDetail(this, ident));
2005 } else if (op.structure)
2006 instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
2007 instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
2012 case op_profile_type: {
2013 RELEASE_ASSERT(vm()->typeProfiler());
2014 // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType?
2015 size_t instructionOffset = i + opLength - 1;
2016 unsigned divotStart, divotEnd;
2017 GlobalVariableID globalVariableID = 0;
2018 RefPtr<TypeSet> globalTypeSet;
2019 bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
2020 VirtualRegister profileRegister(pc[1].u.operand);
2021 ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand);
2022 SymbolTable* symbolTable = nullptr;
2025 case ProfileTypeBytecodeClosureVar: {
2026 const Identifier& ident = identifier(pc[4].u.operand);
2027 int localScopeDepth = pc[2].u.operand;
2028 ResolveType type = static_cast<ResolveType>(pc[5].u.operand);
2029 // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
2030 // we're abstractly "reading" from a JSScope.
2031 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, NotInitialization);
2033 if (op.type == ClosureVar || op.type == ModuleVar)
2034 symbolTable = op.lexicalEnvironment->symbolTable();
2035 else if (op.type == GlobalVar)
2036 symbolTable = m_globalObject.get()->symbolTable();
2038 UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl();
2040 ConcurrentJITLocker locker(symbolTable->m_lock);
2041 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
2042 symbolTable->prepareForTypeProfiling(locker);
2043 globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, *vm());
2044 globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, *vm());
2046 globalVariableID = TypeProfilerNoGlobalIDExists;
2050 case ProfileTypeBytecodeLocallyResolved: {
2051 int symbolTableIndex = pc[2].u.operand;
2052 RELEASE_ASSERT(clonedConstantSymbolTables.contains(symbolTableIndex));
2053 SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
2054 const Identifier& ident = identifier(pc[4].u.operand);
2055 ConcurrentJITLocker locker(symbolTable->m_lock);
2056 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
2057 globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), *vm());
2058 globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), *vm());
2062 case ProfileTypeBytecodeDoesNotHaveGlobalID:
2063 case ProfileTypeBytecodeFunctionArgument: {
2064 globalVariableID = TypeProfilerNoGlobalIDExists;
2067 case ProfileTypeBytecodeFunctionReturnStatement: {
2068 RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
2069 globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
2070 globalVariableID = TypeProfilerReturnStatement;
2071 if (!shouldAnalyze) {
2072 // Because a return statement can be added implicitly to return undefined at the end of a function,
2073 // and these nodes don't emit expression ranges because they aren't in the actual source text of
2074 // the user's program, give the type profiler some range to identify these return statements.
2075 // Currently, the text offset used for identification is the offset of the "f" in the "function" keyword,
2076 // and it is stored in TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
2077 divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset();
2078 shouldAnalyze = true;
2084 std::pair<TypeLocation*, bool> locationPair = vm()->typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
2085 ownerExecutable->sourceID(), divotStart, divotEnd, globalTypeSet, vm());
2086 TypeLocation* location = locationPair.first;
2087 bool isNewLocation = locationPair.second;
2089 if (flag == ProfileTypeBytecodeFunctionReturnStatement)
2090 location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset();
2092 if (shouldAnalyze && isNewLocation)
2093 vm()->typeProfiler()->insertNewLocation(location);
2095 instructions[i + 2].u.location = location;
2100 if (pc[1].u.index == DidReachBreakpoint)
2101 m_hasDebuggerStatement = true;
2111 if (vm()->controlFlowProfiler())
2112 insertBasicBlockBoundariesForControlFlowProfiler(instructions);
2114 m_instructions = WTF::RefCountedArray<Instruction>(instructions);
2116 // Set optimization thresholds only after m_instructions is initialized, since these
2117 // rely on the instruction count (and are in theory permitted to also inspect the
2118 // instruction stream to more accurately assess the cost of tier-up).
2119 optimizeAfterWarmUp();
2122 // If the concurrent thread will want the code block's hash, then compute it here
2123 // synchronously, before any compilation thread might ask for it.
2124 if (Options::alwaysComputeHash())
2125 hash();
2127 if (Options::dumpGeneratedBytecodes())
2128 dumpBytecode();
2130 m_heap->m_codeBlocks.add(this);
2131 m_heap->reportExtraMemoryAllocated(sizeof(CodeBlock) + m_instructions.size() * sizeof(Instruction));
2134 #if ENABLE(WEBASSEMBLY)
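// A WebAssembly CodeBlock has no bytecode to link; this constructor just initializes the fields the
// rest of the engine expects and registers the block with the heap.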
2135 CodeBlock::CodeBlock(WebAssemblyExecutable* ownerExecutable, VM& vm, JSGlobalObject* globalObject)
2136 : m_globalObject(globalObject->vm(), ownerExecutable, globalObject)
2137 , m_heap(&m_globalObject->vm().heap)
2138 , m_numCalleeRegisters(0)
2140 , m_isConstructor(false)
2141 , m_shouldAlwaysBeInlined(false)
2142 , m_didFailFTLCompilation(false)
2143 , m_hasBeenCompiledWithFTL(false)
2144 , m_hasDebuggerStatement(false)
2145 , m_steppingMode(SteppingModeDisabled)
2146 , m_numBreakpoints(0)
2147 , m_ownerExecutable(m_globalObject->vm(), ownerExecutable, ownerExecutable)
2149 , m_isStrictMode(false)
2150 , m_needsActivation(false)
2151 , m_codeType(FunctionCode)
2152 , m_osrExitCounter(0)
2153 , m_optimizationDelayCounter(0)
2154 , m_reoptimizationRetryCounter(0)
2155 , m_creationTime(std::chrono::steady_clock::now())
2157 , m_capabilityLevelState(DFG::CannotCompile)
2160 ASSERT(m_heap->isDeferred());
2162 m_heap->m_codeBlocks.add(this);
2163 m_heap->reportExtraMemoryAllocated(sizeof(CodeBlock));
2167 CodeBlock::~CodeBlock()
2169 if (m_vm->m_perBytecodeProfiler)
2170 m_vm->m_perBytecodeProfiler->notifyDestruction(this);
2172 #if ENABLE(VERBOSE_VALUE_PROFILE)
2173 dumpValueProfiles();
2175 while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
2176 m_incomingLLIntCalls.begin()->remove();
2178 // We may be destroyed before any CodeBlocks that refer to us are destroyed.
2179 // Consider that two CodeBlocks become unreachable at the same time. There
2180 // is no guarantee about the order in which the CodeBlocks are destroyed.
2181 // So, if we don't remove incoming calls, and get destroyed before the
2182 // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
2183 // destructor will try to remove nodes from our (no longer valid) linked list.
2184 while (m_incomingCalls.begin() != m_incomingCalls.end())
2185 m_incomingCalls.begin()->remove();
2186 while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
2187 m_incomingPolymorphicCalls.begin()->remove();
2189 // Note that our outgoing calls will be removed from other CodeBlocks'
2190 // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
2191 // destructors in m_callLinkInfos.
2193 for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter)
2195 #endif // ENABLE(JIT)
2198 void CodeBlock::setNumParameters(int newValue)
2200 m_numParameters = newValue;
2202 m_argumentValueProfiles.resizeToFit(newValue);
2205 void EvalCodeCache::visitAggregate(SlotVisitor& visitor)
2207 EvalCacheMap::iterator end = m_cacheMap.end();
2208 for (EvalCacheMap::iterator ptr = m_cacheMap.begin(); ptr != end; ++ptr)
2209 visitor.append(&ptr->value);
2212 CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
2215 if (jitType() != JITCode::DFGJIT)
2217 DFG::JITCode* jitCode = m_jitCode->dfg();
2218 return jitCode->osrEntryBlock.get();
2219 #else // ENABLE(FTL_JIT)
2221 #endif // ENABLE(FTL_JIT)
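// visitStrongly marks everything this CodeBlock references as if the block itself were known to be
// live. The atomic flag below makes it idempotent within a GC cycle: only the first caller does the work.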
2224 void CodeBlock::visitStrongly(SlotVisitor& visitor)
2226 bool setByMe = m_visitStronglyHasBeenCalled.compareExchangeStrong(false, true);
2230 visitAggregate(visitor);
2232 stronglyVisitStrongReferences(visitor);
2233 stronglyVisitWeakReferences(visitor);
2234 propagateTransitions(visitor);
2237 void CodeBlock::visitAggregate(SlotVisitor& visitor)
2239 // I may be asked to scan myself more than once, and it may even happen concurrently.
2240 // To this end, use an atomic operation to check (and set) if I've been called already.
2241 // Only one thread may proceed past this point - whichever one wins the atomic set race.
2242 bool setByMe = m_visitAggregateHasBeenCalled.compareExchangeStrong(false, true);
2246 if (!!m_alternative)
2247 m_alternative->visitAggregate(visitor);
2249 if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
2250 otherBlock->visitAggregate(visitor);
2252 visitor.reportExtraMemoryVisited(ownerExecutable(), sizeof(CodeBlock));
2254 visitor.reportExtraMemoryVisited(ownerExecutable(), m_jitCode->size());
2255 if (m_instructions.size()) {
2256 // Divide by refCount() because m_instructions points to something that is shared
2257 // by multiple CodeBlocks, and we only want to count it towards the heap size once.
2258 // Having each CodeBlock report only its proportional share of the size is one way
2259 // of accomplishing this.
2260 visitor.reportExtraMemoryVisited(ownerExecutable(), m_instructions.size() * sizeof(Instruction) / m_instructions.refCount());
2263 visitor.append(&m_unlinkedCode);
2265 // There are two things that may use unconditional finalizers: inline cache clearing
2266 // and jettisoning. The probability of us wanting to do at least one of those things
2267 // is probably quite close to 1. So we add one no matter what and when it runs, it
2268 // figures out whether it has any work to do.
2269 visitor.addUnconditionalFinalizer(this);
2271 m_allTransitionsHaveBeenMarked = false;
2273 if (shouldVisitStrongly()) {
2274 visitStrongly(visitor);
2278 if (!JITCode::isOptimizingJIT(jitType()))
2281 // There are two things that we use weak reference harvesters for: DFG fixpoint for
2282 // jettisoning, and trying to find structures that would be live based on some
2283 // inline cache. So it makes sense to register them regardless.
2284 visitor.addWeakReferenceHarvester(this);
2287 // We get here if we're live in the sense that our owner executable is live,
2288 // but we're not yet live for sure in another sense: we may yet decide that this
2289 // code block should be jettisoned based on its outgoing weak references being
2290 // stale. Set a flag to indicate that we're still assuming that we're dead, and
2291 // perform one round of determining if we're live. The GC may determine, based on
2292 // either us marking additional objects, or by other objects being marked for
2293 // other reasons, that this iteration should run again; it will notify us of this
2294 // decision by calling harvestWeakReferences().
2296 m_jitCode->dfgCommon()->livenessHasBeenProved = false;
2298 propagateTransitions(visitor);
2299 determineLiveness(visitor);
2300 #endif // ENABLE(DFG_JIT)
2303 bool CodeBlock::shouldVisitStrongly()
2305 if (Options::forceCodeBlockLiveness())
2308 if (shouldJettisonDueToOldAge())
2311 // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
2312 // their weak references go stale. So if a baseline JIT CodeBlock gets
2313 // scanned, we can assume that this means that it's live.
2314 if (!JITCode::isOptimizingJIT(jitType()))
2320 bool CodeBlock::isKnownToBeLiveDuringGC()
2322 // This should return true for:
2323 // - Code blocks that behave like normal objects - i.e. if they are referenced then they
2324 // are live.
2325 // - Code blocks that were running on the stack.
2326 // - Code blocks that survived the last GC if the current GC is an Eden GC. This is
2327 // because livenessHasBeenProved would have survived as true.
2328 // - Code blocks that don't have any dead weak references.
2330 if (m_visitStronglyHasBeenCalled.load(std::memory_order_relaxed))
2334 if (JITCode::isOptimizingJIT(jitType())) {
2335 if (m_jitCode->dfgCommon()->livenessHasBeenProved)
2343 bool CodeBlock::shouldJettisonDueToWeakReference()
2345 if (!JITCode::isOptimizingJIT(jitType()))
2347 return !isKnownToBeLiveDuringGC();
2350 bool CodeBlock::shouldJettisonDueToOldAge()
2352 if (m_visitStronglyHasBeenCalled.load(std::memory_order_relaxed))
2355 if (timeSinceCreation() < JITCode::timeToLive(jitType()))
2362 static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
2364 if (transition.m_codeOrigin && !Heap::isMarked(transition.m_codeOrigin.get()))
2367 if (!Heap::isMarked(transition.m_from.get()))
2372 #endif // ENABLE(DFG_JIT)
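// propagateTransitions walks every place this block caches a structure transition - LLInt put_by_id
// metadata, JIT stub access cases, and the DFG's transition list - and marks each transition's target
// once its source (and code origin, if any) is marked. If anything could not be marked yet,
// m_allTransitionsHaveBeenMarked stays false so the GC fixpoint revisits us.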
2374 void CodeBlock::propagateTransitions(SlotVisitor& visitor)
2376 UNUSED_PARAM(visitor);
2378 if (m_allTransitionsHaveBeenMarked)
2381 bool allAreMarkedSoFar = true;
2383 Interpreter* interpreter = m_vm->interpreter;
2384 if (jitType() == JITCode::InterpreterThunk) {
2385 const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
2386 for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
2387 Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
2388 switch (interpreter->getOpcodeID(instruction[0].u.opcode)) {
2389 case op_put_by_id: {
2390 StructureID oldStructureID = instruction[4].u.structureID;
2391 StructureID newStructureID = instruction[6].u.structureID;
2392 if (!oldStructureID || !newStructureID)
2394 Structure* oldStructure =
2395 m_vm->heap.structureIDTable().get(oldStructureID);
2396 Structure* newStructure =
2397 m_vm->heap.structureIDTable().get(newStructureID);
2398 if (Heap::isMarked(oldStructure))
2399 visitor.appendUnbarrieredReadOnlyPointer(newStructure);
2401 allAreMarkedSoFar = false;
2411 if (JITCode::isJIT(jitType())) {
2412 for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
2413 StructureStubInfo& stubInfo = **iter;
2414 if (stubInfo.cacheType != CacheType::Stub)
2416 PolymorphicAccess* list = stubInfo.u.stub;
2417 JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
2418 if (origin && !Heap::isMarked(origin)) {
2419 allAreMarkedSoFar = false;
2422 for (unsigned j = list->size(); j--;) {
2423 const AccessCase& access = list->at(j);
2424 if (access.type() != AccessCase::Transition)
2426 if (Heap::isMarked(access.structure()))
2427 visitor.appendUnbarrieredReadOnlyPointer(access.newStructure());
2429 allAreMarkedSoFar = false;
2433 #endif // ENABLE(JIT)
2436 if (JITCode::isOptimizingJIT(jitType())) {
2437 DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2439 for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
2440 if (shouldMarkTransition(dfgCommon->transitions[i])) {
2441 // If the following three things are live, then the target of the
2442 // transition is also live:
2444 // - This code block. We know it's live already because otherwise
2445 // we wouldn't be scanning ourselves.
2447 // - The code origin of the transition. Transitions may arise from
2448 // code that was inlined. They are not relevant if the user's
2449 // object that is required for the inlinee to run is no longer
2450 // live.
2452 // - The source of the transition. The transition checks if some
2453 // heap location holds the source, and if so, stores the target.
2454 // Hence the source must be live for the transition to be live.
2456 // We also short-circuit the liveness if the structure is harmless
2457 // to mark (i.e. its global object and prototype are both already
2458 // marked).
2460 visitor.append(&dfgCommon->transitions[i].m_to);
2462 allAreMarkedSoFar = false;
2465 #endif // ENABLE(DFG_JIT)
2467 if (allAreMarkedSoFar)
2468 m_allTransitionsHaveBeenMarked = true;
2471 void CodeBlock::determineLiveness(SlotVisitor& visitor)
2473 UNUSED_PARAM(visitor);
2476 // Check if we have any remaining work to do.
2477 DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2478 if (dfgCommon->livenessHasBeenProved)
2481 // Now check all of our weak references. If all of them are live, then we
2482 // have proved liveness and so we scan our strong references. If at end of
2483 // GC we still have not proved liveness, then this code block is toast.
2484 bool allAreLiveSoFar = true;
2485 for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
2486 if (!Heap::isMarked(dfgCommon->weakReferences[i].get())) {
2487 allAreLiveSoFar = false;
2491 if (allAreLiveSoFar) {
2492 for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
2493 if (!Heap::isMarked(dfgCommon->weakStructureReferences[i].get())) {
2494 allAreLiveSoFar = false;
2500 // If some weak references are dead, then this fixpoint iteration was
2501 // unsuccessful.
2502 if (!allAreLiveSoFar)
2505 // All weak references are live. Record this information so we don't
2506 // come back here again, and scan the strong references.
2507 dfgCommon->livenessHasBeenProved = true;
2508 stronglyVisitStrongReferences(visitor);
2509 #endif // ENABLE(DFG_JIT)
2512 void CodeBlock::visitWeakReferences(SlotVisitor& visitor)
2514 propagateTransitions(visitor);
2515 determineLiveness(visitor);
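// Once marking has finished, clear any LLInt inline cache that still refers to a dead structure,
// callee, or symbol table so the interpreter never reads a stale cell.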
2518 void CodeBlock::finalizeLLIntInlineCaches()
2520 #if ENABLE(WEBASSEMBLY)
2521 if (m_ownerExecutable->isWebAssemblyExecutable())
2525 Interpreter* interpreter = m_vm->interpreter;
2526 const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
2527 for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
2528 Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
2529 switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) {
2530 case op_get_by_id: {
2531 StructureID oldStructureID = curInstruction[4].u.structureID;
2532 if (!oldStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(oldStructureID)))
2534 if (Options::verboseOSR())
2535 dataLogF("Clearing LLInt property access.\n");
2536 curInstruction[4].u.structureID = 0;
2537 curInstruction[5].u.operand = 0;
2540 case op_put_by_id: {
2541 StructureID oldStructureID = curInstruction[4].u.structureID;
2542 StructureID newStructureID = curInstruction[6].u.structureID;
2543 StructureChain* chain = curInstruction[7].u.structureChain.get();
2544 if ((!oldStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(oldStructureID))) &&
2545 (!newStructureID || Heap::isMarked(m_vm->heap.structureIDTable().get(newStructureID))) &&
2546 (!chain || Heap::isMarked(chain)))
2548 if (Options::verboseOSR())
2549 dataLogF("Clearing LLInt put transition.\n");
2550 curInstruction[4].u.structureID = 0;
2551 curInstruction[5].u.operand = 0;
2552 curInstruction[6].u.structureID = 0;
2553 curInstruction[7].u.structureChain.clear();
2556 case op_get_array_length:
2559 if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
2561 if (Options::verboseOSR())
2562 dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
2563 curInstruction[2].u.structure.clear();
2564 curInstruction[3].u.toThisStatus = merge(
2565 curInstruction[3].u.toThisStatus, ToThisClearedByGC);
2567 case op_create_this: {
2568 auto& cacheWriteBarrier = curInstruction[4].u.jsCell;
2569 if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
2571 JSCell* cachedFunction = cacheWriteBarrier.get();
2572 if (Heap::isMarked(cachedFunction))
2574 if (Options::verboseOSR())
2575 dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
2576 cacheWriteBarrier.clear();
2579 case op_resolve_scope: {
2580 // Right now this isn't strictly necessary. Any symbol tables that this will refer to
2581 // are for outer functions, and we refer to those functions strongly, and they refer
2582 // to the symbol table strongly. But it's nice to be on the safe side.
2583 WriteBarrierBase<SymbolTable>& symbolTable = curInstruction[6].u.symbolTable;
2584 if (!symbolTable || Heap::isMarked(symbolTable.get()))
2586 if (Options::verboseOSR())
2587 dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
2588 symbolTable.clear();
2591 case op_get_from_scope:
2592 case op_put_to_scope: {
2593 GetPutInfo getPutInfo = GetPutInfo(curInstruction[4].u.operand);
2594 if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks
2595 || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
2597 WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
2598 if (!structure || Heap::isMarked(structure.get()))
2600 if (Options::verboseOSR())
2601 dataLogF("Clearing scope access with structure %p.\n", structure.get());
2606 OpcodeID opcodeID = interpreter->getOpcodeID(curInstruction[0].u.opcode);
2607 ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
2611 for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
2612 if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
2613 if (Options::verboseOSR())
2614 dataLog("Clearing LLInt call from ", *this, "\n");
2615 m_llintCallLinkInfos[i].unlink();
2617 if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
2618 m_llintCallLinkInfos[i].lastSeenCallee.clear();
2622 void CodeBlock::finalizeBaselineJITInlineCaches()
2625 for (auto iter = callLinkInfosBegin(); !!iter; ++iter)
2626 (*iter)->visitWeak(*vm());
2628 for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
2629 StructureStubInfo& stubInfo = **iter;
2630 stubInfo.visitWeakReferences(this);
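// finalizeUnconditionally is the unconditional finalizer registered in visitAggregate: it first
// decides whether the whole block must be jettisoned (dead weak references, or old age) and only
// otherwise sweeps the individual inline caches.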
2635 void CodeBlock::finalizeUnconditionally()
2638 if (shouldJettisonDueToWeakReference()) {
2639 jettison(Profiler::JettisonDueToWeakReference);
2642 #endif // ENABLE(DFG_JIT)
2644 if (shouldJettisonDueToOldAge()) {
2645 jettison(Profiler::JettisonDueToOldAge);
2649 if (JITCode::couldBeInterpreted(jitType()))
2650 finalizeLLIntInlineCaches();
2654 finalizeBaselineJITInlineCaches();
2658 void CodeBlock::getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result)
2661 toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
2663 UNUSED_PARAM(result);
2667 void CodeBlock::getStubInfoMap(StubInfoMap& result)
2669 ConcurrentJITLocker locker(m_lock);
2670 getStubInfoMap(locker, result);
2673 void CodeBlock::getCallLinkInfoMap(const ConcurrentJITLocker&, CallLinkInfoMap& result)
2676 toHashMap(m_callLinkInfos, getCallLinkInfoCodeOrigin, result);
2678 UNUSED_PARAM(result);
2682 void CodeBlock::getCallLinkInfoMap(CallLinkInfoMap& result)
2684 ConcurrentJITLocker locker(m_lock);
2685 getCallLinkInfoMap(locker, result);
2688 void CodeBlock::getByValInfoMap(const ConcurrentJITLocker&, ByValInfoMap& result)
2691 for (auto* byValInfo : m_byValInfos)
2692 result.add(CodeOrigin(byValInfo->bytecodeIndex), byValInfo);
2694 UNUSED_PARAM(result);
2698 void CodeBlock::getByValInfoMap(ByValInfoMap& result)
2700 ConcurrentJITLocker locker(m_lock);
2701 getByValInfoMap(locker, result);
2705 StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
2707 ConcurrentJITLocker locker(m_lock);
2708 return m_stubInfos.add(accessType);
2711 StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
2713 for (StructureStubInfo* stubInfo : m_stubInfos) {
2714 if (stubInfo->codeOrigin == codeOrigin)
2720 ByValInfo* CodeBlock::addByValInfo()
2722 ConcurrentJITLocker locker(m_lock);
2723 return m_byValInfos.add();
2726 CallLinkInfo* CodeBlock::addCallLinkInfo()
2728 ConcurrentJITLocker locker(m_lock);
2729 return m_callLinkInfos.add();
2732 CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
2734 for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
2735 if ((*iter)->codeOrigin() == CodeOrigin(index))
2742 void CodeBlock::visitOSRExitTargets(SlotVisitor& visitor)
2744 // We strongly visit OSR exits targets because we don't want to deal with
2745 // the complexity of generating an exit target CodeBlock on demand and
2746 // guaranteeing that it matches the details of the CodeBlock we compiled
2747 // the OSR exit against.
2749 alternative()->visitStrongly(visitor);
2752 DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2753 if (dfgCommon->inlineCallFrames) {
2754 for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) {
2755 ASSERT(inlineCallFrame->baselineCodeBlock());
2756 inlineCallFrame->baselineCodeBlock()->visitStrongly(visitor);
2762 void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
2764 visitor.append(&m_globalObject);
2765 visitor.append(&m_ownerExecutable);
2766 visitor.append(&m_unlinkedCode);
2768 m_rareData->m_evalCodeCache.visitAggregate(visitor);
2769 visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
2770 for (size_t i = 0; i < m_functionExprs.size(); ++i)
2771 visitor.append(&m_functionExprs[i]);
2772 for (size_t i = 0; i < m_functionDecls.size(); ++i)
2773 visitor.append(&m_functionDecls[i]);
2774 for (unsigned i = 0; i < m_objectAllocationProfiles.size(); ++i)
2775 m_objectAllocationProfiles[i].visitAggregate(visitor);
2778 if (JITCode::isOptimizingJIT(jitType()))
2779 visitOSRExitTargets(visitor);
2782 updateAllPredictions();
2785 void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor)
2787 UNUSED_PARAM(visitor);
2790 if (!JITCode::isOptimizingJIT(jitType()))
2793 DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2795 for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
2796 if (!!dfgCommon->transitions[i].m_codeOrigin)
2797 visitor.append(&dfgCommon->transitions[i].m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
2798 visitor.append(&dfgCommon->transitions[i].m_from);
2799 visitor.append(&dfgCommon->transitions[i].m_to);
2802 for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i)
2803 visitor.append(&dfgCommon->weakReferences[i]);
2805 for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i)
2806 visitor.append(&dfgCommon->weakStructureReferences[i]);
2808 dfgCommon->livenessHasBeenProved = true;
2812 CodeBlock* CodeBlock::baselineAlternative()
2815 CodeBlock* result = this;
2816 while (result->alternative())
2817 result = result->alternative();
2818 RELEASE_ASSERT(result);
2819 RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
2826 CodeBlock* CodeBlock::baselineVersion()
2829 if (JITCode::isBaselineCode(jitType()))
2831 CodeBlock* result = replacement();
2833 // This can happen if we're creating the original CodeBlock for an executable.
2834 // Assume that we're the baseline CodeBlock.
2835 RELEASE_ASSERT(jitType() == JITCode::None);
2838 result = result->baselineAlternative();
2846 bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
2848 return JITCode::isHigherTier(replacement()->jitType(), typeToReplace);
2851 bool CodeBlock::hasOptimizedReplacement()
2853 return hasOptimizedReplacement(jitType());
2857 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
2859 RELEASE_ASSERT(bytecodeOffset < instructions().size());
2860 return handlerForIndex(bytecodeOffset, requiredHandler);
2863 HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler)
2868 Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
2869 for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
2870 HandlerInfo& handler = exceptionHandlers[i];
2871 if ((requiredHandler == RequiredHandler::CatchHandler) && !handler.isCatchHandler())
2874 // Handlers are ordered innermost first, so the first handler we encounter
2875 // that contains the source address is the correct handler to use.
2876 // The index used is either a bytecode offset or a CallSiteIndex.
2877 if (handler.start <= index && handler.end > index)
2884 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
2886 RELEASE_ASSERT(bytecodeOffset < instructions().size());
2887 return ownerScriptExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
2890 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
2897 expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
2901 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column)
2903 m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
2904 divot += m_sourceOffset;
2905 column += line ? 1 : firstLineColumnOffset();
2906 line += ownerScriptExecutable()->firstLine();
2909 bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
2911 Interpreter* interpreter = vm()->interpreter;
2912 const Instruction* begin = instructions().begin();
2913 const Instruction* end = instructions().end();
2914 for (const Instruction* it = begin; it != end;) {
2915 OpcodeID opcodeID = interpreter->getOpcodeID(it->u.opcode);
2916 if (opcodeID == op_debug) {
2917 unsigned bytecodeOffset = it - begin;
2918 int unused;
2919 unsigned opDebugLine;
2920 unsigned opDebugColumn;
2921 expressionRangeForBytecodeOffset(bytecodeOffset, unused, unused, unused, opDebugLine, opDebugColumn);
2922 if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
2925 it += opcodeLengths[opcodeID];
2930 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
2932 m_rareCaseProfiles.shrinkToFit();
2933 m_specialFastCaseProfiles.shrinkToFit();
2935 if (shrinkMode == EarlyShrink) {
2936 m_constantRegisters.shrinkToFit();
2937 m_constantsSourceCodeRepresentation.shrinkToFit();
2940 m_rareData->m_switchJumpTables.shrinkToFit();
2941 m_rareData->m_stringSwitchJumpTables.shrinkToFit();
2943 } // else don't shrink these, because pointers into these tables may already have been handed out.
2947 void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
2949 noticeIncomingCall(callerFrame);
2950 m_incomingCalls.push(incoming);
2953 void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
2955 noticeIncomingCall(callerFrame);
2956 m_incomingPolymorphicCalls.push(incoming);
2958 #endif // ENABLE(JIT)
2960 void CodeBlock::unlinkIncomingCalls()
2962 while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
2963 m_incomingLLIntCalls.begin()->unlink();
2965 if (m_incomingCalls.isEmpty() && m_incomingPolymorphicCalls.isEmpty())
2967 while (m_incomingCalls.begin() != m_incomingCalls.end())
2968 m_incomingCalls.begin()->unlink(*vm());
2969 while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
2970 m_incomingPolymorphicCalls.begin()->unlink(*vm());
2971 #endif // ENABLE(JIT)
2974 void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
2976 noticeIncomingCall(callerFrame);
2977 m_incomingLLIntCalls.push(incoming);
2980 PassRefPtr<CodeBlock> CodeBlock::newReplacement()
2982 return ownerScriptExecutable()->newReplacementCodeBlockFor(specializationKind());
2986 CodeBlock* ProgramCodeBlock::replacement()
2988 return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
2991 CodeBlock* ModuleProgramCodeBlock::replacement()
2993 return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock();
2996 CodeBlock* EvalCodeBlock::replacement()
2998 return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
3001 CodeBlock* FunctionCodeBlock::replacement()
3003 return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
3006 DFG::CapabilityLevel ProgramCodeBlock::capabilityLevelInternal()
3008 return DFG::programCapabilityLevel(this);
3011 DFG::CapabilityLevel ModuleProgramCodeBlock::capabilityLevelInternal()
3013 return DFG::programCapabilityLevel(this);
3016 DFG::CapabilityLevel EvalCodeBlock::capabilityLevelInternal()
3018 return DFG::evalCapabilityLevel(this);
3021 DFG::CapabilityLevel FunctionCodeBlock::capabilityLevelInternal()
3023 if (m_isConstructor)
3024 return DFG::functionForConstructCapabilityLevel(this);
3025 return DFG::functionForCallCapabilityLevel(this);
3028 #if ENABLE(WEBASSEMBLY)
3029 CodeBlock* WebAssemblyCodeBlock::replacement()
3034 DFG::CapabilityLevel WebAssemblyCodeBlock::capabilityLevelInternal()
3036 return DFG::CannotCompile;
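// Jettisoning removes an optimized CodeBlock from future use: it invalidates the JIT code so that
// frames currently running it will OSR exit, optionally counts a reoptimization against the baseline
// block, and reinstalls the alternative CodeBlock on the owner executable.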
3041 void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
3043 #if !ENABLE(DFG_JIT)
3045 UNUSED_PARAM(detail);
3048 RELEASE_ASSERT(reason != Profiler::NotJettisoned);
3051 if (DFG::shouldShowDisassembly()) {
3052 dataLog("Jettisoning ", *this);
3053 if (mode == CountReoptimization)
3054 dataLog(" and counting reoptimization");
3055 dataLog(" due to ", reason);
3057 dataLog(", ", *detail);
3061 if (reason == Profiler::JettisonDueToWeakReference) {
3062 if (DFG::shouldShowDisassembly()) {
3063 dataLog(*this, " will be jettisoned because of the following dead references:\n");
3064 DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
3065 for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
3066 DFG::WeakReferenceTransition& transition = dfgCommon->transitions[i];
3067 JSCell* origin = transition.m_codeOrigin.get();
3068 JSCell* from = transition.m_from.get();
3069 JSCell* to = transition.m_to.get();
3070 if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
3072 dataLog(" Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
3074 for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
3075 JSCell* weak = dfgCommon->weakReferences[i].get();
3076 if (Heap::isMarked(weak))
3078 dataLog(" Weak reference ", RawPointer(weak), ".\n");
3082 #endif // ENABLE(DFG_JIT)
3084 DeferGCForAWhile deferGC(*m_heap);
3086 // We want to accomplish two things here:
3087 // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
3088 // we should OSR exit at the top of the next bytecode instruction after the return.
3089 // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
3092 if (reason != Profiler::JettisonDueToOldAge) {
3093 if (Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get())
3094 compilation->setJettisonReason(reason, detail);
3096 // This accomplishes (1), and does its own book-keeping about whether it has already happened.
3097 if (!jitCode()->dfgCommon()->invalidate()) {
3098 // We've already been invalidated.
3099 RELEASE_ASSERT(this != replacement());
3104 if (DFG::shouldShowDisassembly())
3105 dataLog(" Did invalidate ", *this, "\n");
3107 // Count the reoptimization if that's what the user wanted.
3108 if (mode == CountReoptimization) {
3109 // FIXME: Maybe this should call alternative().
3110 // https://bugs.webkit.org/show_bug.cgi?id=123677
3111 baselineAlternative()->countReoptimization();
3112 if (DFG::shouldShowDisassembly())
3113 dataLog(" Did count reoptimization for ", *this, "\n");
3116 if (this != replacement()) {
3117 // This means that we were never the entrypoint. This can happen for OSR entry code
3118 // blocks.
3123 alternative()->optimizeAfterWarmUp();
3125 if (reason != Profiler::JettisonDueToOldAge)
3126 tallyFrequentExitSites();
3127 #endif // ENABLE(DFG_JIT)
3129 // This accomplishes (2).
3130 ownerScriptExecutable()->installCode(
3131 m_globalObject->vm(), alternative(), codeType(), specializationKind());
3134 if (DFG::shouldShowDisassembly())
3135 dataLog(" Did install baseline version of ", *this, "\n");
3136 #endif // ENABLE(DFG_JIT)
3139 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
3141 if (!codeOrigin.inlineCallFrame)
3142 return globalObject();
3143 return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->eitherCodeBlock()->globalObject();
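// Stack-walking functor that reports whether a given CodeBlock appears again within a bounded number
// of frames above a starting call frame.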
3146 class RecursionCheckFunctor {
3148 RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
3149 : m_startCallFrame(startCallFrame)
3150 , m_codeBlock(codeBlock)
3151 , m_depthToCheck(depthToCheck)
3152 , m_foundStartCallFrame(false)
3153 , m_didRecurse(false)
3156 StackVisitor::Status operator()(StackVisitor& visitor)
3158 CallFrame* currentCallFrame = visitor->callFrame();
3160 if (currentCallFrame == m_startCallFrame)
3161 m_foundStartCallFrame = true;
3163 if (m_foundStartCallFrame) {
3164 if (visitor->callFrame()->codeBlock() == m_codeBlock) {
3165 m_didRecurse = true;
3166 return StackVisitor::Done;
3169 if (!m_depthToCheck--)
3170 return StackVisitor::Done;
3173 return StackVisitor::Continue;
3176 bool didRecurse() const { return m_didRecurse; }
3179 CallFrame* m_startCallFrame;
3180 CodeBlock* m_codeBlock;
3181 unsigned m_depthToCheck;
3182 bool m_foundStartCallFrame;
3183 bool m_didRecurse;
3184 };
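// Called when another code block links a call to us. Its main job is deciding whether to drop the
// "should always be inlined" (SABI) hint based on what the caller looks like.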
3186 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
3188 CodeBlock* callerCodeBlock = callerFrame->codeBlock();
3190 if (Options::verboseCallLink())
3191 dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
3194 if (!m_shouldAlwaysBeInlined)
3197 if (!callerCodeBlock) {
3198 m_shouldAlwaysBeInlined = false;
3199 if (Options::verboseCallLink())
3200 dataLog(" Clearing SABI because caller is native.\n");
3204 if (!hasBaselineJITProfiling())
3207 if (!DFG::mightInlineFunction(this))
3210 if (!canInline(m_capabilityLevelState))
3213 if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
3214 m_shouldAlwaysBeInlined = false;
3215 if (Options::verboseCallLink())
3216 dataLog(" Clearing SABI because caller is too large.\n");
3220 if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
3221 // If the caller is still in the interpreter, then we can't expect inlining to
3222 // happen anytime soon. Assume it's profitable to optimize it separately. This
3223 // ensures that a function is SABI only if it is called no more frequently than
3224 // any of its callers.
3225 m_shouldAlwaysBeInlined = false;
3226 if (Options::verboseCallLink())
3227 dataLog(" Clearing SABI because caller is in LLInt.\n");