/*
 * Copyright (C) 2008-2010, 2012-2015 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
31 #include "CodeBlock.h"
33 #include "BasicBlockLocation.h"
34 #include "BytecodeGenerator.h"
35 #include "BytecodeUseDef.h"
36 #include "CallLinkStatus.h"
37 #include "DFGCapabilities.h"
38 #include "DFGCommon.h"
39 #include "DFGDriver.h"
40 #include "DFGJITCode.h"
41 #include "DFGWorklist.h"
43 #include "FunctionExecutableDump.h"
44 #include "Interpreter.h"
47 #include "JSCJSValue.h"
48 #include "JSFunction.h"
49 #include "JSLexicalEnvironment.h"
50 #include "JSNameScope.h"
51 #include "LLIntEntrypoint.h"
52 #include "LowLevelInterpreter.h"
53 #include "JSCInlines.h"
54 #include "PolymorphicGetByIdList.h"
55 #include "PolymorphicPutByIdList.h"
56 #include "ProfilerDatabase.h"
57 #include "ReduceWhitespace.h"
59 #include "RepatchBuffer.h"
60 #include "SlotVisitorInlines.h"
61 #include "StackVisitor.h"
62 #include "TypeLocationCache.h"
63 #include "TypeProfiler.h"
64 #include "UnlinkedInstructionStream.h"
65 #include <wtf/BagToHashMap.h>
66 #include <wtf/CommaPrinter.h>
67 #include <wtf/StringExtras.h>
68 #include <wtf/StringPrintStream.h>
69 #include <wtf/text/UniquedStringImpl.h>
72 #include "DFGOperations.h"
76 #include "FTLJITCode.h"
81 CString CodeBlock::inferredName() const
89 return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
92 return CString("", 0);
96 bool CodeBlock::hasHash() const
101 bool CodeBlock::isSafeToComputeHash() const
103 return !isCompilationThread();
106 CodeBlockHash CodeBlock::hash() const
109 RELEASE_ASSERT(isSafeToComputeHash());
110 m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind());
115 CString CodeBlock::sourceCodeForTools() const
117 if (codeType() != FunctionCode)
118 return ownerExecutable()->source().toUTF8();
120 SourceProvider* provider = source();
121 FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
122 UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
123 unsigned unlinkedStartOffset = unlinked->startOffset();
124 unsigned linkedStartOffset = executable->source().startOffset();
125 int delta = linkedStartOffset - unlinkedStartOffset;
126 unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
127 unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
130 provider->source().impl()->utf8ForRange(rangeStart, rangeEnd - rangeStart));
133 CString CodeBlock::sourceCodeOnOneLine() const
135 return reduceWhitespace(sourceCodeForTools());
138 CString CodeBlock::hashAsStringIfPossible() const
140 if (hasHash() || isSafeToComputeHash())
141 return toCString(hash());
145 void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
147 out.print(inferredName(), "#", hashAsStringIfPossible());
148 out.print(":[", RawPointer(this), "->");
150 out.print(RawPointer(m_alternative.get()), "->");
151 out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());
153 if (codeType() == FunctionCode)
154 out.print(specializationKind());
155 out.print(", ", instructionCount());
156 if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
157 out.print(" (ShouldAlwaysBeInlined)");
158 if (ownerExecutable()->neverInline())
159 out.print(" (NeverInline)");
160 if (ownerExecutable()->didTryToEnterInLoop())
161 out.print(" (DidTryToEnterInLoop)");
162 if (ownerExecutable()->isStrictMode())
163 out.print(" (StrictMode)");
164 if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
165 out.print(" (FTLFail)");
166 if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
167 out.print(" (HadFTLReplacement)");
171 void CodeBlock::dump(PrintStream& out) const
173 dumpAssumingJITType(out, jitType());
176 static CString idName(int id0, const Identifier& ident)
178 return toCString(ident.impl(), "(@id", id0, ")");
181 CString CodeBlock::registerName(int r) const
183 if (isConstantRegisterIndex(r))
184 return constantName(r);
186 return toCString(VirtualRegister(r));
189 CString CodeBlock::constantName(int index) const
191 JSValue value = getConstant(index);
192 return toCString(value, "(", VirtualRegister(index), ")");
195 static CString regexpToSourceString(RegExp* regExp)
197 char postfix[5] = { '/', 0, 0, 0, 0 };
199 if (regExp->global())
200 postfix[index++] = 'g';
201 if (regExp->ignoreCase())
202 postfix[index++] = 'i';
203 if (regExp->multiline())
204 postfix[index] = 'm';
206 return toCString("/", regExp->pattern().impl(), postfix);
209 static CString regexpName(int re, RegExp* regexp)
211 return toCString(regexpToSourceString(regexp), "(@re", re, ")");
214 NEVER_INLINE static const char* debugHookName(int debugHookID)
216 switch (static_cast<DebugHookID>(debugHookID)) {
217 case DidEnterCallFrame:
218 return "didEnterCallFrame";
219 case WillLeaveCallFrame:
220 return "willLeaveCallFrame";
221 case WillExecuteStatement:
222 return "willExecuteStatement";
223 case WillExecuteProgram:
224 return "willExecuteProgram";
225 case DidExecuteProgram:
226 return "didExecuteProgram";
227 case DidReachBreakpoint:
228 return "didReachBreakpoint";
231 RELEASE_ASSERT_NOT_REACHED();
235 void CodeBlock::printUnaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
237 int r0 = (++it)->u.operand;
238 int r1 = (++it)->u.operand;
240 printLocationAndOp(out, exec, location, it, op);
241 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
244 void CodeBlock::printBinaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
246 int r0 = (++it)->u.operand;
247 int r1 = (++it)->u.operand;
248 int r2 = (++it)->u.operand;
249 printLocationAndOp(out, exec, location, it, op);
250 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
253 void CodeBlock::printConditionalJump(PrintStream& out, ExecState* exec, const Instruction*, const Instruction*& it, int location, const char* op)
255 int r0 = (++it)->u.operand;
256 int offset = (++it)->u.operand;
257 printLocationAndOp(out, exec, location, it, op);
258 out.printf("%s, %d(->%d)", registerName(r0).data(), offset, location + offset);
261 void CodeBlock::printGetByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it)
264 switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
268 case op_get_by_id_out_of_line:
269 op = "get_by_id_out_of_line";
271 case op_get_array_length:
275 RELEASE_ASSERT_NOT_REACHED();
276 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
280 int r0 = (++it)->u.operand;
281 int r1 = (++it)->u.operand;
282 int id0 = (++it)->u.operand;
283 printLocationAndOp(out, exec, location, it, op);
284 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
285 it += 4; // Increment up to the value profiler.
288 static void dumpStructure(PrintStream& out, const char* name, Structure* structure, const Identifier& ident)
293 out.printf("%s = %p", name, structure);
295 PropertyOffset offset = structure->getConcurrently(ident.impl());
296 if (offset != invalidOffset)
297 out.printf(" (offset = %d)", offset);
300 static void dumpChain(PrintStream& out, StructureChain* chain, const Identifier& ident)
302 out.printf("chain = %p: [", chain);
304 for (WriteBarrier<Structure>* currentStructure = chain->head();
306 ++currentStructure) {
311 dumpStructure(out, "struct", currentStructure->get(), ident);
// Dumps the inline-cache state (LLInt slot, then JIT StructureStubInfo) for
// the get_by_id at bytecode offset `location`.
// NOTE(review): this file is a corrupted extraction — original line numbers
// are fused into the code and many lines (braces, breaks, printfs) are
// elided. The body is preserved byte-for-byte rather than reconstructed.
316 void CodeBlock::printGetByIdCacheStatus(PrintStream& out, ExecState* exec, int location, const StubInfoMap& map)
// instruction[3] is the identifier operand of get_by_id.
318 Instruction* instruction = instructions().begin() + location;
320 const Identifier& ident = identifier(instruction[3].u.operand);
322 UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.
// LLInt cache: instruction[4] caches the Structure observed by the LLInt.
324 if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_array_length)
325 out.printf(" llint(array_length)");
326 else if (Structure* structure = instruction[4].u.structure.get()) {
327 out.printf(" llint(");
328 dumpStructure(out, "struct", structure, ident);
// JIT cache: look up the StructureStubInfo recorded for this bytecode origin.
333 if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
334 StructureStubInfo& stubInfo = *stubPtr;
335 if (stubInfo.resetByGC)
336 out.print(" (Reset By GC)");
341 Structure* baseStructure = 0;
342 Structure* prototypeStructure = 0;
343 StructureChain* chain = 0;
344 PolymorphicGetByIdList* list = 0;
346 switch (stubInfo.accessType) {
347 case access_get_by_id_self:
349 baseStructure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
351 case access_get_by_id_list:
353 list = stubInfo.u.getByIdList.list;
359 RELEASE_ASSERT_NOT_REACHED();
365 dumpStructure(out, "struct", baseStructure, ident);
368 if (prototypeStructure) {
// NOTE(review): the next line labels its output "prototypeStruct" but passes
// baseStructure — presumably it should pass prototypeStructure; verify
// against upstream before trusting this dump.
370 dumpStructure(out, "prototypeStruct", baseStructure, ident);
375 dumpChain(out, chain, ident);
379 out.printf(", list = %p: [", list);
380 for (unsigned i = 0; i < list->size(); ++i) {
384 dumpStructure(out, "base", list->at(i).structure(), ident);
385 if (list->at(i).chain()) {
387 dumpChain(out, list->at(i).chain(), ident);
// Dumps the inline-cache state (LLInt slot, then JIT StructureStubInfo) for
// the put_by_id at bytecode offset `location`, covering replace, transition,
// and polymorphic-list stubs.
// NOTE(review): corrupted extraction — fused line numbers and elided lines
// (braces, breaks, the CommaPrinter declaration for `comma`, printf closers).
// Preserved byte-for-byte rather than reconstructed.
401 void CodeBlock::printPutByIdCacheStatus(PrintStream& out, ExecState* exec, int location, const StubInfoMap& map)
// instruction[2] is the identifier operand of put_by_id.
403 Instruction* instruction = instructions().begin() + location;
405 const Identifier& ident = identifier(instruction[2].u.operand);
407 UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.
// LLInt cache: instruction[4] is the cached (or previous) Structure;
// transitions also cache the next Structure [6] and prototype chain [7].
409 if (Structure* structure = instruction[4].u.structure.get()) {
410 switch (exec->interpreter()->getOpcodeID(instruction[0].u.opcode)) {
412 case op_put_by_id_out_of_line:
413 out.print(" llint(");
414 dumpStructure(out, "struct", structure, ident);
418 case op_put_by_id_transition_direct:
419 case op_put_by_id_transition_normal:
420 case op_put_by_id_transition_direct_out_of_line:
421 case op_put_by_id_transition_normal_out_of_line:
422 out.print(" llint(");
423 dumpStructure(out, "prev", structure, ident);
425 dumpStructure(out, "next", instruction[6].u.structure.get(), ident);
426 if (StructureChain* chain = instruction[7].u.structureChain.get()) {
428 dumpChain(out, chain, ident)
434 out.print(" llint(unknown)");
// JIT cache: StructureStubInfo recorded for this bytecode origin.
440 if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
441 StructureStubInfo& stubInfo = *stubPtr;
442 if (stubInfo.resetByGC)
443 out.print(" (Reset By GC)");
448 switch (stubInfo.accessType) {
449 case access_put_by_id_replace:
450 out.print("replace, ");
451 dumpStructure(out, "struct", stubInfo.u.putByIdReplace.baseObjectStructure.get(), ident);
453 case access_put_by_id_transition_normal:
454 case access_put_by_id_transition_direct:
455 out.print("transition, ");
456 dumpStructure(out, "prev", stubInfo.u.putByIdTransition.previousStructure.get(), ident);
458 dumpStructure(out, "next", stubInfo.u.putByIdTransition.structure.get(), ident);
459 if (StructureChain* chain = stubInfo.u.putByIdTransition.chain.get()) {
461 dumpChain(out, chain, ident);
// Polymorphic list: one entry per observed access kind.
464 case access_put_by_id_list: {
465 out.printf("list = [");
466 PolymorphicPutByIdList* list = stubInfo.u.putByIdList.list;
// NOTE(review): `comma` below is presumably a CommaPrinter declared on an
// elided line — confirm against upstream.
468 for (unsigned i = 0; i < list->size(); ++i) {
469 out.print(comma, "(");
470 const PutByIdAccess& access = list->at(i);
472 if (access.isReplace()) {
473 out.print("replace, ");
474 dumpStructure(out, "struct", access.oldStructure(), ident);
475 } else if (access.isSetter()) {
476 out.print("setter, ");
477 dumpStructure(out, "struct", access.oldStructure(), ident);
478 } else if (access.isCustom()) {
479 out.print("custom, ");
480 dumpStructure(out, "struct", access.oldStructure(), ident);
481 } else if (access.isTransition()) {
482 out.print("transition, ");
483 dumpStructure(out, "prev", access.oldStructure(), ident);
485 dumpStructure(out, "next", access.newStructure(), ident);
486 if (access.chain()) {
488 dumpChain(out, access.chain(), ident);
491 out.print("unknown");
502 RELEASE_ASSERT_NOT_REACHED();
// Dumps a call-family opcode: "op dst, callee, argCount, registerOffset",
// optionally followed by LLInt/JIT call-link cache contents and the computed
// CallLinkStatus, then array/value profiling info.
// NOTE(review): corrupted extraction — fused line numbers; the opening
// out.printf( of the llint branch, the null-check on `target`, and the
// ENABLE(JIT) preprocessor guards appear to be on elided lines. Preserved
// byte-for-byte rather than reconstructed.
513 void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap& map)
515 int dst = (++it)->u.operand;
516 int func = (++it)->u.operand;
517 int argCount = (++it)->u.operand;
518 int registerOffset = (++it)->u.operand;
519 printLocationAndOp(out, exec, location, it, op);
520 out.printf("%s, %s, %d, %d", registerName(dst).data(), registerName(func).data(), argCount, registerOffset);
521 if (cacheDumpMode == DumpCaches) {
// LLInt call cache: last callee seen by the interpreter.
522 LLIntCallLinkInfo* callLinkInfo = it[1].u.callLinkInfo;
523 if (callLinkInfo->lastSeenCallee) {
525 " llint(%p, exec %p)",
526 callLinkInfo->lastSeenCallee.get(),
527 callLinkInfo->lastSeenCallee->executable());
// JIT call cache: last callee recorded in the CallLinkInfo for this origin.
530 if (CallLinkInfo* info = map.get(CodeOrigin(location))) {
531 JSFunction* target = info->lastSeenCallee();
533 out.printf(" jit(%p, exec %p)", target, target->executable());
536 if (jitType() != JITCode::FTLJIT)
537 out.print(" status(", CallLinkStatus::computeFor(this, location, map), ")");
544 dumpArrayProfiling(out, it, hasPrintedProfiling);
545 dumpValueProfiling(out, it, hasPrintedProfiling);
548 void CodeBlock::printPutByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
550 int r0 = (++it)->u.operand;
551 int id0 = (++it)->u.operand;
552 int r1 = (++it)->u.operand;
553 printLocationAndOp(out, exec, location, it, op);
554 out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data());
558 void CodeBlock::dumpSource()
560 dumpSource(WTF::dataFile());
563 void CodeBlock::dumpSource(PrintStream& out)
565 ScriptExecutable* executable = ownerExecutable();
566 if (executable->isFunctionExecutable()) {
567 FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
568 String source = functionExecutable->source().provider()->getRange(
569 functionExecutable->parametersStartOffset(),
570 functionExecutable->typeProfilingEndOffset() + 1); // Type profiling end offset is the character before the '}'.
572 out.print("function ", inferredName(), source);
575 out.print(executable->source().toString());
578 void CodeBlock::dumpBytecode()
580 dumpBytecode(WTF::dataFile());
// Dumps the whole code block: a header line with counts, every instruction,
// then the identifier, constant, regexp, exception-handler, and switch/string
// jump-table sections.
// NOTE(review): corrupted extraction — fused line numbers; the `do {` heads
// and the declarations/increments of the loop counters (`i`, `entry`), plus
// printf openers and closing braces, are on elided lines. Preserved
// byte-for-byte rather than reconstructed.
583 void CodeBlock::dumpBytecode(PrintStream& out)
585 // We only use the ExecState* for things that don't actually lead to JS execution,
586 // like converting a JSString to a String. Hence the globalExec is appropriate.
587 ExecState* exec = m_globalObject->globalExec();
// Count instructions by walking opcode lengths (loop body elided).
589 size_t instructionCount = 0;
591 for (size_t i = 0; i < instructions().size(); i += opcodeLengths[exec->interpreter()->getOpcodeID(instructions()[i].u.opcode)])
// Header: sizes and register counts (the out.print opener is elided).
596 ": %lu m_instructions; %lu bytes; %d parameter(s); %d callee register(s); %d variable(s)",
597 static_cast<unsigned long>(instructions().size()),
598 static_cast<unsigned long>(instructions().size() * sizeof(Instruction)),
599 m_numParameters, m_numCalleeRegisters, m_numVars);
600 if (needsActivation() && codeType() == FunctionCode)
601 out.printf("; lexical environment in r%d", activationRegister().offset());
// Snapshot the JIT's stub/call-link maps so per-instruction dumps can show
// inline-cache state.
604 StubInfoMap stubInfos;
605 CallLinkInfoMap callLinkInfos;
606 getStubInfoMap(stubInfos);
607 getCallLinkInfoMap(callLinkInfos);
609 const Instruction* begin = instructions().begin();
610 const Instruction* end = instructions().end();
611 for (const Instruction* it = begin; it != end; ++it)
612 dumpBytecode(out, exec, begin, it, stubInfos, callLinkInfos);
614 if (numberOfIdentifiers()) {
615 out.printf("\nIdentifiers:\n");
618 out.printf("  id%u = %s\n", static_cast<unsigned>(i), identifier(i).string().utf8().data());
620 } while (i != numberOfIdentifiers());
623 if (!m_constantRegisters.isEmpty()) {
624 out.printf("\nConstants:\n");
627 const char* sourceCodeRepresentationDescription = nullptr;
628 switch (m_constantsSourceCodeRepresentation[i]) {
629 case SourceCodeRepresentation::Double:
630 sourceCodeRepresentationDescription = ": in source as double";
632 case SourceCodeRepresentation::Integer:
633 sourceCodeRepresentationDescription = ": in source as integer";
635 case SourceCodeRepresentation::Other:
636 sourceCodeRepresentationDescription = "";
639 out.printf("   k%u = %s%s\n", static_cast<unsigned>(i), toCString(m_constantRegisters[i].get()).data(), sourceCodeRepresentationDescription);
641 } while (i < m_constantRegisters.size());
644 if (size_t count = m_unlinkedCode->numberOfRegExps()) {
645 out.printf("\nm_regexps:\n");
648 out.printf("  re%u = %s\n", static_cast<unsigned>(i), regexpToSourceString(m_unlinkedCode->regexp(i)).data());
653 if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
654 out.printf("\nException Handlers:\n");
657 HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
658 out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] depth: [%4d] } %s\n",
659 i + 1, handler.start, handler.end, handler.target, handler.scopeDepth, handler.typeName());
661 } while (i < m_rareData->m_exceptionHandlers.size());
664 if (m_rareData && !m_rareData->m_switchJumpTables.isEmpty()) {
665 out.printf("Switch Jump Tables:\n");
668 out.printf("  %1d = {\n", i);
670 Vector<int32_t>::const_iterator end = m_rareData->m_switchJumpTables[i].branchOffsets.end();
671 for (Vector<int32_t>::const_iterator iter = m_rareData->m_switchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) {
674 out.printf("\t\t%4d => %04d\n", entry + m_rareData->m_switchJumpTables[i].min, *iter);
678 } while (i < m_rareData->m_switchJumpTables.size());
681 if (m_rareData && !m_rareData->m_stringSwitchJumpTables.isEmpty()) {
682 out.printf("\nString Switch Jump Tables:\n");
685 out.printf("  %1d = {\n", i);
686 StringJumpTable::StringOffsetTable::const_iterator end = m_rareData->m_stringSwitchJumpTables[i].offsetTable.end();
687 for (StringJumpTable::StringOffsetTable::const_iterator iter = m_rareData->m_stringSwitchJumpTables[i].offsetTable.begin(); iter != end; ++iter)
688 out.printf("\t\t\"%s\" => %04d\n", iter->key->utf8().data(), iter->value.branchOffset);
691 } while (i < m_rareData->m_stringSwitchJumpTables.size());
697 void CodeBlock::beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling)
699 if (hasPrintedProfiling) {
705 hasPrintedProfiling = true;
708 void CodeBlock::dumpValueProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
710 ConcurrentJITLocker locker(m_lock);
713 CString description = it->u.profile->briefDescription(locker);
714 if (!description.length())
716 beginDumpProfiling(out, hasPrintedProfiling);
717 out.print(description);
720 void CodeBlock::dumpArrayProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
722 ConcurrentJITLocker locker(m_lock);
725 if (!it->u.arrayProfile)
727 CString description = it->u.arrayProfile->briefDescription(locker, this);
728 if (!description.length())
730 beginDumpProfiling(out, hasPrintedProfiling);
731 out.print(description);
734 void CodeBlock::dumpRareCaseProfile(PrintStream& out, const char* name, RareCaseProfile* profile, bool& hasPrintedProfiling)
736 if (!profile || !profile->m_counter)
739 beginDumpProfiling(out, hasPrintedProfiling);
740 out.print(name, profile->m_counter);
743 void CodeBlock::printLocationAndOp(PrintStream& out, ExecState*, int location, const Instruction*&, const char* op)
745 out.printf("[%4d] %-17s ", location, op);
748 void CodeBlock::printLocationOpAndRegisterOperand(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, int operand)
750 printLocationAndOp(out, exec, location, it, op);
751 out.printf("%s", registerName(operand).data());
754 void CodeBlock::dumpBytecode(
755 PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it,
756 const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
758 int location = it - begin;
759 bool hasPrintedProfiling = false;
760 OpcodeID opcode = exec->interpreter()->getOpcodeID(it->u.opcode);
763 printLocationAndOp(out, exec, location, it, "enter");
767 int r0 = (++it)->u.operand;
768 printLocationOpAndRegisterOperand(out, exec, location, it, "get_scope", r0);
771 case op_create_direct_arguments: {
772 int r0 = (++it)->u.operand;
773 printLocationAndOp(out, exec, location, it, "create_direct_arguments");
774 out.printf("%s", registerName(r0).data());
777 case op_create_scoped_arguments: {
778 int r0 = (++it)->u.operand;
779 int r1 = (++it)->u.operand;
780 printLocationAndOp(out, exec, location, it, "create_scoped_arguments");
781 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
784 case op_create_out_of_band_arguments: {
785 int r0 = (++it)->u.operand;
786 printLocationAndOp(out, exec, location, it, "create_out_of_band_arguments");
787 out.printf("%s", registerName(r0).data());
790 case op_create_this: {
791 int r0 = (++it)->u.operand;
792 int r1 = (++it)->u.operand;
793 unsigned inferredInlineCapacity = (++it)->u.operand;
794 unsigned cachedFunction = (++it)->u.operand;
795 printLocationAndOp(out, exec, location, it, "create_this");
796 out.printf("%s, %s, %u, %u", registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity, cachedFunction);
800 int r0 = (++it)->u.operand;
801 printLocationOpAndRegisterOperand(out, exec, location, it, "to_this", r0);
802 Structure* structure = (++it)->u.structure.get();
804 out.print(", cache(struct = ", RawPointer(structure), ")");
805 out.print(", ", (++it)->u.toThisStatus);
809 int r0 = (++it)->u.operand;
810 printLocationOpAndRegisterOperand(out, exec, location, it, "op_check_tdz", r0);
813 case op_new_object: {
814 int r0 = (++it)->u.operand;
815 unsigned inferredInlineCapacity = (++it)->u.operand;
816 printLocationAndOp(out, exec, location, it, "new_object");
817 out.printf("%s, %u", registerName(r0).data(), inferredInlineCapacity);
818 ++it; // Skip object allocation profile.
822 int dst = (++it)->u.operand;
823 int argv = (++it)->u.operand;
824 int argc = (++it)->u.operand;
825 printLocationAndOp(out, exec, location, it, "new_array");
826 out.printf("%s, %s, %d", registerName(dst).data(), registerName(argv).data(), argc);
827 ++it; // Skip array allocation profile.
830 case op_new_array_with_size: {
831 int dst = (++it)->u.operand;
832 int length = (++it)->u.operand;
833 printLocationAndOp(out, exec, location, it, "new_array_with_size");
834 out.printf("%s, %s", registerName(dst).data(), registerName(length).data());
835 ++it; // Skip array allocation profile.
838 case op_new_array_buffer: {
839 int dst = (++it)->u.operand;
840 int argv = (++it)->u.operand;
841 int argc = (++it)->u.operand;
842 printLocationAndOp(out, exec, location, it, "new_array_buffer");
843 out.printf("%s, %d, %d", registerName(dst).data(), argv, argc);
844 ++it; // Skip array allocation profile.
847 case op_new_regexp: {
848 int r0 = (++it)->u.operand;
849 int re0 = (++it)->u.operand;
850 printLocationAndOp(out, exec, location, it, "new_regexp");
851 out.printf("%s, ", registerName(r0).data());
852 if (r0 >=0 && r0 < (int)m_unlinkedCode->numberOfRegExps())
853 out.printf("%s", regexpName(re0, regexp(re0)).data());
855 out.printf("bad_regexp(%d)", re0);
859 int r0 = (++it)->u.operand;
860 int r1 = (++it)->u.operand;
861 printLocationAndOp(out, exec, location, it, "mov");
862 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
865 case op_profile_type: {
866 int r0 = (++it)->u.operand;
871 printLocationAndOp(out, exec, location, it, "op_profile_type");
872 out.printf("%s", registerName(r0).data());
875 case op_profile_control_flow: {
876 BasicBlockLocation* basicBlockLocation = (++it)->u.basicBlockLocation;
877 printLocationAndOp(out, exec, location, it, "profile_control_flow");
878 out.printf("[%d, %d]", basicBlockLocation->startOffset(), basicBlockLocation->endOffset());
882 printUnaryOp(out, exec, location, it, "not");
886 printBinaryOp(out, exec, location, it, "eq");
890 printUnaryOp(out, exec, location, it, "eq_null");
894 printBinaryOp(out, exec, location, it, "neq");
898 printUnaryOp(out, exec, location, it, "neq_null");
902 printBinaryOp(out, exec, location, it, "stricteq");
906 printBinaryOp(out, exec, location, it, "nstricteq");
910 printBinaryOp(out, exec, location, it, "less");
914 printBinaryOp(out, exec, location, it, "lesseq");
918 printBinaryOp(out, exec, location, it, "greater");
922 printBinaryOp(out, exec, location, it, "greatereq");
926 int r0 = (++it)->u.operand;
927 printLocationOpAndRegisterOperand(out, exec, location, it, "inc", r0);
931 int r0 = (++it)->u.operand;
932 printLocationOpAndRegisterOperand(out, exec, location, it, "dec", r0);
936 printUnaryOp(out, exec, location, it, "to_number");
940 printUnaryOp(out, exec, location, it, "to_string");
944 printUnaryOp(out, exec, location, it, "negate");
948 printBinaryOp(out, exec, location, it, "add");
953 printBinaryOp(out, exec, location, it, "mul");
958 printBinaryOp(out, exec, location, it, "div");
963 printBinaryOp(out, exec, location, it, "mod");
967 printBinaryOp(out, exec, location, it, "sub");
972 printBinaryOp(out, exec, location, it, "lshift");
976 printBinaryOp(out, exec, location, it, "rshift");
980 printBinaryOp(out, exec, location, it, "urshift");
984 printBinaryOp(out, exec, location, it, "bitand");
989 printBinaryOp(out, exec, location, it, "bitxor");
994 printBinaryOp(out, exec, location, it, "bitor");
998 case op_check_has_instance: {
999 int r0 = (++it)->u.operand;
1000 int r1 = (++it)->u.operand;
1001 int r2 = (++it)->u.operand;
1002 int offset = (++it)->u.operand;
1003 printLocationAndOp(out, exec, location, it, "check_has_instance");
1004 out.printf("%s, %s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), offset, location + offset);
1007 case op_instanceof: {
1008 int r0 = (++it)->u.operand;
1009 int r1 = (++it)->u.operand;
1010 int r2 = (++it)->u.operand;
1011 printLocationAndOp(out, exec, location, it, "instanceof");
1012 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1016 printUnaryOp(out, exec, location, it, "unsigned");
1020 printUnaryOp(out, exec, location, it, "typeof");
1023 case op_is_undefined: {
1024 printUnaryOp(out, exec, location, it, "is_undefined");
1027 case op_is_boolean: {
1028 printUnaryOp(out, exec, location, it, "is_boolean");
1031 case op_is_number: {
1032 printUnaryOp(out, exec, location, it, "is_number");
1035 case op_is_string: {
1036 printUnaryOp(out, exec, location, it, "is_string");
1039 case op_is_object: {
1040 printUnaryOp(out, exec, location, it, "is_object");
1043 case op_is_object_or_null: {
1044 printUnaryOp(out, exec, location, it, "is_object_or_null");
1047 case op_is_function: {
1048 printUnaryOp(out, exec, location, it, "is_function");
1052 printBinaryOp(out, exec, location, it, "in");
1055 case op_init_global_const_nop: {
1056 printLocationAndOp(out, exec, location, it, "init_global_const_nop");
1063 case op_init_global_const: {
1064 WriteBarrier<Unknown>* variablePointer = (++it)->u.variablePointer;
1065 int r0 = (++it)->u.operand;
1066 printLocationAndOp(out, exec, location, it, "init_global_const");
1067 out.printf("g%d(%p), %s", m_globalObject->findVariableIndex(variablePointer).offset(), variablePointer, registerName(r0).data());
1073 case op_get_by_id_out_of_line:
1074 case op_get_array_length: {
1075 printGetByIdOp(out, exec, location, it);
1076 printGetByIdCacheStatus(out, exec, location, stubInfos);
1077 dumpValueProfiling(out, it, hasPrintedProfiling);
1080 case op_put_by_id: {
1081 printPutByIdOp(out, exec, location, it, "put_by_id");
1082 printPutByIdCacheStatus(out, exec, location, stubInfos);
1085 case op_put_by_id_out_of_line: {
1086 printPutByIdOp(out, exec, location, it, "put_by_id_out_of_line");
1087 printPutByIdCacheStatus(out, exec, location, stubInfos);
1090 case op_put_by_id_transition_direct: {
1091 printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct");
1092 printPutByIdCacheStatus(out, exec, location, stubInfos);
1095 case op_put_by_id_transition_direct_out_of_line: {
1096 printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct_out_of_line");
1097 printPutByIdCacheStatus(out, exec, location, stubInfos);
1100 case op_put_by_id_transition_normal: {
1101 printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal");
1102 printPutByIdCacheStatus(out, exec, location, stubInfos);
1105 case op_put_by_id_transition_normal_out_of_line: {
1106 printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal_out_of_line");
1107 printPutByIdCacheStatus(out, exec, location, stubInfos);
1110 case op_put_getter_by_id: {
1111 int r0 = (++it)->u.operand;
1112 int id0 = (++it)->u.operand;
1113 int r1 = (++it)->u.operand;
1114 printLocationAndOp(out, exec, location, it, "put_getter_by_id");
1115 out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data());
1118 case op_put_setter_by_id: {
1119 int r0 = (++it)->u.operand;
1120 int id0 = (++it)->u.operand;
1121 int r1 = (++it)->u.operand;
1122 printLocationAndOp(out, exec, location, it, "put_setter_by_id");
1123 out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data());
1126 case op_put_getter_setter: {
1127 int r0 = (++it)->u.operand;
1128 int id0 = (++it)->u.operand;
1129 int r1 = (++it)->u.operand;
1130 int r2 = (++it)->u.operand;
1131 printLocationAndOp(out, exec, location, it, "put_getter_setter");
1132 out.printf("%s, %s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data(), registerName(r2).data());
1135 case op_del_by_id: {
1136 int r0 = (++it)->u.operand;
1137 int r1 = (++it)->u.operand;
1138 int id0 = (++it)->u.operand;
1139 printLocationAndOp(out, exec, location, it, "del_by_id");
1140 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
1143 case op_get_by_val: {
1144 int r0 = (++it)->u.operand;
1145 int r1 = (++it)->u.operand;
1146 int r2 = (++it)->u.operand;
1147 printLocationAndOp(out, exec, location, it, "get_by_val");
1148 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1149 dumpArrayProfiling(out, it, hasPrintedProfiling);
1150 dumpValueProfiling(out, it, hasPrintedProfiling);
1153 case op_put_by_val: {
1154 int r0 = (++it)->u.operand;
1155 int r1 = (++it)->u.operand;
1156 int r2 = (++it)->u.operand;
1157 printLocationAndOp(out, exec, location, it, "put_by_val");
1158 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1159 dumpArrayProfiling(out, it, hasPrintedProfiling);
1162 case op_put_by_val_direct: {
1163 int r0 = (++it)->u.operand;
1164 int r1 = (++it)->u.operand;
1165 int r2 = (++it)->u.operand;
1166 printLocationAndOp(out, exec, location, it, "put_by_val_direct");
1167 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1168 dumpArrayProfiling(out, it, hasPrintedProfiling);
1171 case op_del_by_val: {
1172 int r0 = (++it)->u.operand;
1173 int r1 = (++it)->u.operand;
1174 int r2 = (++it)->u.operand;
1175 printLocationAndOp(out, exec, location, it, "del_by_val");
1176 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1179 case op_put_by_index: {
1180 int r0 = (++it)->u.operand;
1181 unsigned n0 = (++it)->u.operand;
1182 int r1 = (++it)->u.operand;
1183 printLocationAndOp(out, exec, location, it, "put_by_index");
1184 out.printf("%s, %u, %s", registerName(r0).data(), n0, registerName(r1).data());
1188 int offset = (++it)->u.operand;
1189 printLocationAndOp(out, exec, location, it, "jmp");
1190 out.printf("%d(->%d)", offset, location + offset);
1194 printConditionalJump(out, exec, begin, it, location, "jtrue");
1198 printConditionalJump(out, exec, begin, it, location, "jfalse");
1202 printConditionalJump(out, exec, begin, it, location, "jeq_null");
1205 case op_jneq_null: {
1206 printConditionalJump(out, exec, begin, it, location, "jneq_null");
1210 int r0 = (++it)->u.operand;
1211 Special::Pointer pointer = (++it)->u.specialPointer;
1212 int offset = (++it)->u.operand;
1213 printLocationAndOp(out, exec, location, it, "jneq_ptr");
1214 out.printf("%s, %d (%p), %d(->%d)", registerName(r0).data(), pointer, m_globalObject->actualPointerFor(pointer), offset, location + offset);
1218 int r0 = (++it)->u.operand;
1219 int r1 = (++it)->u.operand;
1220 int offset = (++it)->u.operand;
1221 printLocationAndOp(out, exec, location, it, "jless");
1222 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1226 int r0 = (++it)->u.operand;
1227 int r1 = (++it)->u.operand;
1228 int offset = (++it)->u.operand;
1229 printLocationAndOp(out, exec, location, it, "jlesseq");
1230 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1234 int r0 = (++it)->u.operand;
1235 int r1 = (++it)->u.operand;
1236 int offset = (++it)->u.operand;
1237 printLocationAndOp(out, exec, location, it, "jgreater");
1238 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1241 case op_jgreatereq: {
1242 int r0 = (++it)->u.operand;
1243 int r1 = (++it)->u.operand;
1244 int offset = (++it)->u.operand;
1245 printLocationAndOp(out, exec, location, it, "jgreatereq");
1246 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1250 int r0 = (++it)->u.operand;
1251 int r1 = (++it)->u.operand;
1252 int offset = (++it)->u.operand;
1253 printLocationAndOp(out, exec, location, it, "jnless");
1254 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1258 int r0 = (++it)->u.operand;
1259 int r1 = (++it)->u.operand;
1260 int offset = (++it)->u.operand;
1261 printLocationAndOp(out, exec, location, it, "jnlesseq");
1262 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1265 case op_jngreater: {
1266 int r0 = (++it)->u.operand;
1267 int r1 = (++it)->u.operand;
1268 int offset = (++it)->u.operand;
1269 printLocationAndOp(out, exec, location, it, "jngreater");
1270 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1273 case op_jngreatereq: {
1274 int r0 = (++it)->u.operand;
1275 int r1 = (++it)->u.operand;
1276 int offset = (++it)->u.operand;
1277 printLocationAndOp(out, exec, location, it, "jngreatereq");
1278 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1281 case op_loop_hint: {
1282 printLocationAndOp(out, exec, location, it, "loop_hint");
1285 case op_switch_imm: {
1286 int tableIndex = (++it)->u.operand;
1287 int defaultTarget = (++it)->u.operand;
1288 int scrutineeRegister = (++it)->u.operand;
1289 printLocationAndOp(out, exec, location, it, "switch_imm");
1290 out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1293 case op_switch_char: {
1294 int tableIndex = (++it)->u.operand;
1295 int defaultTarget = (++it)->u.operand;
1296 int scrutineeRegister = (++it)->u.operand;
1297 printLocationAndOp(out, exec, location, it, "switch_char");
1298 out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1301 case op_switch_string: {
1302 int tableIndex = (++it)->u.operand;
1303 int defaultTarget = (++it)->u.operand;
1304 int scrutineeRegister = (++it)->u.operand;
1305 printLocationAndOp(out, exec, location, it, "switch_string");
1306 out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1310 int r0 = (++it)->u.operand;
1311 int r1 = (++it)->u.operand;
1312 int f0 = (++it)->u.operand;
1313 printLocationAndOp(out, exec, location, it, "new_func");
1314 out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
1317 case op_new_func_exp: {
1318 int r0 = (++it)->u.operand;
1319 int r1 = (++it)->u.operand;
1320 int f0 = (++it)->u.operand;
1321 printLocationAndOp(out, exec, location, it, "new_func_exp");
1322 out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
1326 printCallOp(out, exec, location, it, "call", DumpCaches, hasPrintedProfiling, callLinkInfos);
1329 case op_call_eval: {
1330 printCallOp(out, exec, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling, callLinkInfos);
1334 case op_construct_varargs:
1335 case op_call_varargs: {
1336 int result = (++it)->u.operand;
1337 int callee = (++it)->u.operand;
1338 int thisValue = (++it)->u.operand;
1339 int arguments = (++it)->u.operand;
1340 int firstFreeRegister = (++it)->u.operand;
1341 int varArgOffset = (++it)->u.operand;
1343 printLocationAndOp(out, exec, location, it, opcode == op_call_varargs ? "call_varargs" : "construct_varargs");
1344 out.printf("%s, %s, %s, %s, %d, %d", registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister, varArgOffset);
1345 dumpValueProfiling(out, it, hasPrintedProfiling);
1350 int r0 = (++it)->u.operand;
1351 printLocationOpAndRegisterOperand(out, exec, location, it, "ret", r0);
1354 case op_construct: {
1355 printCallOp(out, exec, location, it, "construct", DumpCaches, hasPrintedProfiling, callLinkInfos);
1359 int r0 = (++it)->u.operand;
1360 int r1 = (++it)->u.operand;
1361 int count = (++it)->u.operand;
1362 printLocationAndOp(out, exec, location, it, "strcat");
1363 out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), count);
1366 case op_to_primitive: {
1367 int r0 = (++it)->u.operand;
1368 int r1 = (++it)->u.operand;
1369 printLocationAndOp(out, exec, location, it, "to_primitive");
1370 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
1373 case op_get_enumerable_length: {
1374 int dst = it[1].u.operand;
1375 int base = it[2].u.operand;
1376 printLocationAndOp(out, exec, location, it, "op_get_enumerable_length");
1377 out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
1378 it += OPCODE_LENGTH(op_get_enumerable_length) - 1;
1381 case op_has_indexed_property: {
1382 int dst = it[1].u.operand;
1383 int base = it[2].u.operand;
1384 int propertyName = it[3].u.operand;
1385 ArrayProfile* arrayProfile = it[4].u.arrayProfile;
1386 printLocationAndOp(out, exec, location, it, "op_has_indexed_property");
1387 out.printf("%s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), arrayProfile);
1388 it += OPCODE_LENGTH(op_has_indexed_property) - 1;
1391 case op_has_structure_property: {
1392 int dst = it[1].u.operand;
1393 int base = it[2].u.operand;
1394 int propertyName = it[3].u.operand;
1395 int enumerator = it[4].u.operand;
1396 printLocationAndOp(out, exec, location, it, "op_has_structure_property");
1397 out.printf("%s, %s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(enumerator).data());
1398 it += OPCODE_LENGTH(op_has_structure_property) - 1;
1401 case op_has_generic_property: {
1402 int dst = it[1].u.operand;
1403 int base = it[2].u.operand;
1404 int propertyName = it[3].u.operand;
1405 printLocationAndOp(out, exec, location, it, "op_has_generic_property");
1406 out.printf("%s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data());
1407 it += OPCODE_LENGTH(op_has_generic_property) - 1;
1410 case op_get_direct_pname: {
1411 int dst = it[1].u.operand;
1412 int base = it[2].u.operand;
1413 int propertyName = it[3].u.operand;
1414 int index = it[4].u.operand;
1415 int enumerator = it[5].u.operand;
1416 ValueProfile* profile = it[6].u.profile;
1417 printLocationAndOp(out, exec, location, it, "op_get_direct_pname");
1418 out.printf("%s, %s, %s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(index).data(), registerName(enumerator).data(), profile);
1419 it += OPCODE_LENGTH(op_get_direct_pname) - 1;
1423 case op_get_property_enumerator: {
1424 int dst = it[1].u.operand;
1425 int base = it[2].u.operand;
1426 printLocationAndOp(out, exec, location, it, "op_get_property_enumerator");
1427 out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
1428 it += OPCODE_LENGTH(op_get_property_enumerator) - 1;
1431 case op_enumerator_structure_pname: {
1432 int dst = it[1].u.operand;
1433 int enumerator = it[2].u.operand;
1434 int index = it[3].u.operand;
1435 printLocationAndOp(out, exec, location, it, "op_enumerator_structure_pname");
1436 out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data());
1437 it += OPCODE_LENGTH(op_enumerator_structure_pname) - 1;
1440 case op_enumerator_generic_pname: {
1441 int dst = it[1].u.operand;
1442 int enumerator = it[2].u.operand;
1443 int index = it[3].u.operand;
1444 printLocationAndOp(out, exec, location, it, "op_enumerator_generic_pname");
1445 out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data());
1446 it += OPCODE_LENGTH(op_enumerator_generic_pname) - 1;
1449 case op_to_index_string: {
1450 int dst = it[1].u.operand;
1451 int index = it[2].u.operand;
1452 printLocationAndOp(out, exec, location, it, "op_to_index_string");
1453 out.printf("%s, %s", registerName(dst).data(), registerName(index).data());
1454 it += OPCODE_LENGTH(op_to_index_string) - 1;
1457 case op_push_with_scope: {
1458 int dst = (++it)->u.operand;
1459 int newScope = (++it)->u.operand;
1460 printLocationAndOp(out, exec, location, it, "push_with_scope");
1461 out.printf("%s, %s", registerName(dst).data(), registerName(newScope).data());
1464 case op_get_parent_scope: {
1465 int dst = (++it)->u.operand;
1466 int parentScope = (++it)->u.operand;
1467 printLocationAndOp(out, exec, location, it, "get_parent_scope");
1468 out.printf("%s, %s", registerName(dst).data(), registerName(parentScope).data());
1471 case op_push_name_scope: {
1472 int dst = (++it)->u.operand;
1473 int r1 = (++it)->u.operand;
1474 int k0 = (++it)->u.operand;
1475 JSNameScope::Type scopeType = (JSNameScope::Type)(++it)->u.operand;
1476 printLocationAndOp(out, exec, location, it, "push_name_scope");
1477 out.printf("%s, %s, %s, %s", registerName(dst).data(), registerName(r1).data(), constantName(k0).data(), (scopeType == JSNameScope::FunctionNameScope) ? "functionScope" : ((scopeType == JSNameScope::CatchScope) ? "catchScope" : "unknownScopeType"));
1480 case op_create_lexical_environment: {
1481 int dst = (++it)->u.operand;
1482 int scope = (++it)->u.operand;
1483 int symbolTable = (++it)->u.operand;
1484 int initialValue = (++it)->u.operand;
1485 printLocationAndOp(out, exec, location, it, "create_lexical_environment");
1486 out.printf("%s, %s, %s, %s",
1487 registerName(dst).data(), registerName(scope).data(), registerName(symbolTable).data(), registerName(initialValue).data());
1491 int r0 = (++it)->u.operand;
1492 int r1 = (++it)->u.operand;
1493 printLocationAndOp(out, exec, location, it, "catch");
1494 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
1498 int r0 = (++it)->u.operand;
1499 printLocationOpAndRegisterOperand(out, exec, location, it, "throw", r0);
1502 case op_throw_static_error: {
1503 int k0 = (++it)->u.operand;
1504 int k1 = (++it)->u.operand;
1505 printLocationAndOp(out, exec, location, it, "throw_static_error");
1506 out.printf("%s, %s", constantName(k0).data(), k1 ? "true" : "false");
1510 int debugHookID = (++it)->u.operand;
1511 int hasBreakpointFlag = (++it)->u.operand;
1512 printLocationAndOp(out, exec, location, it, "debug");
1513 out.printf("%s %d", debugHookName(debugHookID), hasBreakpointFlag);
1516 case op_profile_will_call: {
1517 int function = (++it)->u.operand;
1518 printLocationOpAndRegisterOperand(out, exec, location, it, "profile_will_call", function);
1521 case op_profile_did_call: {
1522 int function = (++it)->u.operand;
1523 printLocationOpAndRegisterOperand(out, exec, location, it, "profile_did_call", function);
1527 int r0 = (++it)->u.operand;
1528 printLocationOpAndRegisterOperand(out, exec, location, it, "end", r0);
1531 case op_resolve_scope: {
1532 int r0 = (++it)->u.operand;
1533 int scope = (++it)->u.operand;
1534 int id0 = (++it)->u.operand;
1535 ResolveModeAndType modeAndType = ResolveModeAndType((++it)->u.operand);
1536 int depth = (++it)->u.operand;
1537 printLocationAndOp(out, exec, location, it, "resolve_scope");
1538 out.printf("%s, %s, %s, %u<%s|%s>, %d", registerName(r0).data(), registerName(scope).data(), idName(id0, identifier(id0)).data(),
1539 modeAndType.operand(), resolveModeName(modeAndType.mode()), resolveTypeName(modeAndType.type()),
1544 case op_get_from_scope: {
1545 int r0 = (++it)->u.operand;
1546 int r1 = (++it)->u.operand;
1547 int id0 = (++it)->u.operand;
1548 ResolveModeAndType modeAndType = ResolveModeAndType((++it)->u.operand);
1550 int operand = (++it)->u.operand; // Operand
1551 printLocationAndOp(out, exec, location, it, "get_from_scope");
1552 out.print(registerName(r0), ", ", registerName(r1));
1553 if (static_cast<unsigned>(id0) == UINT_MAX)
1554 out.print(", anonymous");
1556 out.print(", ", idName(id0, identifier(id0)));
1557 out.print(", ", modeAndType.operand(), "<", resolveModeName(modeAndType.mode()), "|", resolveTypeName(modeAndType.type()), ">, ", operand);
1558 dumpValueProfiling(out, it, hasPrintedProfiling);
1561 case op_put_to_scope: {
1562 int r0 = (++it)->u.operand;
1563 int id0 = (++it)->u.operand;
1564 int r1 = (++it)->u.operand;
1565 ResolveModeAndType modeAndType = ResolveModeAndType((++it)->u.operand);
1567 int operand = (++it)->u.operand; // Operand
1568 printLocationAndOp(out, exec, location, it, "put_to_scope");
1569 out.print(registerName(r0));
1570 if (static_cast<unsigned>(id0) == UINT_MAX)
1571 out.print(", anonymous");
1573 out.print(", ", idName(id0, identifier(id0)));
1574 out.print(", ", registerName(r1), ", ", modeAndType.operand(), "<", resolveModeName(modeAndType.mode()), "|", resolveTypeName(modeAndType.type()), ">, <structure>, ", operand);
1577 case op_get_from_arguments: {
1578 int r0 = (++it)->u.operand;
1579 int r1 = (++it)->u.operand;
1580 int offset = (++it)->u.operand;
1581 printLocationAndOp(out, exec, location, it, "get_from_arguments");
1582 out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), offset);
1583 dumpValueProfiling(out, it, hasPrintedProfiling);
1586 case op_put_to_arguments: {
1587 int r0 = (++it)->u.operand;
1588 int offset = (++it)->u.operand;
1589 int r1 = (++it)->u.operand;
1590 printLocationAndOp(out, exec, location, it, "put_to_arguments");
1591 out.printf("%s, %d, %s", registerName(r0).data(), offset, registerName(r1).data());
1595 RELEASE_ASSERT_NOT_REACHED();
1598 dumpRareCaseProfile(out, "rare case: ", rareCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
1599 dumpRareCaseProfile(out, "special fast case: ", specialFastCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
1602 Vector<DFG::FrequentExitSite> exitSites = exitProfile().exitSitesFor(location);
1603 if (!exitSites.isEmpty()) {
1604 out.print(" !! frequent exits: ");
1606 for (unsigned i = 0; i < exitSites.size(); ++i)
1607 out.print(comma, exitSites[i].kind(), " ", exitSites[i].jitType());
1609 #else // ENABLE(DFG_JIT)
1610 UNUSED_PARAM(location);
1611 #endif // ENABLE(DFG_JIT)
1615 void CodeBlock::dumpBytecode(
1616 PrintStream& out, unsigned bytecodeOffset,
1617 const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
1619 ExecState* exec = m_globalObject->globalExec();
1620 const Instruction* it = instructions().begin() + bytecodeOffset;
1621 dumpBytecode(out, exec, instructions().begin(), it, stubInfos, callLinkInfos);
// Applies |macro| once to the name of each heavyweight member vector that a
// CodeBlock owns directly; lets size-reporting/diagnostic code enumerate the
// vectors uniformly without repeating the list.
#define FOR_EACH_MEMBER_VECTOR(macro) \
    macro(instructions) \
    macro(callLinkInfos) \
    macro(linkedCallerList) \
    macro(identifiers) \
    macro(functionExpressions) \
    macro(constantRegisters)
// Applies |macro| to each member vector stored in CodeBlock::RareData.
// NOTE(review): this listing appears to have lost entries during extraction
// (embedded source numbering jumps 1632->1635 and 1639->1641, and the final
// continuation backslashes are inconsistent); verify the full entry list
// against the original file before relying on this expansion.
1632 #define FOR_EACH_MEMBER_VECTOR_RARE_DATA(macro) \
1635 macro(exceptionHandlers) \
1636 macro(switchJumpTables) \
1637 macro(stringSwitchJumpTables) \
1638 macro(evalCodeCache) \
1639 macro(expressionInfo) \
1641 macro(callReturnIndexVector)
1643 template<typename T>
1644 static size_t sizeInBytes(const Vector<T>& vector)
1646 return vector.capacity() * sizeof(T);
1651 class PutToScopeFireDetail : public FireDetail {
1653 PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
1654 : m_codeBlock(codeBlock)
1659 virtual void dump(PrintStream& out) const override
1661 out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
1665 CodeBlock* m_codeBlock;
1666 const Identifier& m_ident;
1669 } // anonymous namespace
// Copy constructor (CopyParsedBlockTag): clones another CodeBlock's linked
// state — instruction stream, registers, constants, function decls/exprs and
// source info are copied from |other| — while compilation/profiling state is
// reset fresh: m_shouldAlwaysBeInlined(true), FTL flags false, breakpoint
// count 0, OSR-exit/optimization/reoptimization counters 0.
// NOTE(review): the extracted listing dropped some lines here (embedded
// numbering jumps, e.g. 1684->1686, 1704->1706, 1717->1720), which likely
// held an initializer entry, conditional-compilation guards, braces, and at
// least one statement; consult the original file before editing.
1671 CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
1672 : m_globalObject(other.m_globalObject)
1673 , m_heap(other.m_heap)
1674 , m_numCalleeRegisters(other.m_numCalleeRegisters)
1675 , m_numVars(other.m_numVars)
1676 , m_isConstructor(other.m_isConstructor)
// The following flags/counters are reset rather than copied from |other|.
1677 , m_shouldAlwaysBeInlined(true)
1678 , m_didFailFTLCompilation(false)
1679 , m_hasBeenCompiledWithFTL(false)
1680 , m_unlinkedCode(*other.m_vm, other.m_ownerExecutable.get(), other.m_unlinkedCode.get())
1681 , m_hasDebuggerStatement(false)
1682 , m_steppingMode(SteppingModeDisabled)
1683 , m_numBreakpoints(0)
1684 , m_ownerExecutable(*other.m_vm, other.m_ownerExecutable.get(), other.m_ownerExecutable.get())
1686 , m_instructions(other.m_instructions)
1687 , m_thisRegister(other.m_thisRegister)
1688 , m_scopeRegister(other.m_scopeRegister)
1689 , m_lexicalEnvironmentRegister(other.m_lexicalEnvironmentRegister)
1690 , m_isStrictMode(other.m_isStrictMode)
1691 , m_needsActivation(other.m_needsActivation)
1692 , m_mayBeExecuting(false)
1693 , m_source(other.m_source)
1694 , m_sourceOffset(other.m_sourceOffset)
1695 , m_firstLineColumnOffset(other.m_firstLineColumnOffset)
1696 , m_codeType(other.m_codeType)
1697 , m_constantRegisters(other.m_constantRegisters)
1698 , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
1699 , m_functionDecls(other.m_functionDecls)
1700 , m_functionExprs(other.m_functionExprs)
1701 , m_osrExitCounter(0)
1702 , m_optimizationDelayCounter(0)
1703 , m_reoptimizationRetryCounter(0)
1704 , m_hash(other.m_hash)
1706 , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
// Relaxed ordering suffices here: no other thread can observe this block
// before construction completes.
1709 m_visitAggregateHasBeenCalled.store(false, std::memory_order_relaxed);
1711 ASSERT(m_heap->isDeferred());
1712 ASSERT(m_scopeRegister.isLocal());
1714 m_symbolTableConstantIndex = other.m_symbolTableConstantIndex;
1716 setNumParameters(other.numParameters());
1717 optimizeAfterWarmUp();
// Deep-copy the optional RareData vectors only if the source block has them.
1720 if (other.m_rareData) {
1721 createRareDataIfNecessary();
1723 m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
1724 m_rareData->m_constantBuffers = other.m_rareData->m_constantBuffers;
1725 m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
1726 m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
// Register with the heap's code-block set and account for our own footprint
// so GC pacing sees this allocation.
1729 m_heap->m_codeBlocks.add(this);
1730 m_heap->reportExtraMemoryAllocated(sizeof(CodeBlock));
1733 CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
1734 : m_globalObject(scope->globalObject()->vm(), ownerExecutable, scope->globalObject())
1735 , m_heap(&m_globalObject->vm().heap)
1736 , m_numCalleeRegisters(unlinkedCodeBlock->m_numCalleeRegisters)
1737 , m_numVars(unlinkedCodeBlock->m_numVars)
1738 , m_isConstructor(unlinkedCodeBlock->isConstructor())
1739 , m_shouldAlwaysBeInlined(true)
1740 , m_didFailFTLCompilation(false)
1741 , m_hasBeenCompiledWithFTL(false)
1742 , m_unlinkedCode(m_globalObject->vm(), ownerExecutable, unlinkedCodeBlock)
1743 , m_hasDebuggerStatement(false)
1744 , m_steppingMode(SteppingModeDisabled)
1745 , m_numBreakpoints(0)
1746 , m_ownerExecutable(m_globalObject->vm(), ownerExecutable, ownerExecutable)
1747 , m_vm(unlinkedCodeBlock->vm())
1748 , m_thisRegister(unlinkedCodeBlock->thisRegister())
1749 , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
1750 , m_lexicalEnvironmentRegister(unlinkedCodeBlock->activationRegister())
1751 , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
1752 , m_needsActivation(unlinkedCodeBlock->hasActivationRegister() && unlinkedCodeBlock->codeType() == FunctionCode)
1753 , m_mayBeExecuting(false)
1754 , m_source(sourceProvider)
1755 , m_sourceOffset(sourceOffset)
1756 , m_firstLineColumnOffset(firstLineColumnOffset)
1757 , m_codeType(unlinkedCodeBlock->codeType())
1758 , m_osrExitCounter(0)
1759 , m_optimizationDelayCounter(0)
1760 , m_reoptimizationRetryCounter(0)
1762 , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
1765 m_visitAggregateHasBeenCalled.store(false, std::memory_order_relaxed);
1767 ASSERT(m_heap->isDeferred());
1768 ASSERT(m_scopeRegister.isLocal());
1770 bool didCloneSymbolTable = false;
1773 setNumParameters(unlinkedCodeBlock->numParameters());
1775 if (vm()->typeProfiler() || vm()->controlFlowProfiler())
1776 vm()->functionHasExecutedCache()->removeUnexecutedRange(m_ownerExecutable->sourceID(), m_ownerExecutable->typeProfilingStartOffset(), m_ownerExecutable->typeProfilingEndOffset());
1778 setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation());
1779 if (unlinkedCodeBlock->usesGlobalObject())
1780 m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(*m_vm, ownerExecutable, m_globalObject.get());
1782 for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
1783 LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
1784 if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
1785 m_constantRegisters[registerIndex].set(*m_vm, ownerExecutable, m_globalObject->jsCellForLinkTimeConstant(type));
1788 if (SymbolTable* symbolTable = unlinkedCodeBlock->symbolTable()) {
1789 if (m_vm->typeProfiler()) {
1790 ConcurrentJITLocker locker(symbolTable->m_lock);
1791 symbolTable->prepareForTypeProfiling(locker);
1794 SymbolTable* newTable;
1795 if (codeType() == FunctionCode && symbolTable->scopeSize()) {
1796 newTable = symbolTable->cloneScopePart(*m_vm);
1797 didCloneSymbolTable = true;
1799 newTable = symbolTable;
1801 m_symbolTableConstantIndex = unlinkedCodeBlock->symbolTableConstantIndex();
1802 replaceConstant(m_symbolTableConstantIndex, newTable);
1804 m_symbolTableConstantIndex = 0;
1806 m_functionDecls.resizeToFit(unlinkedCodeBlock->numberOfFunctionDecls());
1807 for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
1808 UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
1809 if (vm()->typeProfiler() || vm()->controlFlowProfiler())
1810 vm()->functionHasExecutedCache()->insertUnexecutedRange(m_ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
1811 m_functionDecls[i].set(*m_vm, ownerExecutable, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
1814 m_functionExprs.resizeToFit(unlinkedCodeBlock->numberOfFunctionExprs());
1815 for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
1816 UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
1817 if (vm()->typeProfiler() || vm()->controlFlowProfiler())
1818 vm()->functionHasExecutedCache()->insertUnexecutedRange(m_ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
1819 m_functionExprs[i].set(*m_vm, ownerExecutable, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
1822 if (unlinkedCodeBlock->hasRareData()) {
1823 createRareDataIfNecessary();
1824 if (size_t count = unlinkedCodeBlock->constantBufferCount()) {
1825 m_rareData->m_constantBuffers.grow(count);
1826 for (size_t i = 0; i < count; i++) {
1827 const UnlinkedCodeBlock::ConstantBuffer& buffer = unlinkedCodeBlock->constantBuffer(i);
1828 m_rareData->m_constantBuffers[i] = buffer;
1831 if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
1832 m_rareData->m_exceptionHandlers.resizeToFit(count);
1833 size_t nonLocalScopeDepth = scope->depth();
1834 for (size_t i = 0; i < count; i++) {
1835 const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
1836 HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
1838 handler.initialize(unlinkedHandler, nonLocalScopeDepth,
1839 CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(op_catch))));
1841 handler.initialize(unlinkedHandler, nonLocalScopeDepth);
1846 if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
1847 m_rareData->m_stringSwitchJumpTables.grow(count);
1848 for (size_t i = 0; i < count; i++) {
1849 UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
1850 UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
1851 for (; ptr != end; ++ptr) {
1852 OffsetLocation offset;
1853 offset.branchOffset = ptr->value;
1854 m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
1859 if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
1860 m_rareData->m_switchJumpTables.grow(count);
1861 for (size_t i = 0; i < count; i++) {
1862 UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
1863 SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
1864 destTable.branchOffsets = sourceTable.branchOffsets;
1865 destTable.min = sourceTable.min;
1870 // Allocate metadata buffers for the bytecode
1871 if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
1872 m_llintCallLinkInfos.resizeToFit(size);
1873 if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
1874 m_arrayProfiles.grow(size);
1875 if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
1876 m_arrayAllocationProfiles.resizeToFit(size);
1877 if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
1878 m_valueProfiles.resizeToFit(size);
1879 if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
1880 m_objectAllocationProfiles.resizeToFit(size);
1882 // Copy and translate the UnlinkedInstructions
1883 unsigned instructionCount = unlinkedCodeBlock->instructions().count();
1884 UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions());
1886 Vector<Instruction, 0, UnsafeVectorOverflow> instructions(instructionCount);
1888 HashSet<int, WTF::IntHash<int>, WTF::UnsignedWithZeroKeyHashTraits<int>> clonedConstantSymbolTables;
1890 for (unsigned i = 0; !instructionReader.atEnd(); ) {
1891 const UnlinkedInstruction* pc = instructionReader.next();
1893 unsigned opLength = opcodeLength(pc[0].u.opcode);
1895 instructions[i] = vm()->interpreter->getOpcode(pc[0].u.opcode);
1896 for (size_t j = 1; j < opLength; ++j) {
1897 if (sizeof(int32_t) != sizeof(intptr_t))
1898 instructions[i + j].u.pointer = 0;
1899 instructions[i + j].u.operand = pc[j].u.operand;
1901 switch (pc[0].u.opcode) {
1902 case op_has_indexed_property: {
1903 int arrayProfileIndex = pc[opLength - 1].u.operand;
1904 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1906 instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
1909 case op_call_varargs:
1910 case op_construct_varargs:
1911 case op_get_by_val: {
1912 int arrayProfileIndex = pc[opLength - 2].u.operand;
1913 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1915 instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
1918 case op_get_direct_pname:
1920 case op_get_from_arguments: {
1921 ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
1922 ASSERT(profile->m_bytecodeOffset == -1);
1923 profile->m_bytecodeOffset = i;
1924 instructions[i + opLength - 1] = profile;
1927 case op_put_by_val: {
1928 int arrayProfileIndex = pc[opLength - 1].u.operand;
1929 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1930 instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
1933 case op_put_by_val_direct: {
1934 int arrayProfileIndex = pc[opLength - 1].u.operand;
1935 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1936 instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
1941 case op_new_array_buffer:
1942 case op_new_array_with_size: {
1943 int arrayAllocationProfileIndex = pc[opLength - 1].u.operand;
1944 instructions[i + opLength - 1] = &m_arrayAllocationProfiles[arrayAllocationProfileIndex];
1947 case op_new_object: {
1948 int objectAllocationProfileIndex = pc[opLength - 1].u.operand;
1949 ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex];
1950 int inferredInlineCapacity = pc[opLength - 2].u.operand;
1952 instructions[i + opLength - 1] = objectAllocationProfile;
1953 objectAllocationProfile->initialize(*vm(),
1954 m_ownerExecutable.get(), m_globalObject->objectPrototype(), inferredInlineCapacity);
1959 case op_call_eval: {
1960 ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
1961 ASSERT(profile->m_bytecodeOffset == -1);
1962 profile->m_bytecodeOffset = i;
1963 instructions[i + opLength - 1] = profile;
1964 int arrayProfileIndex = pc[opLength - 2].u.operand;
1965 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1966 instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
1967 instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
1970 case op_construct: {
1971 instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
1972 ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
1973 ASSERT(profile->m_bytecodeOffset == -1);
1974 profile->m_bytecodeOffset = i;
1975 instructions[i + opLength - 1] = profile;
1978 case op_get_by_id_out_of_line:
1979 case op_get_array_length:
1982 case op_init_global_const_nop: {
1983 ASSERT(codeType() == GlobalCode);
1984 Identifier ident = identifier(pc[4].u.operand);
1985 SymbolTableEntry entry = m_globalObject->symbolTable()->get(ident.impl());
1989 instructions[i + 0] = vm()->interpreter->getOpcode(op_init_global_const);
1990 instructions[i + 1] = &m_globalObject->variableAt(entry.varOffset().scopeOffset());
1994 case op_resolve_scope: {
1995 const Identifier& ident = identifier(pc[3].u.operand);
1996 ResolveType type = static_cast<ResolveType>(pc[4].u.operand);
1997 RELEASE_ASSERT(type != LocalClosureVar);
1998 int localScopeDepth = pc[5].u.operand;
2000 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type);
2001 instructions[i + 4].u.operand = op.type;
2002 instructions[i + 5].u.operand = op.depth;
2003 if (op.lexicalEnvironment)
2004 instructions[i + 6].u.symbolTable.set(*vm(), ownerExecutable, op.lexicalEnvironment->symbolTable());
2006 instructions[i + 6].u.pointer = nullptr;
2010 case op_get_from_scope: {
2011 ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
2012 ASSERT(profile->m_bytecodeOffset == -1);
2013 profile->m_bytecodeOffset = i;
2014 instructions[i + opLength - 1] = profile;
2016 // get_from_scope dst, scope, id, ResolveModeAndType, Structure, Operand
2018 int localScopeDepth = pc[5].u.operand;
2019 instructions[i + 5].u.pointer = nullptr;
2021 ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
2022 if (modeAndType.type() == LocalClosureVar) {
2023 instructions[i + 4] = ResolveModeAndType(modeAndType.mode(), ClosureVar).operand();
2027 const Identifier& ident = identifier(pc[3].u.operand);
2028 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, modeAndType.type());
2030 instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
2031 if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks)
2032 instructions[i + 5].u.watchpointSet = op.watchpointSet;
2033 else if (op.structure)
2034 instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
2035 instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
2039 case op_put_to_scope: {
2040 // put_to_scope scope, id, value, ResolveModeAndType, Structure, Operand
2041 ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
2042 if (modeAndType.type() == LocalClosureVar) {
2043 // Only do watching if the property we're putting to is not anonymous.
2044 if (static_cast<unsigned>(pc[2].u.operand) != UINT_MAX) {
2045 // Different create_lexical_environment instructions may refer to the same symbol table.
2046 // This is used for ES6's 'for' loops each having a separate activation. We will emit two
2047 // create_lexical_environment instructions for a given loop to implement this feature,
2048 // but both instructions should rely on the same underlying symbol table so that the
2049 // loop's scope isn't mistakenly inferred as a singleton scope.
2050 int symbolTableIndex = pc[5].u.operand;
2051 auto addResult = clonedConstantSymbolTables.add(symbolTableIndex);
2052 if (addResult.isNewEntry) {
2053 SymbolTable* unlinkedTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
2054 SymbolTable* linkedTable;
2055 if (unlinkedTable->correspondsToLexicalScope()) {
2056 RELEASE_ASSERT(unlinkedTable->scopeSize());
2057 linkedTable = unlinkedTable->cloneScopePart(*m_vm);
2059 // There is only one SymbolTable per function that does not correspond
2060 // to a lexical scope and that is the function's var symbol table.
2061 // We've already cloned that.
2062 linkedTable = symbolTable();
2063 if (linkedTable->scopeSize())
2064 RELEASE_ASSERT(didCloneSymbolTable);
2066 replaceConstant(symbolTableIndex, linkedTable);
2068 SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
2069 const Identifier& ident = identifier(pc[2].u.operand);
2070 ConcurrentJITLocker locker(symbolTable->m_lock);
2071 auto iter = symbolTable->find(locker, ident.impl());
2072 RELEASE_ASSERT(iter != symbolTable->end(locker));
2073 iter->value.prepareToWatch();
2074 instructions[i + 5].u.watchpointSet = iter->value.watchpointSet();
2076 instructions[i + 5].u.watchpointSet = nullptr;
2080 const Identifier& ident = identifier(pc[2].u.operand);
2081 int localScopeDepth = pc[5].u.operand;
2082 instructions[i + 5].u.pointer = nullptr;
2083 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Put, modeAndType.type());
2085 instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
2086 if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks)
2087 instructions[i + 5].u.watchpointSet = op.watchpointSet;
2088 else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
2089 if (op.watchpointSet)
2090 op.watchpointSet->invalidate(PutToScopeFireDetail(this, ident));
2091 } else if (op.structure)
2092 instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
2093 instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
2098 case op_profile_type: {
2099 RELEASE_ASSERT(vm()->typeProfiler());
2100 // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType?
2101 size_t instructionOffset = i + opLength - 1;
2102 unsigned divotStart, divotEnd;
2103 GlobalVariableID globalVariableID = 0;
2104 RefPtr<TypeSet> globalTypeSet;
2105 bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
2106 VirtualRegister profileRegister(pc[1].u.operand);
2107 ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand);
2108 SymbolTable* symbolTable = nullptr;
2109 int localScopeDepth = pc[2].u.operand;
2112 case ProfileTypeBytecodePutToScope:
2113 case ProfileTypeBytecodeGetFromScope: {
2114 const Identifier& ident = identifier(pc[4].u.operand);
2115 ResolveType type = static_cast<ResolveType>(pc[5].u.operand);
2116 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, (flag == ProfileTypeBytecodeGetFromScope ? Get : Put), type);
2118 // FIXME: handle other values for op.type here, and also consider what to do when we can't statically determine the globalID
2119 // https://bugs.webkit.org/show_bug.cgi?id=135184
2120 if (op.type == ClosureVar)
2121 symbolTable = op.lexicalEnvironment->symbolTable();
2122 else if (op.type == GlobalVar)
2123 symbolTable = m_globalObject.get()->symbolTable();
2126 ConcurrentJITLocker locker(symbolTable->m_lock);
2127 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
2128 symbolTable->prepareForTypeProfiling(locker);
2129 globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), *vm());
2130 globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), *vm());
2132 globalVariableID = TypeProfilerNoGlobalIDExists;
2136 case ProfileTypeBytecodePutToLocalScope:
2137 case ProfileTypeBytecodeGetFromLocalScope: {
2138 const Identifier& ident = identifier(pc[4].u.operand);
2139 symbolTable = this->symbolTable();
2140 ConcurrentJITLocker locker(symbolTable->m_lock);
2141 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
2142 symbolTable->prepareForTypeProfiling(locker);
2143 globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), *vm());
2144 globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), *vm());
2149 case ProfileTypeBytecodeHasGlobalID: {
2150 symbolTable = this->symbolTable();
2151 ConcurrentJITLocker locker(symbolTable->m_lock);
2152 globalVariableID = symbolTable->uniqueIDForOffset(locker, VarOffset(profileRegister), *vm());
2153 globalTypeSet = symbolTable->globalTypeSetForOffset(locker, VarOffset(profileRegister), *vm());
2156 case ProfileTypeBytecodeDoesNotHaveGlobalID:
2157 case ProfileTypeBytecodeFunctionArgument: {
2158 globalVariableID = TypeProfilerNoGlobalIDExists;
2161 case ProfileTypeBytecodeFunctionReturnStatement: {
2162 RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
2163 globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
2164 globalVariableID = TypeProfilerReturnStatement;
2165 if (!shouldAnalyze) {
2166 // Because a return statement can be added implicitly to return undefined at the end of a function,
2167 // and these nodes don't emit expression ranges because they aren't in the actual source text of
2168 // the user's program, give the type profiler some range to identify these return statements.
2169 // Currently, the text offset that is used as identification is on the open brace of the function
2170 // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
2171 divotStart = divotEnd = m_sourceOffset;
2172 shouldAnalyze = true;
2178 std::pair<TypeLocation*, bool> locationPair = vm()->typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
2179 m_ownerExecutable->sourceID(), divotStart, divotEnd, globalTypeSet, vm());
2180 TypeLocation* location = locationPair.first;
2181 bool isNewLocation = locationPair.second;
2183 if (flag == ProfileTypeBytecodeFunctionReturnStatement)
2184 location->m_divotForFunctionOffsetIfReturnStatement = m_sourceOffset;
2186 if (shouldAnalyze && isNewLocation)
2187 vm()->typeProfiler()->insertNewLocation(location);
2189 instructions[i + 2].u.location = location;
2194 if (pc[1].u.index == DidReachBreakpoint)
2195 m_hasDebuggerStatement = true;
2205 if (vm()->controlFlowProfiler())
2206 insertBasicBlockBoundariesForControlFlowProfiler(instructions);
2208 m_instructions = WTF::RefCountedArray<Instruction>(instructions);
2210 // Set optimization thresholds only after m_instructions is initialized, since these
2211 // rely on the instruction count (and are in theory permitted to also inspect the
2212 // instruction stream to more accurate assess the cost of tier-up).
2213 optimizeAfterWarmUp();
2216 // If the concurrent thread will want the code block's hash, then compute it here
2218 if (Options::alwaysComputeHash())
2221 if (Options::dumpGeneratedBytecodes())
2224 m_heap->m_codeBlocks.add(this);
2225 m_heap->reportExtraMemoryAllocated(sizeof(CodeBlock) + m_instructions.size() * sizeof(Instruction));
// Destructor: notify the profiler, then unlink every incoming call node
// (LLInt, regular, and polymorphic) so callers' CallLinkInfo destructors do
// not later walk into our freed linked lists.
// NOTE(review): this extraction has dropped some lines (the embedded original
// line numbers jump); comments describe only what is visible here.
2228 CodeBlock::~CodeBlock()
2230 if (m_vm->m_perBytecodeProfiler)
2231 m_vm->m_perBytecodeProfiler->notifyDestruction(this);
2233 #if ENABLE(VERBOSE_VALUE_PROFILE)
2234 dumpValueProfiles();
// Unlink LLInt callers first; each remove() detaches the head node until the
// list drains.
2236 while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
2237 m_incomingLLIntCalls.begin()->remove();
2239 // We may be destroyed before any CodeBlocks that refer to us are destroyed.
2240 // Consider that two CodeBlocks become unreachable at the same time. There
2241 // is no guarantee about the order in which the CodeBlocks are destroyed.
2242 // So, if we don't remove incoming calls, and get destroyed before the
2243 // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
2244 // destructor will try to remove nodes from our (no longer valid) linked list.
2245 while (m_incomingCalls.begin() != m_incomingCalls.end())
2246 m_incomingCalls.begin()->remove();
2247 while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
2248 m_incomingPolymorphicCalls.begin()->remove();
2250 // Note that our outgoing calls will be removed from other CodeBlocks'
2251 // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
// Iterate all structure stubs; the loop body is elided in this extraction —
// TODO confirm against upstream source what is done per stub here.
2254 for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter)
2256 #endif // ENABLE(JIT)
2259 void CodeBlock::setNumParameters(int newValue)
2261 m_numParameters = newValue;
2263 m_argumentValueProfiles.resizeToFit(newValue);
2266 void EvalCodeCache::visitAggregate(SlotVisitor& visitor)
2268 EvalCacheMap::iterator end = m_cacheMap.end();
2269 for (EvalCacheMap::iterator ptr = m_cacheMap.begin(); ptr != end; ++ptr)
2270 visitor.append(&ptr->value);
// Returns the FTL OSR-entry CodeBlock attached to this block's DFG JITCode,
// if one exists. Only DFG code blocks can carry one (the early bail below).
// NOTE(review): the #if ENABLE(FTL_JIT) guard line and the early/fallback
// return statements are elided in this extraction.
2273 CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
2276 if (jitType() != JITCode::DFGJIT)
2278 DFG::JITCode* jitCode = m_jitCode->dfg();
2279 return jitCode->osrEntryBlock.get();
2280 #else // ENABLE(FTL_JIT)
2282 #endif // ENABLE(FTL_JIT)
// GC entry point for marking this CodeBlock. Marks the alternative and any
// OSR-entry block, reports extra memory, appends strong roots, and either
// scans everything strongly (if liveness is assumed) or kicks off the weak
// fixpoint via propagateTransitions/determineLiveness.
// NOTE(review): several lines are elided in this extraction (original line
// numbers jump) — e.g. the early-return after the CAS race loser.
2285 void CodeBlock::visitAggregate(SlotVisitor& visitor)
2287 #if ENABLE(PARALLEL_GC)
2288 // I may be asked to scan myself more than once, and it may even happen concurrently.
2289 // To this end, use an atomic operation to check (and set) if I've been called already.
2290 // Only one thread may proceed past this point - whichever one wins the atomic set race.
2291 bool setByMe = m_visitAggregateHasBeenCalled.compareExchangeStrong(false, true);
2294 #endif // ENABLE(PARALLEL_GC)
// Also mark the lower-tier alternative and any special OSR-entry block.
2296 if (!!m_alternative)
2297 m_alternative->visitAggregate(visitor);
2299 if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
2300 otherBlock->visitAggregate(visitor);
// Account this block's memory against the owner executable for GC pacing.
2302 visitor.reportExtraMemoryVisited(ownerExecutable(), sizeof(CodeBlock));
2304 visitor.reportExtraMemoryVisited(ownerExecutable(), m_jitCode->size());
2305 if (m_instructions.size()) {
2306 // Divide by refCount() because m_instructions points to something that is shared
2307 // by multiple CodeBlocks, and we only want to count it towards the heap size once.
2308 // Having each CodeBlock report only its proportional share of the size is one way
2309 // of accomplishing this.
2310 visitor.reportExtraMemoryVisited(ownerExecutable(), m_instructions.size() * sizeof(Instruction) / m_instructions.refCount());
2313 visitor.append(&m_unlinkedCode);
2315 // There are three things that may use unconditional finalizers: lazy bytecode freeing,
2316 // inline cache clearing, and jettisoning. The probability of us wanting to do at
2317 // least one of those things is probably quite close to 1. So we add one no matter what
2318 // and when it runs, it figures out whether it has any work to do.
2319 visitor.addUnconditionalFinalizer(this);
// Reset the transition fixpoint flag; propagateTransitions() will set it back
// once every transition target has been marked.
2321 m_allTransitionsHaveBeenMarked = false;
2323 if (shouldImmediatelyAssumeLivenessDuringScan()) {
2324 // This code block is live, so scan all references strongly and return.
2325 stronglyVisitStrongReferences(visitor);
2326 stronglyVisitWeakReferences(visitor);
2327 propagateTransitions(visitor);
2331 // There are two things that we use weak reference harvesters for: DFG fixpoint for
2332 // jettisoning, and trying to find structures that would be live based on some
2333 // inline cache. So it makes sense to register them regardless.
2334 visitor.addWeakReferenceHarvester(this);
2337 // We get here if we're live in the sense that our owner executable is live,
2338 // but we're not yet live for sure in another sense: we may yet decide that this
2339 // code block should be jettisoned based on its outgoing weak references being
2340 // stale. Set a flag to indicate that we're still assuming that we're dead, and
2341 // perform one round of determining if we're live. The GC may determine, based on
2342 // either us marking additional objects, or by other objects being marked for
2343 // other reasons, that this iteration should run again; it will notify us of this
2344 // decision by calling harvestWeakReferences().
2346 m_jitCode->dfgCommon()->livenessHasBeenProved = false;
2348 propagateTransitions(visitor);
2349 determineLiveness(visitor);
2350 #else // ENABLE(DFG_JIT)
2351 RELEASE_ASSERT_NOT_REACHED();
2352 #endif // ENABLE(DFG_JIT)
// Decides whether being scanned at all is sufficient evidence of liveness,
// i.e. whether we can skip the weak-reference fixpoint for this block.
// NOTE(review): the return statements are elided in this extraction.
2355 bool CodeBlock::shouldImmediatelyAssumeLivenessDuringScan()
2358 // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
2359 // their weak references go stale. So if a baseline JIT CodeBlock gets
2360 // scanned, we can assume that this means that it's live.
2361 if (!JITCode::isOptimizingJIT(jitType()))
2364 // For simplicity, we don't attempt to jettison code blocks during GC if
2365 // they are executing. Instead we strongly mark their weak references to
2366 // allow them to continue to execute soundly.
2367 if (m_mayBeExecuting)
// Debug/testing escape hatch: force all DFG code blocks to be treated as live.
2370 if (Options::forceDFGCodeBlockLiveness())
// True if this block has been proven live in the current GC: either the
// fast-path assumption applies, or the DFG weak-reference fixpoint has
// proven liveness.
2379 bool CodeBlock::isKnownToBeLiveDuringGC()
2382 // This should return true for:
2383 // - Code blocks that behave like normal objects - i.e. if they are referenced then they
2385 // - Code blocks that were running on the stack.
2386 // - Code blocks that survived the last GC if the current GC is an Eden GC. This is
2387 // because either livenessHasBeenProved would have survived as true or m_mayBeExecuting
2388 // would survive as true.
2389 // - Code blocks that don't have any dead weak references.
2391 return shouldImmediatelyAssumeLivenessDuringScan()
2392 || m_jitCode->dfgCommon()->livenessHasBeenProved;
// Helper: a DFG structure transition should be marked only if its code origin
// (when present) and its source structure are both already marked.
// NOTE(review): the individual return statements are elided in this extraction.
2399 static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
2401 if (transition.m_codeOrigin && !Heap::isMarked(transition.m_codeOrigin.get()))
2404 if (!Heap::isMarked(transition.m_from.get()))
2409 #endif // ENABLE(DFG_JIT)
// One iteration of the transition-marking fixpoint: for every put_by_id
// transition (LLInt instruction stream, baseline stub infos, and DFG common
// data), mark the target structure if the source side is already marked.
// Sets m_allTransitionsHaveBeenMarked once nothing remains unmarked.
// NOTE(review): break/continue and brace lines are elided in this extraction.
2411 void CodeBlock::propagateTransitions(SlotVisitor& visitor)
2413 UNUSED_PARAM(visitor);
// Fast path: a previous iteration already marked everything.
2415 if (m_allTransitionsHaveBeenMarked)
2418 bool allAreMarkedSoFar = true;
2420 Interpreter* interpreter = m_vm->interpreter;
// (1) LLInt: walk the bytecode's property-access instructions directly.
2421 if (jitType() == JITCode::InterpreterThunk) {
2422 const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
2423 for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
2424 Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
2425 switch (interpreter->getOpcodeID(instruction[0].u.opcode)) {
2426 case op_put_by_id_transition_direct:
2427 case op_put_by_id_transition_normal:
2428 case op_put_by_id_transition_direct_out_of_line:
2429 case op_put_by_id_transition_normal_out_of_line: {
// Operand 4 holds the old structure, operand 6 the new one (see the LLInt
// clearing code in finalizeUnconditionally, which uses the same slots).
2430 if (Heap::isMarked(instruction[4].u.structure.get()))
2431 visitor.append(&instruction[6].u.structure);
2433 allAreMarkedSoFar = false;
// (2) Baseline JIT: walk the structure stub infos.
2443 if (JITCode::isJIT(jitType())) {
2444 for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
2445 StructureStubInfo& stubInfo = **iter;
2446 switch (stubInfo.accessType) {
2447 case access_put_by_id_transition_normal:
2448 case access_put_by_id_transition_direct: {
2449 JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
2450 if ((!origin || Heap::isMarked(origin))
2451 && Heap::isMarked(stubInfo.u.putByIdTransition.previousStructure.get()))
2452 visitor.append(&stubInfo.u.putByIdTransition.structure);
2454 allAreMarkedSoFar = false;
2458 case access_put_by_id_list: {
2459 PolymorphicPutByIdList* list = stubInfo.u.putByIdList.list;
2460 JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
2461 if (origin && !Heap::isMarked(origin)) {
2462 allAreMarkedSoFar = false;
2465 for (unsigned j = list->size(); j--;) {
2466 PutByIdAccess& access = list->m_list[j];
2467 if (!access.isTransition())
2469 if (Heap::isMarked(access.oldStructure()))
2470 visitor.append(&access.m_newStructure);
2472 allAreMarkedSoFar = false;
2482 #endif // ENABLE(JIT)
// (3) DFG/FTL: walk the recorded weak-reference transitions.
2485 if (JITCode::isOptimizingJIT(jitType())) {
2486 DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2488 for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
2489 if (shouldMarkTransition(dfgCommon->transitions[i])) {
2490 // If the following three things are live, then the target of the
2491 // transition is also live:
2493 // - This code block. We know it's live already because otherwise
2494 // we wouldn't be scanning ourselves.
2496 // - The code origin of the transition. Transitions may arise from
2497 // code that was inlined. They are not relevant if the user's
2498 // object that is required for the inlinee to run is no longer
2501 // - The source of the transition. The transition checks if some
2502 // heap location holds the source, and if so, stores the target.
2503 // Hence the source must be live for the transition to be live.
2505 // We also short-circuit the liveness if the structure is harmless
2506 // to mark (i.e. its global object and prototype are both already
2509 visitor.append(&dfgCommon->transitions[i].m_to);
2511 allAreMarkedSoFar = false;
2514 #endif // ENABLE(DFG_JIT)
// Fixpoint reached: remember it so later iterations return early.
2516 if (allAreMarkedSoFar)
2517 m_allTransitionsHaveBeenMarked = true;
// Tests whether every weak reference of this (optimizing-JIT) block is
// marked; if so, records livenessHasBeenProved and scans strong references.
// If not, leaves the flag false so finalizeUnconditionally can jettison us.
// NOTE(review): early-return lines and #if guards are elided in this extraction.
2520 void CodeBlock::determineLiveness(SlotVisitor& visitor)
2522 UNUSED_PARAM(visitor);
// Blocks assumed live were already strongly scanned in visitAggregate.
2524 if (shouldImmediatelyAssumeLivenessDuringScan())
2528 // Check if we have any remaining work to do.
2529 DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2530 if (dfgCommon->livenessHasBeenProved)
2533 // Now check all of our weak references. If all of them are live, then we
2534 // have proved liveness and so we scan our strong references. If at end of
2535 // GC we still have not proved liveness, then this code block is toast.
2536 bool allAreLiveSoFar = true;
2537 for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
2538 if (!Heap::isMarked(dfgCommon->weakReferences[i].get())) {
2539 allAreLiveSoFar = false;
// Structures are tracked in a separate weak list; check them too.
2543 if (allAreLiveSoFar) {
2544 for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
2545 if (!Heap::isMarked(dfgCommon->weakStructureReferences[i].get())) {
2546 allAreLiveSoFar = false;
2552 // If some weak references are dead, then this fixpoint iteration was
2554 if (!allAreLiveSoFar)
2557 // All weak references are live. Record this information so we don't
2558 // come back here again, and scan the strong references.
2559 dfgCommon->livenessHasBeenProved = true;
2560 stronglyVisitStrongReferences(visitor)
2561 #endif // ENABLE(DFG_JIT)
// WeakReferenceHarvester callback: each GC fixpoint iteration, re-run the
// transition propagation and then re-test whether liveness is now proven.
// Order matters — propagateTransitions may mark cells determineLiveness checks.
2564 void CodeBlock::visitWeakReferences(SlotVisitor& visitor)
2566 propagateTransitions(visitor);
2567 determineLiveness(visitor);
// Unconditional finalizer run at the end of GC. Clears LLInt inline caches
// whose cached cells died, unlinks dead LLInt call links, jettisons this
// block if its DFG weak references are dead, and resets dead JIT stubs.
// NOTE(review): many break/continue/brace and #if lines are elided in this
// extraction (original line numbers jump); comments describe visible code.
2570 void CodeBlock::finalizeUnconditionally()
2572 Interpreter* interpreter = m_vm->interpreter;
// (1) LLInt caches: walk property-access instructions and drop dead cells.
2573 if (JITCode::couldBeInterpreted(jitType())) {
2574 const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
2575 for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
2576 Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
2577 switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) {
2579 case op_get_by_id_out_of_line:
2581 case op_put_by_id_out_of_line:
// Operand 4 caches the structure; keep the cache only if it's still marked.
2582 if (!curInstruction[4].u.structure || Heap::isMarked(curInstruction[4].u.structure.get()))
2584 if (Options::verboseOSR())
2585 dataLogF("Clearing LLInt property access with structure %p.\n", curInstruction[4].u.structure.get());
2586 curInstruction[4].u.structure.clear();
2587 curInstruction[5].u.operand = 0;
2589 case op_put_by_id_transition_direct:
2590 case op_put_by_id_transition_normal:
2591 case op_put_by_id_transition_direct_out_of_line:
2592 case op_put_by_id_transition_normal_out_of_line:
// A put transition caches old structure (4), new structure (6) and the
// prototype chain (7); all three must survive or the whole cache is dropped
// and the instruction is demoted back to a plain op_put_by_id.
2593 if (Heap::isMarked(curInstruction[4].u.structure.get())
2594 && Heap::isMarked(curInstruction[6].u.structure.get())
2595 && Heap::isMarked(curInstruction[7].u.structureChain.get()))
2597 if (Options::verboseOSR()) {
2598 dataLogF("Clearing LLInt put transition with structures %p -> %p, chain %p.\n",
2599 curInstruction[4].u.structure.get(),
2600 curInstruction[6].u.structure.get(),
2601 curInstruction[7].u.structureChain.get());
2603 curInstruction[4].u.structure.clear();
2604 curInstruction[6].u.structure.clear();
2605 curInstruction[7].u.structureChain.clear();
2606 curInstruction[0].u.opcode = interpreter->getOpcode(op_put_by_id);
2608 case op_get_array_length:
// (to_this cache) operand 2 holds the cached structure, operand 3 the status.
2611 if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
2613 if (Options::verboseOSR())
2614 dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
2615 curInstruction[2].u.structure.clear();
2616 curInstruction[3].u.toThisStatus = merge(
2617 curInstruction[3].u.toThisStatus, ToThisClearedByGC);
2619 case op_create_this: {
// Operand 4 caches the callee cell (or the seen-multiple sentinel).
2620 auto& cacheWriteBarrier = curInstruction[4].u.jsCell;
2621 if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
2623 JSCell* cachedFunction = cacheWriteBarrier.get();
2624 if (Heap::isMarked(cachedFunction))
2626 if (Options::verboseOSR())
2627 dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
2628 cacheWriteBarrier.clear();
2631 case op_resolve_scope: {
2632 // Right now this isn't strictly necessary. Any symbol tables that this will refer to
2633 // are for outer functions, and we refer to those functions strongly, and they refer
2634 // to the symbol table strongly. But it's nice to be on the safe side.
2635 WriteBarrierBase<SymbolTable>& symbolTable = curInstruction[6].u.symbolTable;
2636 if (!symbolTable || Heap::isMarked(symbolTable.get()))
2638 if (Options::verboseOSR())
2639 dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
2640 symbolTable.clear();
2643 case op_get_from_scope:
2644 case op_put_to_scope: {
2645 ResolveModeAndType modeAndType =
2646 ResolveModeAndType(curInstruction[4].u.operand);
// Var accesses store a watchpoint set (not a structure) in operand 5; skip.
2647 if (modeAndType.type() == GlobalVar || modeAndType.type() == GlobalVarWithVarInjectionChecks || modeAndType.type() == LocalClosureVar)
2649 WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
2650 if (!structure || Heap::isMarked(structure.get()))
2652 if (Options::verboseOSR())
2653 dataLogF("Clearing scope access with structure %p.\n", structure.get());
// default: any other opcode in the property-access list is a bug.
2658 OpcodeID opcodeID = interpreter->getOpcodeID(curInstruction[0].u.opcode);
2659 ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
// (2) LLInt call links: unlink calls to dead callees; drop dead last-seen callees.
2663 for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
2664 if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
2665 if (Options::verboseOSR())
2666 dataLog("Clearing LLInt call from ", *this, "\n");
2667 m_llintCallLinkInfos[i].unlink();
2669 if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
2670 m_llintCallLinkInfos[i].lastSeenCallee.clear();
// (3) DFG: if the weak-reference fixpoint never proved liveness, jettison.
2675 // Check if we're not live. If we are, then jettison.
2676 if (!isKnownToBeLiveDuringGC()) {
2677 if (Options::verboseOSR())
2678 dataLog(*this, " has dead weak references, jettisoning during GC.\n");
2680 if (DFG::shouldShowDisassembly()) {
2681 dataLog(*this, " will be jettisoned because of the following dead references:\n");
2682 DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2683 for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
2684 DFG::WeakReferenceTransition& transition = dfgCommon->transitions[i];
2685 JSCell* origin = transition.m_codeOrigin.get();
2686 JSCell* from = transition.m_from.get();
2687 JSCell* to = transition.m_to.get();
2688 if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
2690 dataLog(" Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
2692 for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
2693 JSCell* weak = dfgCommon->weakReferences[i].get();
2694 if (Heap::isMarked(weak))
2696 dataLog(" Weak reference ", RawPointer(weak), ".\n");
2700 jettison(Profiler::JettisonDueToWeakReference);
2703 #endif // ENABLE(DFG_JIT)
// (4) Baseline JIT inline caches: let call links and stubs drop dead cells.
2706 // Handle inline caches.
2708 RepatchBuffer repatchBuffer(this);
2710 for (auto iter = callLinkInfosBegin(); !!iter; ++iter)
2711 (*iter)->visitWeak(repatchBuffer);
2713 for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
2714 StructureStubInfo& stubInfo = **iter;
2716 if (stubInfo.visitWeakReferences(repatchBuffer))
2719 resetStubDuringGCInternal(repatchBuffer, stubInfo);
// Fills `result` with a CodeOrigin -> StructureStubInfo map; caller must hold
// the CodeBlock's concurrent-JIT lock (the unnamed locker parameter).
// NOTE(review): the #if ENABLE(JIT) / #else guard lines are elided in this
// extraction — UNUSED_PARAM is presumably the non-JIT branch; confirm upstream.
2725 void CodeBlock::getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result)
2728 toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
2730 UNUSED_PARAM(result);
2734 void CodeBlock::getStubInfoMap(StubInfoMap& result)
2736 ConcurrentJITLocker locker(m_lock);
2737 getStubInfoMap(locker, result);
// Fills `result` with a CodeOrigin -> CallLinkInfo map; caller must hold the
// CodeBlock's concurrent-JIT lock.
// NOTE(review): the #if ENABLE(JIT) / #else guard lines are elided in this
// extraction — UNUSED_PARAM is presumably the non-JIT branch; confirm upstream.
2740 void CodeBlock::getCallLinkInfoMap(const ConcurrentJITLocker&, CallLinkInfoMap& result)
2743 toHashMap(m_callLinkInfos, getCallLinkInfoCodeOrigin, result);
2745 UNUSED_PARAM(result);
2749 void CodeBlock::getCallLinkInfoMap(CallLinkInfoMap& result)
2751 ConcurrentJITLocker locker(m_lock);
2752 getCallLinkInfoMap(locker, result);
2756 StructureStubInfo* CodeBlock::addStubInfo()
2758 ConcurrentJITLocker locker(m_lock);
2759 return m_stubInfos.add();
// Linear search for the stub info recorded at `codeOrigin`.
// NOTE(review): the return statements are elided in this extraction;
// presumably returns the match, or null if none — confirm upstream.
2762 StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
2764 for (StructureStubInfo* stubInfo : m_stubInfos) {
2765 if (stubInfo->codeOrigin == codeOrigin)
2771 CallLinkInfo* CodeBlock::addCallLinkInfo()
2773 ConcurrentJITLocker locker(m_lock);
2774 return m_callLinkInfos.add();
// Public stub reset: no-op for unset stubs, otherwise takes the lock, builds
// a RepatchBuffer, and delegates to resetStubInternal.
// NOTE(review): the early `return;` after the access_unset check is elided in
// this extraction.
2777 void CodeBlock::resetStub(StructureStubInfo& stubInfo)
2779 if (stubInfo.accessType == access_unset)
2782 ConcurrentJITLocker locker(m_lock);
2784 RepatchBuffer repatchBuffer(this);
2785 resetStubInternal(repatchBuffer, stubInfo);
// Dispatches a stub reset to the right repatch routine based on access kind
// (get_by_id / put_by_id / in). Requires JIT code (see the RELEASE_ASSERT).
// NOTE(review): the `else {` line and any trailing statements after resetIn
// are elided in this extraction.
2788 void CodeBlock::resetStubInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
2790 AccessType accessType = static_cast<AccessType>(stubInfo.accessType);
2792 if (Options::verboseOSR()) {
2793 // This can be called from GC destructor calls, so we don't try to do a full dump
2794 // of the CodeBlock.
2795 dataLog("Clearing structure cache (kind ", static_cast<int>(stubInfo.accessType), ") in ", RawPointer(this), ".\n");
2798 RELEASE_ASSERT(JITCode::isJIT(jitType()));
2800 if (isGetByIdAccess(accessType))
2801 resetGetByID(repatchBuffer, stubInfo);
2802 else if (isPutByIdAccess(accessType))
2803 resetPutByID(repatchBuffer, stubInfo);
2805 RELEASE_ASSERT(isInAccess(accessType));
2806 resetIn(repatchBuffer, stubInfo);
// GC-time stub reset: perform the normal reset, then record that GC caused it
// so later code can distinguish GC resets from ordinary ones.
2812 void CodeBlock::resetStubDuringGCInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
2814 resetStubInternal(repatchBuffer, stubInfo);
2815 stubInfo.resetByGC = true;
// Linear search for the CallLinkInfo whose code origin is exactly the given
// (non-inlined) bytecode index.
// NOTE(review): the return statements are elided in this extraction;
// presumably returns the match, or null if none — confirm upstream.
2818 CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
2820 for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
2821 if ((*iter)->codeOrigin() == CodeOrigin(index))
// Appends every strong GC root owned by this CodeBlock: global object, owner
// executable, unlinked code, eval cache, constants, function expressions and
// declarations, and object-allocation profiles; plus DFG inline call frames.
// NOTE(review): some guard lines (e.g. around the m_rareData access) are
// elided in this extraction.
2828 void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
2830 visitor.append(&m_globalObject);
2831 visitor.append(&m_ownerExecutable);
2832 visitor.append(&m_unlinkedCode);
2834 m_rareData->m_evalCodeCache.visitAggregate(visitor);
2835 visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
2836 for (size_t i = 0; i < m_functionExprs.size(); ++i)
2837 visitor.append(&m_functionExprs[i]);
2838 for (size_t i = 0; i < m_functionDecls.size(); ++i)
2839 visitor.append(&m_functionDecls[i]);
2840 for (unsigned i = 0; i < m_objectAllocationProfiles.size(); ++i)
2841 m_objectAllocationProfiles[i].visitAggregate(visitor);
2844 if (JITCode::isOptimizingJIT(jitType())) {
2845 // FIXME: This is an antipattern for two reasons. References introduced by the DFG
2846 // that aren't in the original CodeBlock being compiled should be weakly referenced.
2847 // Inline call frames aren't in the original CodeBlock, so they qualify as weak. Also,
2848 // those weak references should already be tracked in the DFG as weak FrozenValues. So,
2849 // there is probably no need for this. We already have assertions that this should be
2851 // https://bugs.webkit.org/show_bug.cgi?id=146613
2852 DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2853 if (dfgCommon->inlineCallFrames.get())
2854 dfgCommon->inlineCallFrames->visitAggregate(visitor);
// Refresh value-profile predictions while we're here.
2858 updateAllPredictions();
// Force-marks everything this block normally references only weakly — used
// when the block is assumed live and must keep executing soundly. Only
// optimizing-JIT blocks have DFG common data to visit.
// NOTE(review): the #if ENABLE(DFG_JIT) guard and early-return lines are
// elided in this extraction.
2861 void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor)
2863 UNUSED_PARAM(visitor);
2866 if (!JITCode::isOptimizingJIT(jitType()))
2869 DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2871 for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
2872 if (!!dfgCommon->transitions[i].m_codeOrigin)
2873 visitor.append(&dfgCommon->transitions[i].m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
2874 visitor.append(&dfgCommon->transitions[i].m_from);
2875 visitor.append(&dfgCommon->transitions[i].m_to);
2878 for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i)
2879 visitor.append(&dfgCommon->weakReferences[i]);
2881 for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i)
2882 visitor.append(&dfgCommon->weakStructureReferences[i]);
// Walks the alternative() chain from this CodeBlock down to the lowest tier,
// which must be baseline (or not-yet-compiled). The `return result;` line is
// elided from this listing.
2886 CodeBlock* CodeBlock::baselineAlternative()
2889     CodeBlock* result = this;
2890     while (result->alternative())
2891         result = result->alternative();
2892     RELEASE_ASSERT(result);
// The bottom of the chain must be a baseline-class tier or JITCode::None.
2893     RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
// Returns the baseline-tier CodeBlock corresponding to this one. If we are
// already baseline, that's us; otherwise we chase the executable's current
// replacement down its alternative() chain.
// NOTE(review): the listing elides the `return this;` after the baseline check,
// the null-check branch around the comment below, and the final `return result;`.
2900 CodeBlock* CodeBlock::baselineVersion()
2903     if (JITCode::isBaselineCode(jitType()))
2905     CodeBlock* result = replacement();
2907         // This can happen if we're creating the original CodeBlock for an executable.
2908         // Assume that we're the baseline CodeBlock.
2909         RELEASE_ASSERT(jitType() == JITCode::None);
2912     result = result->baselineAlternative();
// True if the executable's current replacement CodeBlock is compiled at a
// strictly higher JIT tier than `typeToReplace`.
2920 bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
2922     return JITCode::isHigherTier(replacement()->jitType(), typeToReplace);
// Convenience overload: is there a replacement at a higher tier than ours?
2925 bool CodeBlock::hasOptimizedReplacement()
2927     return hasOptimizedReplacement(jitType());
// Finds the exception handler covering `bytecodeOffset`, optionally restricted
// to catch handlers. Handlers live in m_rareData; the listing elides the
// null-m_rareData early return, the `continue;` under the catch filter, the
// `return &handler;` on a match, and the trailing `return 0;`.
2931 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
2933     RELEASE_ASSERT(bytecodeOffset < instructions().size());
2938     Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
2939     for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
2940         HandlerInfo& handler = exceptionHandlers[i];
// Skip non-catch handlers (e.g. finally) when a catch handler is required.
2941         if ((requiredHandler == RequiredHandler::CatchHandler) && !handler.isCatchHandler())
2944         // Handlers are ordered innermost first, so the first handler we encounter
2945         // that contains the source address is the correct handler to use.
// Half-open range: start inclusive, end exclusive.
2946         if (handler.start <= bytecodeOffset && handler.end > bytecodeOffset)
// Absolute source line for a bytecode offset: the unlinked code's relative
// line number plus the owner executable's first line.
2953 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
2955     RELEASE_ASSERT(bytecodeOffset < instructions().size());
2956     return m_ownerExecutable->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
// Column for a bytecode offset, delegating to expressionRangeForBytecodeOffset.
// NOTE(review): the local declarations (divot, startOffset, endOffset, line,
// column) and the `return column;` are elided from this listing.
2959 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
2966     expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
// Produces the absolute source range (divot/offsets) and line/column for a
// bytecode offset by adjusting the unlinked code's relative values with this
// CodeBlock's source offset and the executable's first line.
2970 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column)
2972     m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
2973     divot += m_sourceOffset;
// On the executable's first relative line (line == 0) the column must be
// shifted by the first-line column offset; otherwise it is 1-based.
2974     column += line ? 1 : firstLineColumnOffset();
2975     line += m_ownerExecutable->firstLine();
// Scans the instruction stream for an op_debug whose source line matches
// `line` and whose column matches `column` (or column is unspecified).
// NOTE(review): the listing elides the declaration of `unused`, the
// `return true;` on a match, and the trailing `return false;` — confirm
// against the full file.
2978 bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
2980     Interpreter* interpreter = vm()->interpreter;
2981     const Instruction* begin = instructions().begin();
2982     const Instruction* end = instructions().end();
2983     for (const Instruction* it = begin; it != end;) {
2984         OpcodeID opcodeID = interpreter->getOpcodeID(it->u.opcode);
2985         if (opcodeID == op_debug) {
2986             unsigned bytecodeOffset = it - begin;
2988             unsigned opDebugLine;
2989             unsigned opDebugColumn;
// Only the line/column outputs are needed here; range outputs are discarded.
2990             expressionRangeForBytecodeOffset(bytecodeOffset, unused, unused, unused, opDebugLine, opDebugColumn);
2991             if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
// Advance by the current opcode's length; instructions are variable-width.
2994         it += opcodeLengths[opcodeID];
// Releases excess vector capacity. In EarlyShrink mode (before anything has
// taken interior pointers into these tables) the constant and jump tables are
// shrunk too; later shrinks must leave them alone.
// NOTE(review): the null-check around m_rareData is elided from this listing.
2999 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
3001     m_rareCaseProfiles.shrinkToFit();
3002     m_specialFastCaseProfiles.shrinkToFit();
3004     if (shrinkMode == EarlyShrink) {
3005         m_constantRegisters.shrinkToFit();
3006         m_constantsSourceCodeRepresentation.shrinkToFit();
3009             m_rareData->m_switchJumpTables.shrinkToFit();
3010             m_rareData->m_stringSwitchJumpTables.shrinkToFit();
3012     } // else don't shrink these, because we would have already pointed pointers into these tables.
// Unlinks every outgoing call from this CodeBlock (and its alternative chain):
// first the LLInt call link infos, then — if the JIT is in use — the baseline/
// optimized CallLinkInfos via a RepatchBuffer.
// NOTE(review): early `return`s after the isEmpty/canUseJIT checks, the
// `continue;` for unlinked infos, and any #if ENABLE(JIT) guard are elided
// from this listing.
3016 void CodeBlock::unlinkCalls()
3018     if (!!m_alternative)
3019         m_alternative->unlinkCalls();
3020     for (size_t i = 0; i < m_llintCallLinkInfos.size(); ++i) {
3021         if (m_llintCallLinkInfos[i].isLinked())
3022             m_llintCallLinkInfos[i].unlink();
3024     if (m_callLinkInfos.isEmpty())
3026     if (!m_vm->canUseJIT())
3028     RepatchBuffer repatchBuffer(this);
3029     for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
3030         CallLinkInfo& info = **iter;
3031         if (!info.isLinked())
3033         info.unlink(repatchBuffer);
// Registers a JIT call link info as an incoming caller, after giving the
// tier-up machinery a chance to notice the caller frame.
3037 void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
3039     noticeIncomingCall(callerFrame);
3040     m_incomingCalls.push(incoming);
// Same as linkIncomingCall, but for a polymorphic call stub node.
3043 void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
3045     noticeIncomingCall(callerFrame);
3046     m_incomingPolymorphicCalls.push(incoming);
3048 #endif // ENABLE(JIT)
// Unlinks every call site that targets this CodeBlock: LLInt incoming calls
// unconditionally, then (under the JIT) plain and polymorphic incoming calls
// through a RepatchBuffer. Each unlink() removes the node from its list, so
// the while-not-empty loops terminate.
// NOTE(review): the matching #if ENABLE(JIT) and the early return after the
// isEmpty checks are elided from this listing; only the #endif is visible.
3050 void CodeBlock::unlinkIncomingCalls()
3052     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
3053         m_incomingLLIntCalls.begin()->unlink();
3055     if (m_incomingCalls.isEmpty() && m_incomingPolymorphicCalls.isEmpty())
3057     RepatchBuffer repatchBuffer(this);
3058     while (m_incomingCalls.begin() != m_incomingCalls.end())
3059         m_incomingCalls.begin()->unlink(repatchBuffer);
3060     while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
3061         m_incomingPolymorphicCalls.begin()->unlink(repatchBuffer);
3062 #endif // ENABLE(JIT)
// Registers an LLInt call link info as an incoming caller.
3065 void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
3067     noticeIncomingCall(callerFrame);
3068     m_incomingLLIntCalls.push(incoming);
// Clears the eval code cache on this CodeBlock, its alternative chain, and any
// special OSR-entry block.
// NOTE(review): the `if (!m_rareData) return;` guard before the final line is
// elided from this listing.
3071 void CodeBlock::clearEvalCache()
3073     if (!!m_alternative)
3074         m_alternative->clearEvalCache();
3075     if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
3076         otherBlock->clearEvalCache();
3079     m_rareData->m_evalCodeCache.clear();
// Makes this CodeBlock the one the owner executable dispatches to.
3082 void CodeBlock::install()
3084     ownerExecutable()->installCode(this);
// Asks the owner executable for a fresh CodeBlock of our specialization kind
// (call vs. construct), e.g. as the target of an optimizing compile.
3087 PassRefPtr<CodeBlock> CodeBlock::newReplacement()
3089     return ownerExecutable()->newReplacementCodeBlockFor(specializationKind());
// The currently-installed CodeBlock for our program executable.
3093 CodeBlock* ProgramCodeBlock::replacement()
3095     return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
// The currently-installed CodeBlock for our eval executable.
3098 CodeBlock* EvalCodeBlock::replacement()
3100     return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
// The currently-installed CodeBlock for our function executable, selected by
// whether this block was compiled for construct or for call.
3103 CodeBlock* FunctionCodeBlock::replacement()
3105     return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
// DFG compilability of a program code block, delegated to DFGCapabilities.
3108 DFG::CapabilityLevel ProgramCodeBlock::capabilityLevelInternal()
3110     return DFG::programCapabilityLevel(this);
// DFG compilability of an eval code block, delegated to DFGCapabilities.
3113 DFG::CapabilityLevel EvalCodeBlock::capabilityLevelInternal()
3115     return DFG::evalCapabilityLevel(this);
// DFG compilability of a function code block; construct and call variants are
// assessed separately.
3118 DFG::CapabilityLevel FunctionCodeBlock::capabilityLevelInternal()
3120     if (m_isConstructor)
3121         return DFG::functionForConstructCapabilityLevel(this);
3122     return DFG::functionForCallCapabilityLevel(this);
3126 void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
3128 RELEASE_ASSERT(reason != Profiler::NotJettisoned);
3131 if (DFG::shouldShowDisassembly()) {
3132 dataLog("Jettisoning ", *this);
3133 if (mode == CountReoptimization)
3134 dataLog(" and counting reoptimization");
3135 dataLog(" due to ", reason);
3137 dataLog(", ", *detail);
3141 DeferGCForAWhile deferGC(*m_heap);
3142 RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
3144 if (Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get())
3145 compilation->setJettisonReason(reason, detail);
3147 // We want to accomplish two things here:
3148 // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
3149 // we should OSR exit at the top of the next bytecode instruction after the return.
3150 // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
3152 // This accomplishes the OSR-exit-on-return part, and does its own book-keeping about
3153 // whether the invalidation has already happened.
3154 if (!jitCode()->dfgCommon()->invalidate()) {
3155 // Nothing to do since we've already been invalidated. That means that we cannot be
3156 // the optimized replacement.
3157 RELEASE_ASSERT(this != replacement());
3161 if (DFG::shouldShowDisassembly())
3162 dataLog(" Did invalidate ", *this, "\n");
3164 // Count the reoptimization if that's what the user wanted.
3165 if (mode == CountReoptimization) {
3166 // FIXME: Maybe this should call alternative().
3167 // https://bugs.webkit.org/show_bug.cgi?id=123677
3168 baselineAlternative()->countReoptimization();
3169 if (DFG::shouldShowDisassembly())
3170 dataLog(" Did count reoptimization for ", *this, "\n");
3173 // Now take care of the entrypoint.
3174 if (this != replacement()) {
3175 // This means that we were never the entrypoint. This can happen for OSR entry code
3179 alternative()->optimizeAfterWarmUp();
3180 tallyFrequentExitSites();