2 * Copyright (C) 2008, 2009, 2010 Apple Inc. All rights reserved.
3 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
15 * its contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "CodeBlock.h"
33 #include "BytecodeGenerator.h"
34 #include "DFGCapabilities.h"
35 #include "DFGCommon.h"
37 #include "DFGRepatch.h"
39 #include "Interpreter.h"
42 #include "JSActivation.h"
43 #include "JSFunction.h"
44 #include "JSNameScope.h"
46 #include "LowLevelInterpreter.h"
47 #include "RepatchBuffer.h"
48 #include "SlotVisitorInlines.h"
50 #include <wtf/StringExtras.h>
51 #include <wtf/UnusedParam.h>
54 #include "DFGOperations.h"
57 #define DUMP_CODE_BLOCK_STATISTICS 0
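// ----- Helpers for the human-readable bytecode dump. -----
// escapeQuotes() escapes embedded double quotes so that string constants can be
// printed inside a quoted literal without breaking the dump output.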
65 static String escapeQuotes(const String& str)
69 while ((pos = result.find('\"', pos)) != notFound) {
70 result = makeString(result.substringSharingImpl(0, pos), "\"\\\"\"", result.substringSharingImpl(pos + 1));
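// Renders a JSValue as source-like text for the dump: an empty value prints as
// "0", strings are quoted (with embedded quotes escaped), and everything else
// falls back to JSValue::description().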
76 static String valueToSourceString(ExecState* exec, JSValue val)
79 return ASCIILiteral("0");
82 return makeString("\"", escapeQuotes(val.toString(exec)->value(exec)), "\"");
84 return val.description();
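// constantName() and idName() label operands in the dump as value(@kN) and
// name(@idN), where N is the constant or identifier table index.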
87 static CString constantName(ExecState* exec, int k, JSValue value)
89 return makeString(valueToSourceString(exec, value), "(@k", String::number(k - FirstConstantRegisterIndex), ")").utf8();
92 static CString idName(int id0, const Identifier& ident)
94 return makeString(ident.string(), "(@id", String::number(id0), ")").utf8();
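// Appends the bytecode comment for this offset (when BYTECODE_COMMENTS is
// enabled) and ends the current dump line.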
97 void CodeBlock::dumpBytecodeCommentAndNewLine(int location)
99 #if ENABLE(BYTECODE_COMMENTS)
100 const char* comment = commentForBytecodeOffset(location);
102 dataLog("\t\t ; %s", comment);
104 UNUSED_PARAM(location);
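// Names a register operand for the dump: constants are rendered through
// constantName(), the missing-this marker is special-cased, and ordinary
// registers print as rN.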
109 CString CodeBlock::registerName(ExecState* exec, int r) const
111 if (r == missingThisObjectMarker())
114 if (isConstantRegisterIndex(r))
115 return constantName(exec, r, getConstant(r));
117 return makeString("r", String::number(r)).utf8();
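// Reconstructs a /pattern/flags literal (g, i, m) from a RegExp for the dump.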
120 static String regexpToSourceString(RegExp* regExp)
122 char postfix[5] = { '/', 0, 0, 0, 0 };
124 if (regExp->global())
125 postfix[index++] = 'g';
126 if (regExp->ignoreCase())
127 postfix[index++] = 'i';
128 if (regExp->multiline())
129 postfix[index] = 'm';
131 return makeString("/", regExp->pattern(), postfix);
134 static CString regexpName(int re, RegExp* regexp)
136 return makeString(regexpToSourceString(regexp), "(@re", String::number(re), ")").utf8();
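// Formats a raw pointer as "%p" text for inclusion in dump output.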
139 static String pointerToSourceString(void* p)
141 char buffer[2 + 2 * sizeof(void*) + 1]; // 0x [two characters per byte] \0
142 snprintf(buffer, sizeof(buffer), "%p", p);
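// Maps a DebugHookID to the name printed for op_debug.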
146 NEVER_INLINE static const char* debugHookName(int debugHookID)
148 switch (static_cast<DebugHookID>(debugHookID)) {
149 case DidEnterCallFrame:
150 return "didEnterCallFrame";
151 case WillLeaveCallFrame:
152 return "willLeaveCallFrame";
153 case WillExecuteStatement:
154 return "willExecuteStatement";
155 case WillExecuteProgram:
156 return "willExecuteProgram";
157 case DidExecuteProgram:
158 return "didExecuteProgram";
159 case DidReachBreakpoint:
160 return "didReachBreakpoint";
163 ASSERT_NOT_REACHED();
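// Generic printers for unary ops, binary ops and conditional jumps: each reads
// its operands from the instruction stream and emits one dump line.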
167 void CodeBlock::printUnaryOp(ExecState* exec, int location, const Instruction*& it, const char* op)
169 int r0 = (++it)->u.operand;
170 int r1 = (++it)->u.operand;
172 dataLog("[%4d] %s\t\t %s, %s", location, op, registerName(exec, r0).data(), registerName(exec, r1).data());
173 dumpBytecodeCommentAndNewLine(location);
176 void CodeBlock::printBinaryOp(ExecState* exec, int location, const Instruction*& it, const char* op)
178 int r0 = (++it)->u.operand;
179 int r1 = (++it)->u.operand;
180 int r2 = (++it)->u.operand;
181 dataLog("[%4d] %s\t\t %s, %s, %s", location, op, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
182 dumpBytecodeCommentAndNewLine(location);
185 void CodeBlock::printConditionalJump(ExecState* exec, const Instruction*, const Instruction*& it, int location, const char* op)
187 int r0 = (++it)->u.operand;
188 int offset = (++it)->u.operand;
189 dataLog("[%4d] %s\t\t %s, %d(->%d)", location, op, registerName(exec, r0).data(), offset, location + offset);
190 dumpBytecodeCommentAndNewLine(location);
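// Prints the whole get_by_id family, translating the (possibly cache-specialized)
// opcode back to a mnemonic before dumping the dst, base and property operands.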
193 void CodeBlock::printGetByIdOp(ExecState* exec, int location, const Instruction*& it)
196 switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
200 case op_get_by_id_out_of_line:
201 op = "get_by_id_out_of_line";
203 case op_get_by_id_self:
204 op = "get_by_id_self";
206 case op_get_by_id_proto:
207 op = "get_by_id_proto";
209 case op_get_by_id_chain:
210 op = "get_by_id_chain";
212 case op_get_by_id_getter_self:
213 op = "get_by_id_getter_self";
215 case op_get_by_id_getter_proto:
216 op = "get_by_id_getter_proto";
218 case op_get_by_id_getter_chain:
219 op = "get_by_id_getter_chain";
221 case op_get_by_id_custom_self:
222 op = "get_by_id_custom_self";
224 case op_get_by_id_custom_proto:
225 op = "get_by_id_custom_proto";
227 case op_get_by_id_custom_chain:
228 op = "get_by_id_custom_chain";
230 case op_get_by_id_generic:
231 op = "get_by_id_generic";
233 case op_get_array_length:
236 case op_get_string_length:
237 op = "string_length";
240 ASSERT_NOT_REACHED();
243 int r0 = (++it)->u.operand;
244 int r1 = (++it)->u.operand;
245 int id0 = (++it)->u.operand;
246 dataLog("[%4d] %s\t %s, %s, %s", location, op, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data());
250 #if ENABLE(JIT) || ENABLE(LLINT) // unused in some configurations
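// dumpStructure()/dumpChain() print a Structure pointer (and, for chains, each
// link) together with the offset of 'ident' if the structure maps it.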
251 static void dumpStructure(const char* name, ExecState* exec, Structure* structure, Identifier& ident)
256 dataLog("%s = %p", name, structure);
258 PropertyOffset offset = structure->get(exec->globalData(), ident);
259 if (offset != invalidOffset)
260 dataLog(" (offset = %d)", offset);
264 #if ENABLE(JIT) // unused when not ENABLE(JIT), leading to silly warnings
265 static void dumpChain(ExecState* exec, StructureChain* chain, Identifier& ident)
267 dataLog("chain = %p: [", chain);
269 for (WriteBarrier<Structure>* currentStructure = chain->head();
271 ++currentStructure) {
276 dumpStructure("struct", exec, currentStructure->get(), ident);
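// Summarizes inline-cache state for a get_by_id at 'location': the Structure
// cached by the LLInt, and, when JIT stub info exists, the access type with its
// base/prototype structures, chain, or polymorphic structure list.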
282 void CodeBlock::printGetByIdCacheStatus(ExecState* exec, int location)
284 Instruction* instruction = instructions().begin() + location;
286 Identifier& ident = identifier(instruction[3].u.operand);
288 UNUSED_PARAM(ident); // Silence unused-variable warnings in configurations that compile out the uses below.
291 if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_array_length)
292 dataLog(" llint(array_length)");
294 Structure* structure = instruction[4].u.structure.get();
296 dumpStructure("struct", exec, structure, ident);
302 if (numberOfStructureStubInfos()) {
304 StructureStubInfo& stubInfo = getStubInfo(location);
308 Structure* baseStructure = 0;
309 Structure* prototypeStructure = 0;
310 StructureChain* chain = 0;
311 PolymorphicAccessStructureList* structureList = 0;
314 switch (stubInfo.accessType) {
315 case access_get_by_id_self:
317 baseStructure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
319 case access_get_by_id_proto:
321 baseStructure = stubInfo.u.getByIdProto.baseObjectStructure.get();
322 prototypeStructure = stubInfo.u.getByIdProto.prototypeStructure.get();
324 case access_get_by_id_chain:
326 baseStructure = stubInfo.u.getByIdChain.baseObjectStructure.get();
327 chain = stubInfo.u.getByIdChain.chain.get();
329 case access_get_by_id_self_list:
330 dataLog("self_list");
331 structureList = stubInfo.u.getByIdSelfList.structureList;
332 listSize = stubInfo.u.getByIdSelfList.listSize;
334 case access_get_by_id_proto_list:
335 dataLog("proto_list");
336 structureList = stubInfo.u.getByIdProtoList.structureList;
337 listSize = stubInfo.u.getByIdProtoList.listSize;
342 case access_get_by_id_generic:
345 case access_get_array_length:
346 dataLog("array_length");
348 case access_get_string_length:
349 dataLog("string_length");
352 ASSERT_NOT_REACHED();
358 dumpStructure("struct", exec, baseStructure, ident);
361 if (prototypeStructure) {
363 dumpStructure("prototypeStruct", exec, prototypeStructure, ident);
368 dumpChain(exec, chain, ident);
372 dataLog(", list = %p: [", structureList);
373 for (int i = 0; i < listSize; ++i) {
377 dumpStructure("base", exec, structureList->list[i].base.get(), ident);
378 if (structureList->list[i].isChain) {
379 if (structureList->list[i].u.chain.get()) {
381 dumpChain(exec, structureList->list[i].u.chain.get(), ident);
384 if (structureList->list[i].u.proto.get()) {
386 dumpStructure("proto", exec, structureList->list[i].u.proto.get(), ident);
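// Prints a call-family opcode; with DumpCaches it also reports the last callee
// recorded by the LLInt call link info and by the JIT CallLinkInfo, if any.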
399 void CodeBlock::printCallOp(ExecState* exec, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode)
401 int func = (++it)->u.operand;
402 int argCount = (++it)->u.operand;
403 int registerOffset = (++it)->u.operand;
404 dataLog("[%4d] %s\t %s, %d, %d", location, op, registerName(exec, func).data(), argCount, registerOffset);
405 if (cacheDumpMode == DumpCaches) {
407 LLIntCallLinkInfo* callLinkInfo = it[1].u.callLinkInfo;
408 if (callLinkInfo->lastSeenCallee) {
409 dataLog(" llint(%p, exec %p)",
410 callLinkInfo->lastSeenCallee.get(),
411 callLinkInfo->lastSeenCallee->executable());
413 dataLog(" llint(not set)");
416 if (numberOfCallLinkInfos()) {
417 JSFunction* target = getCallLinkInfo(location).lastSeenCallee.get();
419 dataLog(" jit(%p, exec %p)", target, target->executable());
421 dataLog(" jit(not set)");
425 dumpBytecodeCommentAndNewLine(location);
429 void CodeBlock::printPutByIdOp(ExecState* exec, int location, const Instruction*& it, const char* op)
431 int r0 = (++it)->u.operand;
432 int id0 = (++it)->u.operand;
433 int r1 = (++it)->u.operand;
434 dataLog("[%4d] %s\t %s, %s, %s", location, op, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, r1).data());
435 dumpBytecodeCommentAndNewLine(location);
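// printStructure()/printStructures() report the Structure pointers baked into a
// cached get_by_id/put_by_id instruction, keyed on the current opcode.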
439 void CodeBlock::printStructure(const char* name, const Instruction* vPC, int operand)
441 unsigned instructionOffset = vPC - instructions().begin();
442 dataLog(" [%4d] %s: %s\n", instructionOffset, name, pointerToSourceString(vPC[operand].u.structure).utf8().data());
445 void CodeBlock::printStructures(const Instruction* vPC)
447 Interpreter* interpreter = m_globalData->interpreter;
448 unsigned instructionOffset = vPC - instructions().begin();
450 if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id)) {
451 printStructure("get_by_id", vPC, 4);
454 if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_self)) {
455 printStructure("get_by_id_self", vPC, 4);
458 if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_proto)) {
459 dataLog(" [%4d] %s: %s, %s\n", instructionOffset, "get_by_id_proto", pointerToSourceString(vPC[4].u.structure).utf8().data(), pointerToSourceString(vPC[5].u.structure).utf8().data());
462 if (vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id_transition)) {
463 dataLog(" [%4d] %s: %s, %s, %s\n", instructionOffset, "put_by_id_transition", pointerToSourceString(vPC[4].u.structure).utf8().data(), pointerToSourceString(vPC[5].u.structure).utf8().data(), pointerToSourceString(vPC[6].u.structureChain).utf8().data());
466 if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_chain)) {
467 dataLog(" [%4d] %s: %s, %s\n", instructionOffset, "get_by_id_chain", pointerToSourceString(vPC[4].u.structure).utf8().data(), pointerToSourceString(vPC[5].u.structureChain).utf8().data());
470 if (vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id)) {
471 printStructure("put_by_id", vPC, 4);
474 if (vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id_replace)) {
475 printStructure("put_by_id_replace", vPC, 4);
479 // These instructions don't ref Structures.
480 ASSERT(vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_generic) || vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id_generic) || vPC[0].u.opcode == interpreter->getOpcode(op_call) || vPC[0].u.opcode == interpreter->getOpcode(op_call_eval) || vPC[0].u.opcode == interpreter->getOpcode(op_construct));
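// Dumps the entire CodeBlock: a summary header, every instruction, then the
// identifier, constant, regexp, exception-handler and switch-table sections.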
483 void CodeBlock::dump()
485 // We only use the ExecState* for things that don't actually lead to JS execution,
486 // like converting a JSString to a String. Hence the globalExec is appropriate.
487 ExecState* exec = m_globalObject->globalExec();
489 size_t instructionCount = 0;
491 for (size_t i = 0; i < instructions().size(); i += opcodeLengths[exec->interpreter()->getOpcodeID(instructions()[i].u.opcode)])
495 "%lu m_instructions; %lu bytes at %p (%s); %d parameter(s); %d callee register(s); %d variable(s)",
496 static_cast<unsigned long>(instructions().size()),
497 static_cast<unsigned long>(instructions().size() * sizeof(Instruction)),
498 this, codeTypeToString(codeType()), m_numParameters, m_numCalleeRegisters,
500 if (symbolTable()->captureCount())
501 dataLog("; %d captured var(s)", symbolTable()->captureCount());
502 if (usesArguments()) {
504 "; uses arguments, in r%d, r%d",
506 unmodifiedArgumentsRegister(argumentsRegister()));
508 if (needsFullScopeChain() && codeType() == FunctionCode)
509 dataLog("; activation in r%d", activationRegister());
512 const Instruction* begin = instructions().begin();
513 const Instruction* end = instructions().end();
514 for (const Instruction* it = begin; it != end; ++it)
515 dump(exec, begin, it);
517 if (!m_identifiers.isEmpty()) {
518 dataLog("\nIdentifiers:\n");
521 dataLog(" id%u = %s\n", static_cast<unsigned>(i), m_identifiers[i].string().utf8().data());
523 } while (i != m_identifiers.size());
526 if (!m_constantRegisters.isEmpty()) {
527 dataLog("\nConstants:\n");
530 dataLog(" k%u = %s\n", static_cast<unsigned>(i), valueToSourceString(exec, m_constantRegisters[i].get()).utf8().data());
532 } while (i < m_constantRegisters.size());
535 if (size_t count = m_unlinkedCode->numberOfRegExps()) {
536 dataLog("\nm_regexps:\n");
539 dataLog(" re%u = %s\n", static_cast<unsigned>(i), regexpToSourceString(m_unlinkedCode->regexp(i)).utf8().data());
545 if (!m_structureStubInfos.isEmpty())
546 dataLog("\nStructures:\n");
549 if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
550 dataLog("\nException Handlers:\n");
553 dataLog("\t %d: { start: [%4d] end: [%4d] target: [%4d] }\n", i + 1, m_rareData->m_exceptionHandlers[i].start, m_rareData->m_exceptionHandlers[i].end, m_rareData->m_exceptionHandlers[i].target);
555 } while (i < m_rareData->m_exceptionHandlers.size());
558 if (m_rareData && !m_rareData->m_immediateSwitchJumpTables.isEmpty()) {
559 dataLog("Immediate Switch Jump Tables:\n");
562 dataLog(" %1d = {\n", i);
564 Vector<int32_t>::const_iterator end = m_rareData->m_immediateSwitchJumpTables[i].branchOffsets.end();
565 for (Vector<int32_t>::const_iterator iter = m_rareData->m_immediateSwitchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) {
568 dataLog("\t\t%4d => %04d\n", entry + m_rareData->m_immediateSwitchJumpTables[i].min, *iter);
572 } while (i < m_rareData->m_immediateSwitchJumpTables.size());
575 if (m_rareData && !m_rareData->m_characterSwitchJumpTables.isEmpty()) {
576 dataLog("\nCharacter Switch Jump Tables:\n");
579 dataLog(" %1d = {\n", i);
581 Vector<int32_t>::const_iterator end = m_rareData->m_characterSwitchJumpTables[i].branchOffsets.end();
582 for (Vector<int32_t>::const_iterator iter = m_rareData->m_characterSwitchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) {
585 ASSERT(!((entry + m_rareData->m_characterSwitchJumpTables[i].min) & ~0xFFFF));
586 UChar ch = static_cast<UChar>(entry + m_rareData->m_characterSwitchJumpTables[i].min);
587 dataLog("\t\t\"%s\" => %04d\n", String(&ch, 1).utf8().data(), *iter);
591 } while (i < m_rareData->m_characterSwitchJumpTables.size());
594 if (m_rareData && !m_rareData->m_stringSwitchJumpTables.isEmpty()) {
595 dataLog("\nString Switch Jump Tables:\n");
598 dataLog(" %1d = {\n", i);
599 StringJumpTable::StringOffsetTable::const_iterator end = m_rareData->m_stringSwitchJumpTables[i].offsetTable.end();
600 for (StringJumpTable::StringOffsetTable::const_iterator iter = m_rareData->m_stringSwitchJumpTables[i].offsetTable.begin(); iter != end; ++iter)
601 dataLog("\t\t\"%s\" => %04d\n", String(iter->key).utf8().data(), iter->value.branchOffset);
604 } while (i < m_rareData->m_stringSwitchJumpTables.size());
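// Dumps the single instruction at 'it', advancing the iterator past its operands.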
610 void CodeBlock::dump(ExecState* exec, const Instruction* begin, const Instruction*& it)
612 int location = it - begin;
613 switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
615 dataLog("[%4d] enter", location);
616 dumpBytecodeCommentAndNewLine(location);
619 case op_create_activation: {
620 int r0 = (++it)->u.operand;
621 dataLog("[%4d] create_activation %s", location, registerName(exec, r0).data());
622 dumpBytecodeCommentAndNewLine(location);
625 case op_create_arguments: {
626 int r0 = (++it)->u.operand;
627 dataLog("[%4d] create_arguments\t %s", location, registerName(exec, r0).data());
628 dumpBytecodeCommentAndNewLine(location);
631 case op_init_lazy_reg: {
632 int r0 = (++it)->u.operand;
633 dataLog("[%4d] init_lazy_reg\t %s", location, registerName(exec, r0).data());
634 dumpBytecodeCommentAndNewLine(location);
637 case op_get_callee: {
638 int r0 = (++it)->u.operand;
639 dataLog("[%4d] op_get_callee %s\n", location, registerName(exec, r0).data());
643 case op_create_this: {
644 int r0 = (++it)->u.operand;
645 int r1 = (++it)->u.operand;
646 dataLog("[%4d] create_this %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data());
647 dumpBytecodeCommentAndNewLine(location);
650 case op_convert_this: {
651 int r0 = (++it)->u.operand;
652 dataLog("[%4d] convert_this\t %s", location, registerName(exec, r0).data());
653 dumpBytecodeCommentAndNewLine(location);
654 ++it; // Skip value profile.
657 case op_new_object: {
658 int r0 = (++it)->u.operand;
659 dataLog("[%4d] new_object\t %s", location, registerName(exec, r0).data());
660 dumpBytecodeCommentAndNewLine(location);
664 int dst = (++it)->u.operand;
665 int argv = (++it)->u.operand;
666 int argc = (++it)->u.operand;
667 dataLog("[%4d] new_array\t %s, %s, %d", location, registerName(exec, dst).data(), registerName(exec, argv).data(), argc);
668 dumpBytecodeCommentAndNewLine(location);
669 ++it; // Skip array allocation profile.
672 case op_new_array_with_size: {
673 int dst = (++it)->u.operand;
674 int length = (++it)->u.operand;
675 dataLog("[%4d] new_array_with_size\t %s, %s", location, registerName(exec, dst).data(), registerName(exec, length).data());
676 dumpBytecodeCommentAndNewLine(location);
677 ++it; // Skip array allocation profile.
680 case op_new_array_buffer: {
681 int dst = (++it)->u.operand;
682 int argv = (++it)->u.operand;
683 int argc = (++it)->u.operand;
684 dataLog("[%4d] new_array_buffer\t %s, %d, %d", location, registerName(exec, dst).data(), argv, argc);
685 dumpBytecodeCommentAndNewLine(location);
686 ++it; // Skip array allocation profile.
689 case op_new_regexp: {
690 int r0 = (++it)->u.operand;
691 int re0 = (++it)->u.operand;
692 dataLog("[%4d] new_regexp\t %s, ", location, registerName(exec, r0).data());
693 if (re0 >= 0 && re0 < (int)m_unlinkedCode->numberOfRegExps())
694 dataLog("%s", regexpName(re0, regexp(re0)).data());
696 dataLog("bad_regexp(%d)", re0);
697 dumpBytecodeCommentAndNewLine(location);
701 int r0 = (++it)->u.operand;
702 int r1 = (++it)->u.operand;
703 dataLog("[%4d] mov\t\t %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data());
704 dumpBytecodeCommentAndNewLine(location);
708 printUnaryOp(exec, location, it, "not");
712 printBinaryOp(exec, location, it, "eq");
716 printUnaryOp(exec, location, it, "eq_null");
720 printBinaryOp(exec, location, it, "neq");
724 printUnaryOp(exec, location, it, "neq_null");
728 printBinaryOp(exec, location, it, "stricteq");
732 printBinaryOp(exec, location, it, "nstricteq");
736 printBinaryOp(exec, location, it, "less");
740 printBinaryOp(exec, location, it, "lesseq");
744 printBinaryOp(exec, location, it, "greater");
748 printBinaryOp(exec, location, it, "greatereq");
752 int r0 = (++it)->u.operand;
753 dataLog("[%4d] pre_inc\t\t %s", location, registerName(exec, r0).data());
754 dumpBytecodeCommentAndNewLine(location);
758 int r0 = (++it)->u.operand;
759 dataLog("[%4d] pre_dec\t\t %s", location, registerName(exec, r0).data());
760 dumpBytecodeCommentAndNewLine(location);
764 printUnaryOp(exec, location, it, "post_inc");
768 printUnaryOp(exec, location, it, "post_dec");
771 case op_to_jsnumber: {
772 printUnaryOp(exec, location, it, "to_jsnumber");
776 printUnaryOp(exec, location, it, "negate");
780 printBinaryOp(exec, location, it, "add");
785 printBinaryOp(exec, location, it, "mul");
790 printBinaryOp(exec, location, it, "div");
795 printBinaryOp(exec, location, it, "mod");
799 printBinaryOp(exec, location, it, "sub");
804 printBinaryOp(exec, location, it, "lshift");
808 printBinaryOp(exec, location, it, "rshift");
812 printBinaryOp(exec, location, it, "urshift");
816 printBinaryOp(exec, location, it, "bitand");
821 printBinaryOp(exec, location, it, "bitxor");
826 printBinaryOp(exec, location, it, "bitor");
830 case op_check_has_instance: {
831 int r0 = (++it)->u.operand;
832 int r1 = (++it)->u.operand;
833 int r2 = (++it)->u.operand;
834 int offset = (++it)->u.operand;
835 dataLog("[%4d] check_has_instance\t\t %s, %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data(), offset, location + offset);
836 dumpBytecodeCommentAndNewLine(location);
839 case op_instanceof: {
840 int r0 = (++it)->u.operand;
841 int r1 = (++it)->u.operand;
842 int r2 = (++it)->u.operand;
843 dataLog("[%4d] instanceof\t\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
844 dumpBytecodeCommentAndNewLine(location);
848 printUnaryOp(exec, location, it, "typeof");
851 case op_is_undefined: {
852 printUnaryOp(exec, location, it, "is_undefined");
855 case op_is_boolean: {
856 printUnaryOp(exec, location, it, "is_boolean");
860 printUnaryOp(exec, location, it, "is_number");
864 printUnaryOp(exec, location, it, "is_string");
868 printUnaryOp(exec, location, it, "is_object");
871 case op_is_function: {
872 printUnaryOp(exec, location, it, "is_function");
876 printBinaryOp(exec, location, it, "in");
879 case op_put_to_base_variable:
880 case op_put_to_base: {
881 int base = (++it)->u.operand;
882 int id0 = (++it)->u.operand;
883 int value = (++it)->u.operand;
884 int resolveInfo = (++it)->u.operand;
885 dataLog("[%4d] put_to_base\t %s, %s, %s, %d", location, registerName(exec, base).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, value).data(), resolveInfo);
886 dumpBytecodeCommentAndNewLine(location);
890 case op_resolve_global_property:
891 case op_resolve_global_var:
892 case op_resolve_scoped_var:
893 case op_resolve_scoped_var_on_top_scope:
894 case op_resolve_scoped_var_with_top_scope_check: {
895 int r0 = (++it)->u.operand;
896 int id0 = (++it)->u.operand;
897 int resolveInfo = (++it)->u.operand;
898 dataLog("[%4d] resolve\t\t %s, %s, %d", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), resolveInfo);
899 dumpBytecodeCommentAndNewLine(location);
903 case op_init_global_const_nop: {
904 dataLog("[%4d] init_global_const_nop\t", location);
905 dumpBytecodeCommentAndNewLine(location);
912 case op_init_global_const: {
913 WriteBarrier<Unknown>* registerPointer = (++it)->u.registerPointer;
914 int r0 = (++it)->u.operand;
915 dataLog("[%4d] init_global_const\t g%d(%p), %s", location, m_globalObject->findRegisterIndex(registerPointer), registerPointer, registerName(exec, r0).data());
916 dumpBytecodeCommentAndNewLine(location);
921 case op_init_global_const_check: {
922 WriteBarrier<Unknown>* registerPointer = (++it)->u.registerPointer;
923 int r0 = (++it)->u.operand;
924 dataLog("[%4d] init_global_const_check\t g%d(%p), %s", location, m_globalObject->findRegisterIndex(registerPointer), registerPointer, registerName(exec, r0).data());
925 dumpBytecodeCommentAndNewLine(location);
930 case op_resolve_base_to_global:
931 case op_resolve_base_to_global_dynamic:
932 case op_resolve_base_to_scope:
933 case op_resolve_base_to_scope_with_top_scope_check:
934 case op_resolve_base: {
935 int r0 = (++it)->u.operand;
936 int id0 = (++it)->u.operand;
937 int isStrict = (++it)->u.operand;
938 int resolveInfo = (++it)->u.operand;
939 int putToBaseInfo = (++it)->u.operand;
940 dataLog("[%4d] resolve_base%s\t %s, %s, %d, %d", location, isStrict ? "_strict" : "", registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), resolveInfo, putToBaseInfo);
941 dumpBytecodeCommentAndNewLine(location);
945 case op_ensure_property_exists: {
946 int r0 = (++it)->u.operand;
947 int id0 = (++it)->u.operand;
948 dataLog("[%4d] ensure_property_exists\t %s, %s", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data());
949 dumpBytecodeCommentAndNewLine(location);
952 case op_resolve_with_base: {
953 int r0 = (++it)->u.operand;
954 int r1 = (++it)->u.operand;
955 int id0 = (++it)->u.operand;
956 int resolveInfo = (++it)->u.operand;
957 int putToBaseInfo = (++it)->u.operand;
958 dataLog("[%4d] resolve_with_base %s, %s, %s, %d, %d", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data(), resolveInfo, putToBaseInfo);
959 dumpBytecodeCommentAndNewLine(location);
963 case op_resolve_with_this: {
964 int r0 = (++it)->u.operand;
965 int r1 = (++it)->u.operand;
966 int id0 = (++it)->u.operand;
967 int resolveInfo = (++it)->u.operand;
968 dataLog("[%4d] resolve_with_this %s, %s, %s, %d", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data(), resolveInfo);
969 dumpBytecodeCommentAndNewLine(location);
974 case op_get_by_id_out_of_line:
975 case op_get_by_id_self:
976 case op_get_by_id_proto:
977 case op_get_by_id_chain:
978 case op_get_by_id_getter_self:
979 case op_get_by_id_getter_proto:
980 case op_get_by_id_getter_chain:
981 case op_get_by_id_custom_self:
982 case op_get_by_id_custom_proto:
983 case op_get_by_id_custom_chain:
984 case op_get_by_id_generic:
985 case op_get_array_length:
986 case op_get_string_length: {
987 printGetByIdOp(exec, location, it);
988 printGetByIdCacheStatus(exec, location);
989 dumpBytecodeCommentAndNewLine(location);
992 case op_get_arguments_length: {
993 printUnaryOp(exec, location, it, "get_arguments_length");
998 printPutByIdOp(exec, location, it, "put_by_id");
1001 case op_put_by_id_out_of_line: {
1002 printPutByIdOp(exec, location, it, "put_by_id_out_of_line");
1005 case op_put_by_id_replace: {
1006 printPutByIdOp(exec, location, it, "put_by_id_replace");
1009 case op_put_by_id_transition: {
1010 printPutByIdOp(exec, location, it, "put_by_id_transition");
1013 case op_put_by_id_transition_direct: {
1014 printPutByIdOp(exec, location, it, "put_by_id_transition_direct");
1017 case op_put_by_id_transition_direct_out_of_line: {
1018 printPutByIdOp(exec, location, it, "put_by_id_transition_direct_out_of_line");
1021 case op_put_by_id_transition_normal: {
1022 printPutByIdOp(exec, location, it, "put_by_id_transition_normal");
1025 case op_put_by_id_transition_normal_out_of_line: {
1026 printPutByIdOp(exec, location, it, "put_by_id_transition_normal_out_of_line");
1029 case op_put_by_id_generic: {
1030 printPutByIdOp(exec, location, it, "put_by_id_generic");
1033 case op_put_getter_setter: {
1034 int r0 = (++it)->u.operand;
1035 int id0 = (++it)->u.operand;
1036 int r1 = (++it)->u.operand;
1037 int r2 = (++it)->u.operand;
1038 dataLog("[%4d] put_getter_setter\t %s, %s, %s, %s", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
1039 dumpBytecodeCommentAndNewLine(location);
1042 case op_del_by_id: {
1043 int r0 = (++it)->u.operand;
1044 int r1 = (++it)->u.operand;
1045 int id0 = (++it)->u.operand;
1046 dataLog("[%4d] del_by_id\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data());
1047 dumpBytecodeCommentAndNewLine(location);
1050 case op_get_by_val: {
1051 int r0 = (++it)->u.operand;
1052 int r1 = (++it)->u.operand;
1053 int r2 = (++it)->u.operand;
1054 dataLog("[%4d] get_by_val\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
1055 dumpBytecodeCommentAndNewLine(location);
1060 case op_get_argument_by_val: {
1061 int r0 = (++it)->u.operand;
1062 int r1 = (++it)->u.operand;
1063 int r2 = (++it)->u.operand;
1064 dataLog("[%4d] get_argument_by_val\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
1065 dumpBytecodeCommentAndNewLine(location);
1070 case op_get_by_pname: {
1071 int r0 = (++it)->u.operand;
1072 int r1 = (++it)->u.operand;
1073 int r2 = (++it)->u.operand;
1074 int r3 = (++it)->u.operand;
1075 int r4 = (++it)->u.operand;
1076 int r5 = (++it)->u.operand;
1077 dataLog("[%4d] get_by_pname\t %s, %s, %s, %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data(), registerName(exec, r3).data(), registerName(exec, r4).data(), registerName(exec, r5).data());
1078 dumpBytecodeCommentAndNewLine(location);
1081 case op_put_by_val: {
1082 int r0 = (++it)->u.operand;
1083 int r1 = (++it)->u.operand;
1084 int r2 = (++it)->u.operand;
1085 dataLog("[%4d] put_by_val\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
1086 dumpBytecodeCommentAndNewLine(location);
1090 case op_del_by_val: {
1091 int r0 = (++it)->u.operand;
1092 int r1 = (++it)->u.operand;
1093 int r2 = (++it)->u.operand;
1094 dataLog("[%4d] del_by_val\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
1095 dumpBytecodeCommentAndNewLine(location);
1098 case op_put_by_index: {
1099 int r0 = (++it)->u.operand;
1100 unsigned n0 = (++it)->u.operand;
1101 int r1 = (++it)->u.operand;
1102 dataLog("[%4d] put_by_index\t %s, %u, %s", location, registerName(exec, r0).data(), n0, registerName(exec, r1).data());
1103 dumpBytecodeCommentAndNewLine(location);
1107 int offset = (++it)->u.operand;
1108 dataLog("[%4d] jmp\t\t %d(->%d)", location, offset, location + offset);
1109 dumpBytecodeCommentAndNewLine(location);
1113 int offset = (++it)->u.operand;
1114 dataLog("[%4d] loop\t\t %d(->%d)", location, offset, location + offset);
1115 dumpBytecodeCommentAndNewLine(location);
1119 printConditionalJump(exec, begin, it, location, "jtrue");
1122 case op_loop_if_true: {
1123 printConditionalJump(exec, begin, it, location, "loop_if_true");
1126 case op_loop_if_false: {
1127 printConditionalJump(exec, begin, it, location, "loop_if_false");
1131 printConditionalJump(exec, begin, it, location, "jfalse");
1135 printConditionalJump(exec, begin, it, location, "jeq_null");
1138 case op_jneq_null: {
1139 printConditionalJump(exec, begin, it, location, "jneq_null");
1143 int r0 = (++it)->u.operand;
1144 Special::Pointer pointer = (++it)->u.specialPointer;
1145 int offset = (++it)->u.operand;
1146 dataLog("[%4d] jneq_ptr\t\t %s, %d (%p), %d(->%d)", location, registerName(exec, r0).data(), pointer, m_globalObject->actualPointerFor(pointer), offset, location + offset);
1147 dumpBytecodeCommentAndNewLine(location);
1151 int r0 = (++it)->u.operand;
1152 int r1 = (++it)->u.operand;
1153 int offset = (++it)->u.operand;
1154 dataLog("[%4d] jless\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
1155 dumpBytecodeCommentAndNewLine(location);
1159 int r0 = (++it)->u.operand;
1160 int r1 = (++it)->u.operand;
1161 int offset = (++it)->u.operand;
1162 dataLog("[%4d] jlesseq\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
1163 dumpBytecodeCommentAndNewLine(location);
1167 int r0 = (++it)->u.operand;
1168 int r1 = (++it)->u.operand;
1169 int offset = (++it)->u.operand;
1170 dataLog("[%4d] jgreater\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
1171 dumpBytecodeCommentAndNewLine(location);
1174 case op_jgreatereq: {
1175 int r0 = (++it)->u.operand;
1176 int r1 = (++it)->u.operand;
1177 int offset = (++it)->u.operand;
1178 dataLog("[%4d] jgreatereq\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
1179 dumpBytecodeCommentAndNewLine(location);
1183 int r0 = (++it)->u.operand;
1184 int r1 = (++it)->u.operand;
1185 int offset = (++it)->u.operand;
1186 dataLog("[%4d] jnless\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
1187 dumpBytecodeCommentAndNewLine(location);
1191 int r0 = (++it)->u.operand;
1192 int r1 = (++it)->u.operand;
1193 int offset = (++it)->u.operand;
1194 dataLog("[%4d] jnlesseq\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
1195 dumpBytecodeCommentAndNewLine(location);
1198 case op_jngreater: {
1199 int r0 = (++it)->u.operand;
1200 int r1 = (++it)->u.operand;
1201 int offset = (++it)->u.operand;
1202 dataLog("[%4d] jngreater\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
1203 dumpBytecodeCommentAndNewLine(location);
1206 case op_jngreatereq: {
1207 int r0 = (++it)->u.operand;
1208 int r1 = (++it)->u.operand;
1209 int offset = (++it)->u.operand;
1210 dataLog("[%4d] jngreatereq\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
1211 dumpBytecodeCommentAndNewLine(location);
1214 case op_loop_if_less: {
1215 int r0 = (++it)->u.operand;
1216 int r1 = (++it)->u.operand;
1217 int offset = (++it)->u.operand;
1218 dataLog("[%4d] loop_if_less\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
1219 dumpBytecodeCommentAndNewLine(location);
1222 case op_loop_if_lesseq: {
1223 int r0 = (++it)->u.operand;
1224 int r1 = (++it)->u.operand;
1225 int offset = (++it)->u.operand;
1226 dataLog("[%4d] loop_if_lesseq\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
1227 dumpBytecodeCommentAndNewLine(location);
1230 case op_loop_if_greater: {
1231 int r0 = (++it)->u.operand;
1232 int r1 = (++it)->u.operand;
1233 int offset = (++it)->u.operand;
1234 dataLog("[%4d] loop_if_greater\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
1235 dumpBytecodeCommentAndNewLine(location);
1238 case op_loop_if_greatereq: {
1239 int r0 = (++it)->u.operand;
1240 int r1 = (++it)->u.operand;
1241 int offset = (++it)->u.operand;
1242 dataLog("[%4d] loop_if_greatereq\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
1243 dumpBytecodeCommentAndNewLine(location);
1246 case op_loop_hint: {
1247 dataLog("[%4d] loop_hint", location);
1248 dumpBytecodeCommentAndNewLine(location);
1251 case op_switch_imm: {
1252 int tableIndex = (++it)->u.operand;
1253 int defaultTarget = (++it)->u.operand;
1254 int scrutineeRegister = (++it)->u.operand;
1255 dataLog("[%4d] switch_imm\t %d, %d(->%d), %s", location, tableIndex, defaultTarget, location + defaultTarget, registerName(exec, scrutineeRegister).data());
1256 dumpBytecodeCommentAndNewLine(location);
1259 case op_switch_char: {
1260 int tableIndex = (++it)->u.operand;
1261 int defaultTarget = (++it)->u.operand;
1262 int scrutineeRegister = (++it)->u.operand;
1263 dataLog("[%4d] switch_char\t %d, %d(->%d), %s", location, tableIndex, defaultTarget, location + defaultTarget, registerName(exec, scrutineeRegister).data());
1264 dumpBytecodeCommentAndNewLine(location);
1267 case op_switch_string: {
1268 int tableIndex = (++it)->u.operand;
1269 int defaultTarget = (++it)->u.operand;
1270 int scrutineeRegister = (++it)->u.operand;
1271 dataLog("[%4d] switch_string\t %d, %d(->%d), %s", location, tableIndex, defaultTarget, location + defaultTarget, registerName(exec, scrutineeRegister).data());
1272 dumpBytecodeCommentAndNewLine(location);
1276 int r0 = (++it)->u.operand;
1277 int f0 = (++it)->u.operand;
1278 int shouldCheck = (++it)->u.operand;
1279 dataLog("[%4d] new_func\t\t %s, f%d, %s", location, registerName(exec, r0).data(), f0, shouldCheck ? "<Checked>" : "<Unchecked>");
1280 dumpBytecodeCommentAndNewLine(location);
1283 case op_new_func_exp: {
1284 int r0 = (++it)->u.operand;
1285 int f0 = (++it)->u.operand;
1286 dataLog("[%4d] new_func_exp\t %s, f%d", location, registerName(exec, r0).data(), f0);
1287 dumpBytecodeCommentAndNewLine(location);
1291 printCallOp(exec, location, it, "call", DumpCaches);
1294 case op_call_eval: {
1295 printCallOp(exec, location, it, "call_eval", DontDumpCaches);
1298 case op_call_varargs: {
1299 int callee = (++it)->u.operand;
1300 int thisValue = (++it)->u.operand;
1301 int arguments = (++it)->u.operand;
1302 int firstFreeRegister = (++it)->u.operand;
1303 dataLog("[%4d] call_varargs\t %s, %s, %s, %d", location, registerName(exec, callee).data(), registerName(exec, thisValue).data(), registerName(exec, arguments).data(), firstFreeRegister);
1304 dumpBytecodeCommentAndNewLine(location);
1307 case op_tear_off_activation: {
1308 int r0 = (++it)->u.operand;
1309 dataLog("[%4d] tear_off_activation\t %s", location, registerName(exec, r0).data());
1310 dumpBytecodeCommentAndNewLine(location);
1313 case op_tear_off_arguments: {
1314 int r0 = (++it)->u.operand;
1315 int r1 = (++it)->u.operand;
1316 dataLog("[%4d] tear_off_arguments %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data());
1317 dumpBytecodeCommentAndNewLine(location);
1321 int r0 = (++it)->u.operand;
1322 dataLog("[%4d] ret\t\t %s", location, registerName(exec, r0).data());
1323 dumpBytecodeCommentAndNewLine(location);
1326 case op_call_put_result: {
1327 int r0 = (++it)->u.operand;
1328 dataLog("[%4d] call_put_result\t\t %s", location, registerName(exec, r0).data());
1329 dumpBytecodeCommentAndNewLine(location);
1333 case op_ret_object_or_this: {
1334 int r0 = (++it)->u.operand;
1335 int r1 = (++it)->u.operand;
1336 dataLog("[%4d] constructor_ret\t\t %s %s", location, registerName(exec, r0).data(), registerName(exec, r1).data());
1337 dumpBytecodeCommentAndNewLine(location);
1340 case op_construct: {
1341 printCallOp(exec, location, it, "construct", DumpCaches);
1345 int r0 = (++it)->u.operand;
1346 int r1 = (++it)->u.operand;
1347 int count = (++it)->u.operand;
1348 dataLog("[%4d] strcat\t\t %s, %s, %d", location, registerName(exec, r0).data(), registerName(exec, r1).data(), count);
1349 dumpBytecodeCommentAndNewLine(location);
1352 case op_to_primitive: {
1353 int r0 = (++it)->u.operand;
1354 int r1 = (++it)->u.operand;
1355 dataLog("[%4d] to_primitive\t %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data());
1356 dumpBytecodeCommentAndNewLine(location);
1359 case op_get_pnames: {
1360 int r0 = it[1].u.operand;
1361 int r1 = it[2].u.operand;
1362 int r2 = it[3].u.operand;
1363 int r3 = it[4].u.operand;
1364 int offset = it[5].u.operand;
1365 dataLog("[%4d] get_pnames\t %s, %s, %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data(), registerName(exec, r3).data(), offset, location + offset);
1366 dumpBytecodeCommentAndNewLine(location);
1367 it += OPCODE_LENGTH(op_get_pnames) - 1;
1370 case op_next_pname: {
1371 int dest = it[1].u.operand;
1372 int base = it[2].u.operand;
1373 int i = it[3].u.operand;
1374 int size = it[4].u.operand;
1375 int iter = it[5].u.operand;
1376 int offset = it[6].u.operand;
1377 dataLog("[%4d] next_pname\t %s, %s, %s, %s, %s, %d(->%d)", location, registerName(exec, dest).data(), registerName(exec, base).data(), registerName(exec, i).data(), registerName(exec, size).data(), registerName(exec, iter).data(), offset, location + offset);
1378 dumpBytecodeCommentAndNewLine(location);
1379 it += OPCODE_LENGTH(op_next_pname) - 1;
1382 case op_push_with_scope: {
1383 int r0 = (++it)->u.operand;
1384 dataLog("[%4d] push_with_scope\t %s", location, registerName(exec, r0).data());
1385 dumpBytecodeCommentAndNewLine(location);
1388 case op_pop_scope: {
1389 dataLog("[%4d] pop_scope", location);
1390 dumpBytecodeCommentAndNewLine(location);
1393 case op_push_name_scope: {
1394 int id0 = (++it)->u.operand;
1395 int r1 = (++it)->u.operand;
1396 unsigned attributes = (++it)->u.operand;
1397 dataLog("[%4d] push_name_scope \t%s, %s, %u", location, idName(id0, m_identifiers[id0]).data(), registerName(exec, r1).data(), attributes);
1398 dumpBytecodeCommentAndNewLine(location);
1401 case op_jmp_scopes: {
1402 int scopeDelta = (++it)->u.operand;
1403 int offset = (++it)->u.operand;
1404 dataLog("[%4d] jmp_scopes\t^%d, %d(->%d)", location, scopeDelta, offset, location + offset);
1405 dumpBytecodeCommentAndNewLine(location);
1409 int r0 = (++it)->u.operand;
1410 dataLog("[%4d] catch\t\t %s", location, registerName(exec, r0).data());
1411 dumpBytecodeCommentAndNewLine(location);
1415 int r0 = (++it)->u.operand;
1416 dataLog("[%4d] throw\t\t %s", location, registerName(exec, r0).data());
1417 dumpBytecodeCommentAndNewLine(location);
1420 case op_throw_static_error: {
1421 int k0 = (++it)->u.operand;
1422 int k1 = (++it)->u.operand;
1423 dataLog("[%4d] throw_static_error\t %s, %s", location, constantName(exec, k0, getConstant(k0)).data(), k1 ? "true" : "false");
1424 dumpBytecodeCommentAndNewLine(location);
1428 int debugHookID = (++it)->u.operand;
1429 int firstLine = (++it)->u.operand;
1430 int lastLine = (++it)->u.operand;
1431 int column = (++it)->u.operand;
1432 dataLog("[%4d] debug\t\t %s, %d, %d, %d", location, debugHookName(debugHookID), firstLine, lastLine, column);
1433 dumpBytecodeCommentAndNewLine(location);
1436 case op_profile_will_call: {
1437 int function = (++it)->u.operand;
1438 dataLog("[%4d] profile_will_call %s", location, registerName(exec, function).data());
1439 dumpBytecodeCommentAndNewLine(location);
1442 case op_profile_did_call: {
1443 int function = (++it)->u.operand;
1444 dataLog("[%4d] profile_did_call\t %s", location, registerName(exec, function).data());
1445 dumpBytecodeCommentAndNewLine(location);
1449 int r0 = (++it)->u.operand;
1450 dataLog("[%4d] end\t\t %s", location, registerName(exec, r0).data());
1451 dumpBytecodeCommentAndNewLine(location);
1454 #if ENABLE(LLINT_C_LOOP)
1456 ASSERT(false); // We should never get here.
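// Convenience overload: dumps only the instruction at the given bytecode offset.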
1461 void CodeBlock::dump(unsigned bytecodeOffset)
1463 ExecState* exec = m_globalObject->globalExec();
1464 const Instruction* it = instructions().begin() + bytecodeOffset;
1465 dump(exec, instructions().begin(), it);
1468 #if DUMP_CODE_BLOCK_STATISTICS
1469 static HashSet<CodeBlock*> liveCodeBlockSet;
1472 #define FOR_EACH_MEMBER_VECTOR(macro) \
1473 macro(instructions) \
1474 macro(globalResolveInfos) \
1475 macro(structureStubInfos) \
1476 macro(callLinkInfos) \
1477 macro(linkedCallerList) \
1478 macro(identifiers) \
1479 macro(functionExpressions) \
1480 macro(constantRegisters)
1482 #define FOR_EACH_MEMBER_VECTOR_RARE_DATA(macro) \
1485 macro(exceptionHandlers) \
1486 macro(immediateSwitchJumpTables) \
1487 macro(characterSwitchJumpTables) \
1488 macro(stringSwitchJumpTables) \
1489 macro(evalCodeCache) \
1490 macro(expressionInfo) \
1492 macro(callReturnIndexVector)
1494 template<typename T>
1495 static size_t sizeInBytes(const Vector<T>& vector)
1497 return vector.capacity() * sizeof(T);
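// With DUMP_CODE_BLOCK_STATISTICS enabled, walks every live CodeBlock and reports
// per-member-vector occupancy and total sizes; otherwise it just prints a notice.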
1500 void CodeBlock::dumpStatistics()
1502 #if DUMP_CODE_BLOCK_STATISTICS
1503 #define DEFINE_VARS(name) size_t name##IsNotEmpty = 0; size_t name##TotalSize = 0;
1504 FOR_EACH_MEMBER_VECTOR(DEFINE_VARS)
1505 FOR_EACH_MEMBER_VECTOR_RARE_DATA(DEFINE_VARS)
1508 // Non-vector data members
1509 size_t evalCodeCacheIsNotEmpty = 0;
1511 size_t symbolTableIsNotEmpty = 0;
1512 size_t symbolTableTotalSize = 0;
1514 size_t hasRareData = 0;
1516 size_t isFunctionCode = 0;
1517 size_t isGlobalCode = 0;
1518 size_t isEvalCode = 0;
1520 HashSet<CodeBlock*>::const_iterator end = liveCodeBlockSet.end();
1521 for (HashSet<CodeBlock*>::const_iterator it = liveCodeBlockSet.begin(); it != end; ++it) {
1522 CodeBlock* codeBlock = *it;
1524 #define GET_STATS(name) if (!codeBlock->m_##name.isEmpty()) { name##IsNotEmpty++; name##TotalSize += sizeInBytes(codeBlock->m_##name); }
1525 FOR_EACH_MEMBER_VECTOR(GET_STATS)
1528 if (codeBlock->symbolTable() && !codeBlock->symbolTable()->isEmpty()) {
1529 symbolTableIsNotEmpty++;
1530 symbolTableTotalSize += (codeBlock->symbolTable()->capacity() * (sizeof(SymbolTable::KeyType) + sizeof(SymbolTable::MappedType)));
1533 if (codeBlock->m_rareData) {
1535 #define GET_STATS(name) if (!codeBlock->m_rareData->m_##name.isEmpty()) { name##IsNotEmpty++; name##TotalSize += sizeInBytes(codeBlock->m_rareData->m_##name); }
1536 FOR_EACH_MEMBER_VECTOR_RARE_DATA(GET_STATS)
1539 if (!codeBlock->m_rareData->m_evalCodeCache.isEmpty())
1540 evalCodeCacheIsNotEmpty++;
1543 switch (codeBlock->codeType()) {
1556 size_t totalSize = 0;
1558 #define GET_TOTAL_SIZE(name) totalSize += name##TotalSize;
1559 FOR_EACH_MEMBER_VECTOR(GET_TOTAL_SIZE)
1560 FOR_EACH_MEMBER_VECTOR_RARE_DATA(GET_TOTAL_SIZE)
1561 #undef GET_TOTAL_SIZE
1563 totalSize += symbolTableTotalSize;
1564 totalSize += (liveCodeBlockSet.size() * sizeof(CodeBlock));
1566 dataLog("Number of live CodeBlocks: %d\n", liveCodeBlockSet.size());
1567 dataLog("Size of a single CodeBlock [sizeof(CodeBlock)]: %zu\n", sizeof(CodeBlock));
1568 dataLog("Size of all CodeBlocks: %zu\n", totalSize);
1569 dataLog("Average size of a CodeBlock: %zu\n", totalSize / liveCodeBlockSet.size());
1571 dataLog("Number of FunctionCode CodeBlocks: %zu (%.3f%%)\n", isFunctionCode, static_cast<double>(isFunctionCode) * 100.0 / liveCodeBlockSet.size());
1572 dataLog("Number of GlobalCode CodeBlocks: %zu (%.3f%%)\n", isGlobalCode, static_cast<double>(isGlobalCode) * 100.0 / liveCodeBlockSet.size());
1573 dataLog("Number of EvalCode CodeBlocks: %zu (%.3f%%)\n", isEvalCode, static_cast<double>(isEvalCode) * 100.0 / liveCodeBlockSet.size());
1575 dataLog("Number of CodeBlocks with rare data: %zu (%.3f%%)\n", hasRareData, static_cast<double>(hasRareData) * 100.0 / liveCodeBlockSet.size());
1577 #define PRINT_STATS(name) dataLog("Number of CodeBlocks with " #name ": %zu\n", name##IsNotEmpty); dataLog("Size of all " #name ": %zu\n", name##TotalSize);
1578 FOR_EACH_MEMBER_VECTOR(PRINT_STATS)
1579 FOR_EACH_MEMBER_VECTOR_RARE_DATA(PRINT_STATS)
1582 dataLog("Number of CodeBlocks with evalCodeCache: %zu\n", evalCodeCacheIsNotEmpty);
1583 dataLog("Number of CodeBlocks with symbolTable: %zu\n", symbolTableIsNotEmpty);
1585 dataLog("Size of all symbolTables: %zu\n", symbolTableTotalSize);
1588 dataLog("Dumping CodeBlock statistics is not enabled.\n");
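// Copies an already-parsed CodeBlock (CopyParsedBlockTag), sharing its
// instructions, constants and jump tables while resetting the optimization and
// OSR-exit counters for the new block.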
1592 CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
1593 : m_globalObject(other.m_globalObject)
1594 , m_heap(other.m_heap)
1595 , m_numCalleeRegisters(other.m_numCalleeRegisters)
1596 , m_numVars(other.m_numVars)
1597 , m_isConstructor(other.m_isConstructor)
1598 , m_unlinkedCode(*other.m_globalData, other.m_ownerExecutable.get(), other.m_unlinkedCode.get())
1599 , m_ownerExecutable(*other.m_globalData, other.m_ownerExecutable.get(), other.m_ownerExecutable.get())
1600 , m_globalData(other.m_globalData)
1601 , m_instructions(other.m_instructions)
1602 , m_thisRegister(other.m_thisRegister)
1603 , m_argumentsRegister(other.m_argumentsRegister)
1604 , m_activationRegister(other.m_activationRegister)
1605 , m_isStrictMode(other.m_isStrictMode)
1606 , m_source(other.m_source)
1607 , m_sourceOffset(other.m_sourceOffset)
1608 #if ENABLE(VALUE_PROFILER)
1609 , m_executionEntryCount(0)
1611 , m_identifiers(other.m_identifiers)
1612 , m_constantRegisters(other.m_constantRegisters)
1613 , m_functionDecls(other.m_functionDecls)
1614 , m_functionExprs(other.m_functionExprs)
1615 , m_osrExitCounter(0)
1616 , m_optimizationDelayCounter(0)
1617 , m_reoptimizationRetryCounter(0)
1618 , m_resolveOperations(other.m_resolveOperations)
1619 , m_putToBaseOperations(other.m_putToBaseOperations)
1620 #if ENABLE(BYTECODE_COMMENTS)
1621 , m_bytecodeCommentIterator(0)
1624 , m_canCompileWithDFGState(DFG::CapabilityLevelNotSet)
1627 setNumParameters(other.numParameters());
1628 optimizeAfterWarmUp();
1631 if (other.m_rareData) {
1632 createRareDataIfNecessary();
1634 m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
1635 m_rareData->m_constantBuffers = other.m_rareData->m_constantBuffers;
1636 m_rareData->m_immediateSwitchJumpTables = other.m_rareData->m_immediateSwitchJumpTables;
1637 m_rareData->m_characterSwitchJumpTables = other.m_rareData->m_characterSwitchJumpTables;
1638 m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
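// Links an UnlinkedCodeBlock into a runnable CodeBlock for a particular global
// object: copies constants, identifiers and jump tables, creates the
// FunctionExecutables for declarations and expressions, sizes the profiling
// metadata, and translates the unlinked bytecode into linked Instructions below.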
1642 CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, unsigned baseScopeDepth, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, PassOwnPtr<CodeBlock> alternative)
1643 : m_globalObject(globalObject->globalData(), ownerExecutable, globalObject)
1644 , m_heap(&m_globalObject->globalData().heap)
1645 , m_numCalleeRegisters(unlinkedCodeBlock->m_numCalleeRegisters)
1646 , m_numVars(unlinkedCodeBlock->m_numVars)
1647 , m_isConstructor(unlinkedCodeBlock->isConstructor())
1648 , m_unlinkedCode(globalObject->globalData(), ownerExecutable, unlinkedCodeBlock)
1649 , m_ownerExecutable(globalObject->globalData(), ownerExecutable, ownerExecutable)
1650 , m_globalData(unlinkedCodeBlock->globalData())
1651 , m_thisRegister(unlinkedCodeBlock->thisRegister())
1652 , m_argumentsRegister(unlinkedCodeBlock->argumentsRegister())
1653 , m_activationRegister(unlinkedCodeBlock->activationRegister())
1654 , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
1655 , m_source(sourceProvider)
1656 , m_sourceOffset(sourceOffset)
1657 #if ENABLE(VALUE_PROFILER)
1658 , m_executionEntryCount(0)
1660 , m_alternative(alternative)
1661 , m_osrExitCounter(0)
1662 , m_optimizationDelayCounter(0)
1663 , m_reoptimizationRetryCounter(0)
1664 #if ENABLE(BYTECODE_COMMENTS)
1665 , m_bytecodeCommentIterator(0)
1668 m_globalData->startedCompiling(this);
1671 setNumParameters(unlinkedCodeBlock->numParameters());
1673 optimizeAfterWarmUp();
1676 #if DUMP_CODE_BLOCK_STATISTICS
1677 liveCodeBlockSet.add(this);
1679 setIdentifiers(unlinkedCodeBlock->identifiers());
1680 setConstantRegisters(unlinkedCodeBlock->constantRegisters());
1682 m_functionDecls.grow(unlinkedCodeBlock->numberOfFunctionDecls());
1683 for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
1684 UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
1685 unsigned lineCount = unlinkedExecutable->lineCount();
1686 unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset();
1687 unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset();
1688 unsigned sourceLength = unlinkedExecutable->sourceLength();
1689 SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine);
1690 FunctionExecutable* executable = FunctionExecutable::create(*m_globalData, code, unlinkedExecutable, firstLine, firstLine + lineCount);
1691 m_functionDecls[i].set(*m_globalData, ownerExecutable, executable);
1694 m_functionExprs.grow(unlinkedCodeBlock->numberOfFunctionExprs());
1695 for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
1696 UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
1697 unsigned lineCount = unlinkedExecutable->lineCount();
1698 unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset();
1699 unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset();
1700 unsigned sourceLength = unlinkedExecutable->sourceLength();
1701 SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine);
1702 FunctionExecutable* executable = FunctionExecutable::create(*m_globalData, code, unlinkedExecutable, firstLine, firstLine + lineCount);
1703 m_functionExprs[i].set(*m_globalData, ownerExecutable, executable);
1706 if (unlinkedCodeBlock->hasRareData()) {
1707 createRareDataIfNecessary();
1708 if (size_t count = unlinkedCodeBlock->constantBufferCount()) {
1709 m_rareData->m_constantBuffers.grow(count);
1710 for (size_t i = 0; i < count; i++) {
1711 const UnlinkedCodeBlock::ConstantBuffer& buffer = unlinkedCodeBlock->constantBuffer(i);
1712 m_rareData->m_constantBuffers[i] = buffer;
1715 if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
1716 m_rareData->m_exceptionHandlers.grow(count);
1717 for (size_t i = 0; i < count; i++) {
1718 const UnlinkedHandlerInfo& handler = unlinkedCodeBlock->exceptionHandler(i);
1719 m_rareData->m_exceptionHandlers[i].start = handler.start;
1720 m_rareData->m_exceptionHandlers[i].end = handler.end;
1721 m_rareData->m_exceptionHandlers[i].target = handler.target;
1722 m_rareData->m_exceptionHandlers[i].scopeDepth = handler.scopeDepth + baseScopeDepth;
1723 #if ENABLE(JIT) && ENABLE(LLINT)
1724 m_rareData->m_exceptionHandlers[i].nativeCode = CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(llint_op_catch)));
1729 if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
1730 m_rareData->m_stringSwitchJumpTables.grow(count);
1731 for (size_t i = 0; i < count; i++) {
1732 UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
1733 UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
1734 for (; ptr != end; ++ptr) {
1735 OffsetLocation offset;
1736 offset.branchOffset = ptr->value;
1737 m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
1742 if (size_t count = unlinkedCodeBlock->numberOfImmediateSwitchJumpTables()) {
1743 m_rareData->m_immediateSwitchJumpTables.grow(count);
1744 for (size_t i = 0; i < count; i++) {
1745 UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->immediateSwitchJumpTable(i);
1746 SimpleJumpTable& destTable = m_rareData->m_immediateSwitchJumpTables[i];
1747 destTable.branchOffsets = sourceTable.branchOffsets;
1748 destTable.min = sourceTable.min;
1752 if (size_t count = unlinkedCodeBlock->numberOfCharacterSwitchJumpTables()) {
1753 m_rareData->m_characterSwitchJumpTables.grow(count);
1754 for (size_t i = 0; i < count; i++) {
1755 UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->characterSwitchJumpTable(i);
1756 SimpleJumpTable& destTable = m_rareData->m_characterSwitchJumpTables[i];
1757 destTable.branchOffsets = sourceTable.branchOffsets;
1758 destTable.min = sourceTable.min;
1763 // Allocate metadata buffers for the bytecode
1765 if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
1766 m_llintCallLinkInfos.grow(size);
1769 if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
1770 m_arrayProfiles.grow(size);
1771 if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
1772 m_arrayAllocationProfiles.grow(size);
1773 if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
1774 m_valueProfiles.grow(size);
1776 if (size_t size = unlinkedCodeBlock->numberOfResolveOperations())
1777 m_resolveOperations.grow(size);
1778 size_t putToBaseCount = unlinkedCodeBlock->numberOfPutToBaseOperations();
1779 m_putToBaseOperations.reserveCapacity(putToBaseCount);
1780 for (size_t i = 0; i < putToBaseCount; ++i)
1781 m_putToBaseOperations.append(PutToBaseOperation(isStrictMode()));
1783 ASSERT(m_putToBaseOperations.capacity() == putToBaseCount);
1785 // Copy and translate the UnlinkedInstructions
1786 size_t instructionCount = unlinkedCodeBlock->instructions().size();
1787 UnlinkedInstruction* pc = unlinkedCodeBlock->instructions().data();
1788 Vector<Instruction> instructions(instructionCount);
1789 for (size_t i = 0; i < unlinkedCodeBlock->instructions().size(); ) {
1790 unsigned opLength = opcodeLength(pc[i].u.opcode);
1791 instructions[i] = globalData()->interpreter->getOpcode(pc[i].u.opcode);
1792 for (size_t j = 1; j < opLength; ++j) {
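// On targets where Instruction slots are pointer-sized (64-bit), zero the whole
// slot before copying in the 32-bit operand so the upper bits of the union are
// never left uninitialized.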
1793 if (sizeof(int32_t) != sizeof(intptr_t))
1794 instructions[i + j].u.pointer = 0;
1795 instructions[i + j].u.operand = pc[i + j].u.operand;
1797 switch (pc[i].u.opcode) {
1800 case op_get_argument_by_val: {
1801 int arrayProfileIndex = pc[i + opLength - 2].u.operand;
1802 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1804 instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
1807 case op_convert_this:
1809 case op_resolve_base:
1810 case op_resolve_with_base:
1811 case op_resolve_with_this:
1813 case op_call_put_result:
1814 case op_get_callee: {
1815 ValueProfile* profile = &m_valueProfiles[pc[i + opLength - 1].u.operand];
1816 ASSERT(profile->m_bytecodeOffset == -1);
1817 profile->m_bytecodeOffset = i;
1818 instructions[i + opLength - 1] = profile;
1821 case op_put_by_val: {
1822 int arrayProfileIndex = pc[i + opLength - 1].u.operand;
1823 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1824 instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
1829 case op_new_array_buffer:
1830 case op_new_array_with_size: {
1831 int arrayAllocationProfileIndex = pc[i + opLength - 1].u.operand;
1832 instructions[i + opLength - 1] = &m_arrayAllocationProfiles[arrayAllocationProfileIndex];
1838 case op_call_eval: {
1840 int arrayProfileIndex = pc[i + opLength - 1].u.operand;
1841 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1842 instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
1845 instructions[i + 4] = &m_llintCallLinkInfos[pc[i + 4].u.operand];
1851 instructions[i + 4] = &m_llintCallLinkInfos[pc[i + 4].u.operand];
1854 case op_get_by_id_out_of_line:
1855 case op_get_by_id_self:
1856 case op_get_by_id_proto:
1857 case op_get_by_id_chain:
1858 case op_get_by_id_getter_self:
1859 case op_get_by_id_getter_proto:
1860 case op_get_by_id_getter_chain:
1861 case op_get_by_id_custom_self:
1862 case op_get_by_id_custom_proto:
1863 case op_get_by_id_custom_chain:
1864 case op_get_by_id_generic:
1865 case op_get_array_length:
1866 case op_get_string_length:
1869 case op_init_global_const_nop: {
1870 ASSERT(codeType() == GlobalCode);
1871 Identifier ident = identifier(pc[i + 4].u.operand);
1872 SymbolTableEntry entry = globalObject->symbolTable()->get(ident.impl());
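// If the global property can be watched, install the checked form of the store
// (op_init_global_const_check) wired up to the entry's watchpoint flag;
// otherwise install the plain op_init_global_const store aimed directly at the
// global's register.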
1876 if (entry.couldBeWatched()) {
1877 instructions[i + 0] = globalData()->interpreter->getOpcode(op_init_global_const_check);
1878 instructions[i + 1] = &globalObject->registerAt(entry.getIndex());
1879 instructions[i + 3] = entry.addressOfIsWatched();
1883 instructions[i + 0] = globalData()->interpreter->getOpcode(op_init_global_const);
1884 instructions[i + 1] = &globalObject->registerAt(entry.getIndex());
1892 m_instructions = WTF::RefCountedArray<Instruction>(instructions);
1894 if (BytecodeGenerator::dumpsGeneratedCode())
1896 m_globalData->finishedCompiling(this);
1899 CodeBlock::~CodeBlock()
1902 // Remove myself from the set of DFG code blocks. Note that I may not be in this set
1903 // (because I'm not a DFG code block), in which case this is a no-op anyway.
1904 m_globalData->heap.m_dfgCodeBlocks.m_set.remove(this);
1907 #if ENABLE(VERBOSE_VALUE_PROFILE)
1908 dumpValueProfiles();
1912 while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
1913 m_incomingLLIntCalls.begin()->remove();
1914 #endif // ENABLE(LLINT)
1916 // We may be destroyed before any CodeBlocks that refer to us are destroyed.
1917 // Consider that two CodeBlocks become unreachable at the same time. There
1918 // is no guarantee about the order in which the CodeBlocks are destroyed.
1919 // So, if we don't remove incoming calls, and get destroyed before the
1920 // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
1921 // destructor will try to remove nodes from our (no longer valid) linked list.
1922 while (m_incomingCalls.begin() != m_incomingCalls.end())
1923 m_incomingCalls.begin()->remove();
1925 // Note that our outgoing calls will be removed from other CodeBlocks'
1926 // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
1929 for (size_t size = m_structureStubInfos.size(), i = 0; i < size; ++i)
1930 m_structureStubInfos[i].deref();
1931 #endif // ENABLE(JIT)
1933 #if DUMP_CODE_BLOCK_STATISTICS
1934 liveCodeBlockSet.remove(this);
1938 void CodeBlock::setNumParameters(int newValue)
1940 m_numParameters = newValue;
1942 #if ENABLE(VALUE_PROFILER)
1943 m_argumentValueProfiles.resize(newValue);
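// Mark the Structures (and StructureChains) cached in a single property-access
// instruction, based on which get_by_id / put_by_id variant it currently is.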
1947 void CodeBlock::visitStructures(SlotVisitor& visitor, Instruction* vPC)
1949 Interpreter* interpreter = m_globalData->interpreter;
1951 if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id) && vPC[4].u.structure) {
1952 visitor.append(&vPC[4].u.structure);
1956 if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_self) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_getter_self) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_custom_self)) {
1957 visitor.append(&vPC[4].u.structure);
1960 if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_proto) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_getter_proto) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_custom_proto)) {
1961 visitor.append(&vPC[4].u.structure);
1962 visitor.append(&vPC[5].u.structure);
1965 if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_chain) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_getter_chain) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_custom_chain)) {
1966 visitor.append(&vPC[4].u.structure);
1967 if (vPC[5].u.structureChain)
1968 visitor.append(&vPC[5].u.structureChain);
1971 if (vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id_transition)) {
1972 visitor.append(&vPC[4].u.structure);
1973 visitor.append(&vPC[5].u.structure);
1974 if (vPC[6].u.structureChain)
1975 visitor.append(&vPC[6].u.structureChain);
1978 if (vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id) && vPC[4].u.structure) {
1979 visitor.append(&vPC[4].u.structure);
1982 if (vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id_replace)) {
1983 visitor.append(&vPC[4].u.structure);
1987 // These instructions don't ref their Structures.
1988 ASSERT(vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id)
    || vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id)
    || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_generic)
    || vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id_generic)
    || vPC[0].u.opcode == interpreter->getOpcode(op_get_array_length)
    || vPC[0].u.opcode == interpreter->getOpcode(op_get_string_length));
1991 void EvalCodeCache::visitAggregate(SlotVisitor& visitor)
1993 EvalCacheMap::iterator end = m_cacheMap.end();
1994 for (EvalCacheMap::iterator ptr = m_cacheMap.begin(); ptr != end; ++ptr)
1995 visitor.append(&ptr->value);
1998 void CodeBlock::visitAggregate(SlotVisitor& visitor)
2000 #if ENABLE(PARALLEL_GC) && ENABLE(DFG_JIT)
2002 // I may be asked to scan myself more than once, and it may even happen concurrently.
2003 // To guard against that, use a CAS loop to check if I've been called already. Only one thread
2004 // may proceed past this point - whichever one wins the CAS race.
2007 oldValue = m_dfgData->visitAggregateHasBeenCalled;
2009 // Looks like someone else won! Return immediately to ensure that we don't
2010 // trace the same CodeBlock concurrently. Doing so is hazardous since we will
2011 // be mutating the state of ValueProfiles, which contain JSValues, which can
2012 // have word-tearing on 32-bit, leading to awesome timing-dependent crashes
2013 // that are nearly impossible to track down.
2015 // Also note that it must be safe to return early as soon as we see the
2016 // value true (well, (unsigned)1), since once a GC thread is in this method
2017 // and has won the CAS race (i.e. was responsible for setting the value true)
2018 // it will definitely complete the rest of this method before declaring that it is done.
2022 } while (!WTF::weakCompareAndSwap(&m_dfgData->visitAggregateHasBeenCalled, 0, 1));
2024 #endif // ENABLE(PARALLEL_GC) && ENABLE(DFG_JIT)
2026 if (!!m_alternative)
2027 m_alternative->visitAggregate(visitor);
2029 visitor.append(&m_unlinkedCode);
2031 // There are three things that may use unconditional finalizers: lazy bytecode freeing,
2032 // inline cache clearing, and jettisoning. The probability of us wanting to do at
2033 // least one of those things is probably quite close to 1. So we add one no matter what,
2034 // and when it runs it figures out whether it has any work to do.
2035 visitor.addUnconditionalFinalizer(this);
2037 if (shouldImmediatelyAssumeLivenessDuringScan()) {
2038 // This code block is live, so scan all references strongly and return.
2039 stronglyVisitStrongReferences(visitor);
2040 stronglyVisitWeakReferences(visitor);
2045 // We get here if we're live in the sense that our owner executable is live,
2046 // but we're not yet live for sure in another sense: we may yet decide that this
2047 // code block should be jettisoned based on its outgoing weak references being
2048 // stale. Set a flag to indicate that we're still assuming that we're dead, and
2049 // perform one round of determining if we're live. The GC may determine, based on
2050 // either us marking additional objects, or by other objects being marked for
2051 // other reasons, that this iteration should run again; it will notify us of this
2052 // decision by calling harvestWeakReferences().
2054 m_dfgData->livenessHasBeenProved = false;
2055 m_dfgData->allTransitionsHaveBeenMarked = false;
2057 performTracingFixpointIteration(visitor);
2059 // GC doesn't have enough information yet for us to decide whether to keep our DFG
2060 // data, so we need to register a handler to run again at the end of GC, when more
2061 // information is available.
2062 if (!(m_dfgData->livenessHasBeenProved && m_dfgData->allTransitionsHaveBeenMarked))
2063 visitor.addWeakReferenceHarvester(this);
2065 #else // ENABLE(DFG_JIT)
2066 ASSERT_NOT_REACHED();
2067 #endif // ENABLE(DFG_JIT)
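// One pass of the DFG weak-reference fixpoint: mark the target of every
// transition whose code origin and source structure are already marked, then
// check whether all of our weak references are marked. If they are, liveness is
// proved and the strong references are scanned.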
2070 void CodeBlock::performTracingFixpointIteration(SlotVisitor& visitor)
2072 UNUSED_PARAM(visitor);
2075 // Evaluate our weak reference transitions, if there are still some to evaluate.
2076 if (!m_dfgData->allTransitionsHaveBeenMarked) {
2077 bool allAreMarkedSoFar = true;
2078 for (unsigned i = 0; i < m_dfgData->transitions.size(); ++i) {
2079 if ((!m_dfgData->transitions[i].m_codeOrigin
2080 || Heap::isMarked(m_dfgData->transitions[i].m_codeOrigin.get()))
2081 && Heap::isMarked(m_dfgData->transitions[i].m_from.get())) {
2082 // If the following three things are live, then the target of the
2083 // transition is also live:
2084 // - This code block. We know it's live already because otherwise
2085 // we wouldn't be scanning ourselves.
2086 // - The code origin of the transition. Transitions may arise from
2087 // code that was inlined. They are not relevant if the user's
2088 // object that is required for the inlinee to run is no longer
2090 // - The source of the transition. The transition checks if some
2091 // heap location holds the source, and if so, stores the target.
2092 // Hence the source must be live for the transition to be live.
2093 visitor.append(&m_dfgData->transitions[i].m_to);
2095 allAreMarkedSoFar = false;
2098 if (allAreMarkedSoFar)
2099 m_dfgData->allTransitionsHaveBeenMarked = true;
2102 // Check if we have any remaining work to do.
2103 if (m_dfgData->livenessHasBeenProved)
2106 // Now check all of our weak references. If all of them are live, then we
2107 // have proved liveness and so we scan our strong references. If, at the end of
2108 // GC, we still have not proved liveness, then this code block is toast.
2109 bool allAreLiveSoFar = true;
2110 for (unsigned i = 0; i < m_dfgData->weakReferences.size(); ++i) {
2111 if (!Heap::isMarked(m_dfgData->weakReferences[i].get())) {
2112 allAreLiveSoFar = false;
2117 // If some weak references are dead, then this fixpoint iteration was unsuccessful.
2119 if (!allAreLiveSoFar)
2122 // All weak references are live. Record this information so we don't
2123 // come back here again, and scan the strong references.
2124 m_dfgData->livenessHasBeenProved = true;
2125 stronglyVisitStrongReferences(visitor);
2126 #endif // ENABLE(DFG_JIT)
2129 void CodeBlock::visitWeakReferences(SlotVisitor& visitor)
2131 performTracingFixpointIteration(visitor);
2134 #if ENABLE(JIT_VERBOSE_OSR)
2135 static const bool verboseUnlinking = true;
2137 static const bool verboseUnlinking = false;
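// Runs at the end of GC (via the unconditional finalizer registered in
// visitAggregate): clears LLInt property-access caches and call links whose
// Structures or callees died, jettisons DFG code whose weak references went
// stale, and resets JIT inline caches that refer to dead cells.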
2140 void CodeBlock::finalizeUnconditionally()
2143 Interpreter* interpreter = m_globalData->interpreter;
2144 if (!!numberOfInstructions()) {
2145 const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
2146 for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
2147 Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
2148 switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) {
2150 case op_get_by_id_out_of_line:
2152 case op_put_by_id_out_of_line:
2153 if (!curInstruction[4].u.structure || Heap::isMarked(curInstruction[4].u.structure.get()))
2155 if (verboseUnlinking)
2156 dataLog("Clearing LLInt property access with structure %p.\n", curInstruction[4].u.structure.get());
2157 curInstruction[4].u.structure.clear();
2158 curInstruction[5].u.operand = 0;
2160 case op_put_by_id_transition_direct:
2161 case op_put_by_id_transition_normal:
2162 case op_put_by_id_transition_direct_out_of_line:
2163 case op_put_by_id_transition_normal_out_of_line:
2164 if (Heap::isMarked(curInstruction[4].u.structure.get())
2165 && Heap::isMarked(curInstruction[6].u.structure.get())
2166 && Heap::isMarked(curInstruction[7].u.structureChain.get()))
2168 if (verboseUnlinking) {
2169 dataLog("Clearing LLInt put transition with structures %p -> %p, chain %p.\n",
2170 curInstruction[4].u.structure.get(),
2171 curInstruction[6].u.structure.get(),
2172 curInstruction[7].u.structureChain.get());
2174 curInstruction[4].u.structure.clear();
2175 curInstruction[6].u.structure.clear();
2176 curInstruction[7].u.structureChain.clear();
2177 curInstruction[0].u.opcode = interpreter->getOpcode(op_put_by_id);
2179 case op_get_array_length:
2182 ASSERT_NOT_REACHED();
2186 for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
2187 if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
2188 if (verboseUnlinking)
2189 dataLog("Clearing LLInt call from %p.\n", this);
2190 m_llintCallLinkInfos[i].unlink();
2192 if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
2193 m_llintCallLinkInfos[i].lastSeenCallee.clear();
2196 #endif // ENABLE(LLINT)
2199 // Check if we're not live. If we are, then jettison.
2200 if (!(shouldImmediatelyAssumeLivenessDuringScan() || m_dfgData->livenessHasBeenProved)) {
2201 if (verboseUnlinking)
2202 dataLog("Code block %p (executable %p) has dead weak references, jettisoning during GC.\n", this, ownerExecutable());
2204 // Make sure that the baseline JIT knows that it should re-warm-up before it next executes.
2206 alternative()->optimizeAfterWarmUp();
2208 if (DFG::shouldShowDisassembly()) {
2209 dataLog("DFG CodeBlock %p will be jettisoned because of the following dead references:\n", this);
2210 for (unsigned i = 0; i < m_dfgData->transitions.size(); ++i) {
2211 WeakReferenceTransition& transition = m_dfgData->transitions[i];
2212 JSCell* origin = transition.m_codeOrigin.get();
2213 JSCell* from = transition.m_from.get();
2214 JSCell* to = transition.m_to.get();
2215 if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
2217 dataLog(" Transition under %s, ", JSValue(origin).description());
2218 dataLog("%s -> ", JSValue(from).description());
2219 dataLog("%s.\n", JSValue(to).description());
2221 for (unsigned i = 0; i < m_dfgData->weakReferences.size(); ++i) {
2222 JSCell* weak = m_dfgData->weakReferences[i].get();
2223 if (Heap::isMarked(weak))
2225 dataLog(" Weak reference %s.\n", JSValue(weak).description());
2232 #endif // ENABLE(DFG_JIT)
2234 for (size_t size = m_putToBaseOperations.size(), i = 0; i < size; ++i) {
2235 if (m_putToBaseOperations[i].m_structure && !Heap::isMarked(m_putToBaseOperations[i].m_structure.get())) {
2236 if (verboseUnlinking)
2237 dataLog("Clearing putToBase info in %p.\n", this);
2238 m_putToBaseOperations[i].m_structure.clear();
2241 for (size_t size = m_resolveOperations.size(), i = 0; i < size; ++i) {
2242 if (m_resolveOperations[i].isEmpty())
2245 for (size_t insnSize = m_resolveOperations[i].size() - 1, k = 0; k < insnSize; ++k)
2246 ASSERT(!m_resolveOperations[i][k].m_structure);
2249 if (m_resolveOperations[i].last().m_structure && !Heap::isMarked(m_resolveOperations[i].last().m_structure.get())) {
2250 if (verboseUnlinking)
2251 dataLog("Clearing resolve info in %p.\n", this);
2252 m_resolveOperations[i].last().m_structure.clear();
2257 // Handle inline caches.
2258 if (!!getJITCode()) {
2259 RepatchBuffer repatchBuffer(this);
2260 for (unsigned i = 0; i < numberOfCallLinkInfos(); ++i) {
2261 if (callLinkInfo(i).isLinked()) {
2262 if (ClosureCallStubRoutine* stub = callLinkInfo(i).stub.get()) {
2263 if (!Heap::isMarked(stub->structure())
2264 || !Heap::isMarked(stub->executable())) {
2265 if (verboseUnlinking)
2266 dataLog("Clearing closure call from %p to %p, stub routine %p.\n", this, stub->executable(), stub);
2267 callLinkInfo(i).unlink(*m_globalData, repatchBuffer);
2269 } else if (!Heap::isMarked(callLinkInfo(i).callee.get())) {
2270 if (verboseUnlinking)
2271 dataLog("Clearing call from %p to %p.\n", this, callLinkInfo(i).callee.get());
2272 callLinkInfo(i).unlink(*m_globalData, repatchBuffer);
2275 if (!!callLinkInfo(i).lastSeenCallee
2276 && !Heap::isMarked(callLinkInfo(i).lastSeenCallee.get()))
2277 callLinkInfo(i).lastSeenCallee.clear();
2279 for (size_t size = m_structureStubInfos.size(), i = 0; i < size; ++i) {
2280 StructureStubInfo& stubInfo = m_structureStubInfos[i];
2282 if (stubInfo.visitWeakReferences())
2285 resetStubInternal(repatchBuffer, stubInfo);
2292 void CodeBlock::resetStub(StructureStubInfo& stubInfo)
2294 if (stubInfo.accessType == access_unset)
2297 RepatchBuffer repatchBuffer(this);
2298 resetStubInternal(repatchBuffer, stubInfo);
2301 void CodeBlock::resetStubInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
2303 AccessType accessType = static_cast<AccessType>(stubInfo.accessType);
2305 if (verboseUnlinking)
2306 dataLog("Clearing structure cache (kind %d) in %p.\n", stubInfo.accessType, this);
2308 if (isGetByIdAccess(accessType)) {
2309 if (getJITCode().jitType() == JITCode::DFGJIT)
2310 DFG::dfgResetGetByID(repatchBuffer, stubInfo);
2312 JIT::resetPatchGetById(repatchBuffer, &stubInfo);
2314 ASSERT(isPutByIdAccess(accessType));
2315 if (getJITCode().jitType() == JITCode::DFGJIT)
2316 DFG::dfgResetPutByID(repatchBuffer, stubInfo);
2318 JIT::resetPatchPutById(repatchBuffer, &stubInfo);
2325 void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
2327 visitor.append(&m_globalObject);
2328 visitor.append(&m_ownerExecutable);
2329 visitor.append(&m_unlinkedCode);
2331 m_rareData->m_evalCodeCache.visitAggregate(visitor);
2332 visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
2333 for (size_t i = 0; i < m_functionExprs.size(); ++i)
2334 visitor.append(&m_functionExprs[i]);
2335 for (size_t i = 0; i < m_functionDecls.size(); ++i)
2336 visitor.append(&m_functionDecls[i]);
2338 updateAllPredictions(Collection);
2341 void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor)
2343 UNUSED_PARAM(visitor);
2349 for (unsigned i = 0; i < m_dfgData->transitions.size(); ++i) {
2350 if (!!m_dfgData->transitions[i].m_codeOrigin)
2351 visitor.append(&m_dfgData->transitions[i].m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
2352 visitor.append(&m_dfgData->transitions[i].m_from);
2353 visitor.append(&m_dfgData->transitions[i].m_to);
2356 for (unsigned i = 0; i < m_dfgData->weakReferences.size(); ++i)
2357 visitor.append(&m_dfgData->weakReferences[i]);
2361 #if ENABLE(BYTECODE_COMMENTS)
2362 // Finds the comment string for the specified bytecode offset/PC, if one is available.
2363 const char* CodeBlock::commentForBytecodeOffset(unsigned bytecodeOffset)
2365 ASSERT(bytecodeOffset < instructions().size());
2367 Vector<Comment>& comments = m_bytecodeComments;
2368 size_t numberOfComments = comments.size();
2369 const char* result = 0;
2371 if (!numberOfComments)
2372 return 0; // No comments to match with.
2374 // The next match is most likely the next comment in the list.
2375 // Do a quick check to see if that is a match first.
2376 // m_bytecodeCommentIterator should already be pointing to the
2377 // next comment we should check.
2379 ASSERT(m_bytecodeCommentIterator < comments.size());
2381 size_t i = m_bytecodeCommentIterator;
2382 size_t commentPC = comments[i].pc;
2383 if (commentPC == bytecodeOffset) {
2384 // We've got a match. All done!
2385 m_bytecodeCommentIterator = i;
2386 result = comments[i].string;
2387 } else if (commentPC > bytecodeOffset) {
2388 // The current comment is already greater than the requested PC.
2389 // Start searching from the first comment.
2392 // Otherwise, the current comment's PC is less than the requested PC.
2393 // Hence, we can just start searching from the next comment in the list.
2398 // If the result is still not found, do a linear search in the range
2399 // that we've determined above.
2401 for (; i < comments.size(); ++i) {
2402 commentPC = comments[i].pc;
2403 if (commentPC == bytecodeOffset) {
2404 result = comments[i].string;
2407 if (comments[i].pc > bytecodeOffset) {
2408 // The current comment PC is already past the requested
2409 // bytecodeOffset. Hence, there are no more possible
2410 // matches. Just fail.
2416 // Update the iterator to point to the next comment.
2417 if (++i >= numberOfComments) {
2418 // At most point to the last comment entry. This ensures that the
2419 // next time we call this function, the quick checks will at least
2420 // have one entry to check and can fail fast if appropriate.
2421 i = numberOfComments - 1;
2423 m_bytecodeCommentIterator = i;
2427 void CodeBlock::dumpBytecodeComments()
2429 Vector<Comment>& comments = m_bytecodeComments;
2430 printf("Comments for codeblock %p: size %lu\n", this, comments.size());
2431 for (size_t i = 0; i < comments.size(); ++i)
2432 printf(" pc %lu : '%s'\n", comments[i].pc, comments[i].string);
2433 printf("End of comments for codeblock %p\n", this);
2435 #endif // ENABLE(BYTECODE_COMMENTS)
2437 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset)
2439 ASSERT(bytecodeOffset < instructions().size());
2444 Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
2445 for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
2446 // Handlers are ordered innermost first, so the first handler we encounter
2447 // that contains the source address is the correct handler to use.
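// For example (illustrative): with one try block nested inside another, the
// inner handler's [start, end] range is emitted before the outer one's, so an
// offset covered by both ranges resolves to the inner handler.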
2448 if (exceptionHandlers[i].start <= bytecodeOffset && exceptionHandlers[i].end >= bytecodeOffset)
2449 return &exceptionHandlers[i];
2455 int CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
2457 ASSERT(bytecodeOffset < instructions().size());
2458 return m_ownerExecutable->lineNo() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
2461 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset)
2463 m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset);
2464 divot += m_sourceOffset;
2467 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
2470 m_llintCallLinkInfos.shrinkToFit();
2473 m_structureStubInfos.shrinkToFit();
2474 m_callLinkInfos.shrinkToFit();
2476 #if ENABLE(VALUE_PROFILER)
2477 if (shrinkMode == EarlyShrink)
2478 m_argumentValueProfiles.shrinkToFit();
2479 m_rareCaseProfiles.shrinkToFit();
2480 m_specialFastCaseProfiles.shrinkToFit();
2483 if (shrinkMode == EarlyShrink) {
2484 m_identifiers.shrinkToFit();
2485 m_functionDecls.shrinkToFit();
2486 m_functionExprs.shrinkToFit();
2487 m_constantRegisters.shrinkToFit();
2488 } // else don't shrink these, because we would have already pointed pointers into these tables.
2491 m_rareData->m_exceptionHandlers.shrinkToFit();
2492 m_rareData->m_immediateSwitchJumpTables.shrinkToFit();
2493 m_rareData->m_characterSwitchJumpTables.shrinkToFit();
2494 m_rareData->m_stringSwitchJumpTables.shrinkToFit();
2496 m_rareData->m_callReturnIndexVector.shrinkToFit();
2499 m_rareData->m_inlineCallFrames.shrinkToFit();
2500 m_rareData->m_codeOrigins.shrinkToFit();
2506 m_dfgData->osrEntry.shrinkToFit();
2507 m_dfgData->osrExit.shrinkToFit();
2508 m_dfgData->speculationRecovery.shrinkToFit();
2509 m_dfgData->weakReferences.shrinkToFit();
2510 m_dfgData->transitions.shrinkToFit();
2511 m_dfgData->minifiedDFG.prepareAndShrink();
2512 m_dfgData->variableEventStream.shrinkToFit();
2517 void CodeBlock::createActivation(CallFrame* callFrame)
2519 ASSERT(codeType() == FunctionCode);
2520 ASSERT(needsFullScopeChain());
2521 ASSERT(!callFrame->uncheckedR(activationRegister()).jsValue());
2522 JSActivation* activation = JSActivation::create(callFrame->globalData(), callFrame, this);
2523 callFrame->uncheckedR(activationRegister()) = JSValue(activation);
2524 callFrame->setScope(activation);
2527 unsigned CodeBlock::addOrFindConstant(JSValue v)
2529 unsigned numberOfConstants = numberOfConstantRegisters();
2530 for (unsigned i = 0; i < numberOfConstants; ++i) {
2531 if (getConstant(FirstConstantRegisterIndex + i) == v)
2534 return addConstant(v);
2538 void CodeBlock::unlinkCalls()
2540 if (!!m_alternative)
2541 m_alternative->unlinkCalls();
2543 for (size_t i = 0; i < m_llintCallLinkInfos.size(); ++i) {
2544 if (m_llintCallLinkInfos[i].isLinked())
2545 m_llintCallLinkInfos[i].unlink();
2548 if (!m_callLinkInfos.size())
2550 if (!m_globalData->canUseJIT())
2552 RepatchBuffer repatchBuffer(this);
2553 for (size_t i = 0; i < m_callLinkInfos.size(); i++) {
2554 if (!m_callLinkInfos[i].isLinked())
2556 m_callLinkInfos[i].unlink(*m_globalData, repatchBuffer);
2560 void CodeBlock::unlinkIncomingCalls()
2563 while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
2564 m_incomingLLIntCalls.begin()->unlink();
2566 if (m_incomingCalls.isEmpty())
2568 RepatchBuffer repatchBuffer(this);
2569 while (m_incomingCalls.begin() != m_incomingCalls.end())
2570 m_incomingCalls.begin()->unlink(*m_globalData, repatchBuffer);
2572 #endif // ENABLE(JIT)
2575 Instruction* CodeBlock::adjustPCIfAtCallSite(Instruction* potentialReturnPC)
2577 ASSERT(potentialReturnPC);
2579 unsigned returnPCOffset = potentialReturnPC - instructions().begin();
2580 Instruction* adjustedPC;
2581 unsigned opcodeLength;
2583 // If we are at a callsite, the LLInt stores the PC after the call
2584 // instruction rather than the PC of the call instruction. This requires
2585 // some correcting. When that is the case, we can rely on the fact that the
2586 // preceding instruction must be one of the call instructions, so either it's
2587 // a call_varargs or it's a call, construct, or eval.
2589 // If we are not at a call site, then we need to guard against the
2590 // possibility of peeking past the start of the bytecode range for this
2591 // codeBlock. Hence, we do a bounds check before we peek at the
2592 // potential "preceding" instruction.
2593 // The bounds check is done by comparing the offset of the potential
2594 // returnPC with the length of the opcode. If there is room for a call
2595 // instruction before the returnPC, then the offset of the returnPC must
2596 // be greater than the size of the call opcode we're looking for.
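// (Illustrative, with made-up numbers: if the return PC is only 2 instruction
// slots into the bytecode and the call opcode we test for is 4 slots long,
// there is no room for that call before the return PC, so we must not peek
// backwards.)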
2598 // The determination of the call instruction present (if we are at a
2599 // callsite) depends on the following assumptions. So, assert that
2600 // they are still true:
2601 ASSERT(OPCODE_LENGTH(op_call_varargs) <= OPCODE_LENGTH(op_call));
2602 ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
2603 ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
2605 // Check for the case of a preceding op_call_varargs:
2606 opcodeLength = OPCODE_LENGTH(op_call_varargs);
2607 adjustedPC = potentialReturnPC - opcodeLength;
2608 if ((returnPCOffset >= opcodeLength)
2609 && (adjustedPC->u.pointer == LLInt::getCodePtr(llint_op_call_varargs))) {
2613 // Check for the case of the other 3 call instructions:
2614 opcodeLength = OPCODE_LENGTH(op_call);
2615 adjustedPC = potentialReturnPC - opcodeLength;
2616 if ((returnPCOffset >= opcodeLength)
2617 && (adjustedPC->u.pointer == LLInt::getCodePtr(llint_op_call)
2618 || adjustedPC->u.pointer == LLInt::getCodePtr(llint_op_construct)
2619 || adjustedPC->u.pointer == LLInt::getCodePtr(llint_op_call_eval))) {
2623 // Not a call site. No need to adjust PC. Just return the original.
2624 return potentialReturnPC;
2626 #endif // ENABLE(LLINT)
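// Map a return address inside a closure-call stub back to the stub routine that
// owns it: first search this block's CallLinkInfos, then fall back to the
// heap's set of GC-aware stub routines, since the stub may have been
// jettisoned.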
2629 ClosureCallStubRoutine* CodeBlock::findClosureCallForReturnPC(ReturnAddressPtr returnAddress)
2631 for (unsigned i = m_callLinkInfos.size(); i--;) {
2632 CallLinkInfo& info = m_callLinkInfos[i];
2635 if (!info.stub->code().executableMemory()->contains(returnAddress.value()))
2638 return info.stub.get();
2641 // The stub routine may have been jettisoned. This is rare, but we have to handle it.
2642 const JITStubRoutineSet& set = m_globalData->heap.jitStubRoutines();
2643 for (unsigned i = set.size(); i--;) {
2644 GCAwareJITStubRoutine* genericStub = set.at(i);
2645 if (!genericStub->isClosureCall())
2647 ClosureCallStubRoutine* stub = static_cast<ClosureCallStubRoutine*>(genericStub);
2648 if (!stub->code().executableMemory()->contains(returnAddress.value()))
2657 unsigned CodeBlock::bytecodeOffset(ExecState* exec, ReturnAddressPtr returnAddress)
2660 UNUSED_PARAM(returnAddress);
2662 #if !ENABLE(LLINT_C_LOOP)
2663 // When using the JIT, we could have addresses that are not bytecode
2664 // addresses. We check if the return address is in the LLInt glue and
2665 // opcode handlers range here to ensure that we are looking at bytecode
2666 // before attempting to convert the return address into a bytecode offset.
2668 // In the case of the C Loop LLInt, the JIT is disabled, and the only
2669 // valid return addresses should be bytecode PCs. So, we can and need to
2670 // forego this check because when we do not ENABLE(COMPUTED_GOTO_OPCODES),
2671 // then the bytecode "PC"s are actually the opcodeIDs and are not bounded
2672 // by llint_begin and llint_end.
2673 if (returnAddress.value() >= LLInt::getCodePtr(llint_begin)
2674 && returnAddress.value() <= LLInt::getCodePtr(llint_end))
2677 ASSERT(exec->codeBlock());
2678 ASSERT(exec->codeBlock() == this);
2679 ASSERT(JITCode::isBaselineCode(getJITType()));
2680 Instruction* instruction = exec->currentVPC();
2681 ASSERT(instruction);
2683 instruction = adjustPCIfAtCallSite(instruction);
2684 return bytecodeOffset(instruction);
2686 #endif // !ENABLE(LLINT)
2691 Vector<CallReturnOffsetToBytecodeOffset>& callIndices = m_rareData->m_callReturnIndexVector;
2692 if (!callIndices.size())
2695 if (getJITCode().getExecutableMemory()->contains(returnAddress.value())) {
2696 unsigned callReturnOffset = getJITCode().offsetOf(returnAddress.value());
2697 CallReturnOffsetToBytecodeOffset* result =
2698 binarySearch<CallReturnOffsetToBytecodeOffset, unsigned, getCallReturnOffset>(callIndices.begin(), callIndices.size(), callReturnOffset);
2699 ASSERT(result->callReturnOffset == callReturnOffset);
2700 return result->bytecodeOffset;
2703 return findClosureCallForReturnPC(returnAddress)->codeOrigin().bytecodeIndex;
2704 #endif // ENABLE(JIT)
2706 #if !ENABLE(LLINT) && !ENABLE(JIT)
2712 bool CodeBlock::codeOriginForReturn(ReturnAddressPtr returnAddress, CodeOrigin& codeOrigin)
2714 if (!hasCodeOrigins())
2717 if (!getJITCode().getExecutableMemory()->contains(returnAddress.value())) {
2718 codeOrigin = findClosureCallForReturnPC(returnAddress)->codeOrigin();
2722 unsigned offset = getJITCode().offsetOf(returnAddress.value());
2723 CodeOriginAtCallReturnOffset* entry = binarySearch<CodeOriginAtCallReturnOffset, unsigned, getCallReturnOffsetForCodeOrigin>(codeOrigins().begin(), codeOrigins().size(), offset, WTF::KeyMustNotBePresentInArray);
2724 if (entry->callReturnOffset != offset)
2726 codeOrigin = entry->codeOrigin;
2729 #endif // ENABLE(DFG_JIT)
2731 void CodeBlock::clearEvalCache()
2733 if (!!m_alternative)
2734 m_alternative->clearEvalCache();
2737 m_rareData->m_evalCodeCache.clear();
2740 template<typename T>
2741 inline void replaceExistingEntries(Vector<T>& target, Vector<T>& source)
2743 ASSERT(target.size() <= source.size());
2744 for (size_t i = 0; i < target.size(); ++i)
2745 target[i] = source[i];
2748 void CodeBlock::copyPostParseDataFrom(CodeBlock* alternative)
2753 replaceExistingEntries(m_constantRegisters, alternative->m_constantRegisters);
2754 replaceExistingEntries(m_functionDecls, alternative->m_functionDecls);
2755 replaceExistingEntries(m_functionExprs, alternative->m_functionExprs);
2756 if (!!m_rareData && !!alternative->m_rareData)
2757 replaceExistingEntries(m_rareData->m_constantBuffers, alternative->m_rareData->m_constantBuffers);
2760 void CodeBlock::copyPostParseDataFromAlternative()
2762 copyPostParseDataFrom(m_alternative.get());
2766 void CodeBlock::reoptimize()
2768 ASSERT(replacement() != this);
2769 ASSERT(replacement()->alternative() == this);
2770 replacement()->tallyFrequentExitSites();
2771 if (DFG::shouldShowDisassembly())
2772 dataLog("DFG CodeBlock %p will be jettisoned due to reoptimization of %p.\n", replacement(), this);
2773 replacement()->jettison();
2774 countReoptimization();
2775 optimizeAfterWarmUp();
2778 CodeBlock* ProgramCodeBlock::replacement()
2780 return &static_cast<ProgramExecutable*>(ownerExecutable())->generatedBytecode();
2783 CodeBlock* EvalCodeBlock::replacement()
2785 return &static_cast<EvalExecutable*>(ownerExecutable())->generatedBytecode();
2788 CodeBlock* FunctionCodeBlock::replacement()
2790 return &static_cast<FunctionExecutable*>(ownerExecutable())->generatedBytecodeFor(m_isConstructor ? CodeForConstruct : CodeForCall);
2793 JSObject* ProgramCodeBlock::compileOptimized(ExecState* exec, JSScope* scope, unsigned bytecodeIndex)
2795 if (replacement()->getJITType() == JITCode::nextTierJIT(getJITType()))
2797 JSObject* error = static_cast<ProgramExecutable*>(ownerExecutable())->compileOptimized(exec, scope, bytecodeIndex);
2801 JSObject* EvalCodeBlock::compileOptimized(ExecState* exec, JSScope* scope, unsigned bytecodeIndex)
2803 if (replacement()->getJITType() == JITCode::nextTierJIT(getJITType()))
2805 JSObject* error = static_cast<EvalExecutable*>(ownerExecutable())->compileOptimized(exec, scope, bytecodeIndex);
2809 JSObject* FunctionCodeBlock::compileOptimized(ExecState* exec, JSScope* scope, unsigned bytecodeIndex)
2811 if (replacement()->getJITType() == JITCode::nextTierJIT(getJITType()))
2813 JSObject* error = static_cast<FunctionExecutable*>(ownerExecutable())->compileOptimizedFor(exec, scope, bytecodeIndex, m_isConstructor ? CodeForConstruct : CodeForCall);
2817 DFG::CapabilityLevel ProgramCodeBlock::canCompileWithDFGInternal()
2819 return DFG::canCompileProgram(this);
2822 DFG::CapabilityLevel EvalCodeBlock::canCompileWithDFGInternal()
2824 return DFG::canCompileEval(this);
2827 DFG::CapabilityLevel FunctionCodeBlock::canCompileWithDFGInternal()
2829 if (m_isConstructor)
2830 return DFG::canCompileFunctionForConstruct(this);
2831 return DFG::canCompileFunctionForCall(this);
2834 void ProgramCodeBlock::jettison()
2836 ASSERT(JITCode::isOptimizingJIT(getJITType()));
2837 ASSERT(this == replacement());
2838 if (DFG::shouldShowDisassembly())
2839 dataLog("Jettisoning DFG CodeBlock %p.\n", this);
2840 static_cast<ProgramExecutable*>(ownerExecutable())->jettisonOptimizedCode(*globalData());
2843 void EvalCodeBlock::jettison()
2845 ASSERT(JITCode::isOptimizingJIT(getJITType()));
2846 ASSERT(this == replacement());
2847 if (DFG::shouldShowDisassembly())
2848 dataLog("Jettisoning DFG CodeBlock %p.\n", this);
2849 static_cast<EvalExecutable*>(ownerExecutable())->jettisonOptimizedCode(*globalData());
2852 void FunctionCodeBlock::jettison()
2854 ASSERT(JITCode::isOptimizingJIT(getJITType()));
2855 ASSERT(this == replacement());
2856 if (DFG::shouldShowDisassembly())
2857 dataLog("Jettisoning DFG CodeBlock %p.\n", this);
2858 static_cast<FunctionExecutable*>(ownerExecutable())->jettisonOptimizedCodeFor(*globalData(), m_isConstructor ? CodeForConstruct : CodeForCall);
2861 bool ProgramCodeBlock::jitCompileImpl(ExecState* exec)
2863 ASSERT(getJITType() == JITCode::InterpreterThunk);
2864 ASSERT(this == replacement());
2865 return static_cast<ProgramExecutable*>(ownerExecutable())->jitCompile(exec);
2868 bool EvalCodeBlock::jitCompileImpl(ExecState* exec)
2870 ASSERT(getJITType() == JITCode::InterpreterThunk);
2871 ASSERT(this == replacement());
2872 return static_cast<EvalExecutable*>(ownerExecutable())->jitCompile(exec);
2875 bool FunctionCodeBlock::jitCompileImpl(ExecState* exec)
2877 ASSERT(getJITType() == JITCode::InterpreterThunk);
2878 ASSERT(this == replacement());
2879 return static_cast<FunctionExecutable*>(ownerExecutable())->jitCompileFor(exec, m_isConstructor ? CodeForConstruct : CodeForCall);
2883 #if ENABLE(VALUE_PROFILER)
2884 ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
2886 for (unsigned i = 0; i < m_arrayProfiles.size(); ++i) {
2887 if (m_arrayProfiles[i].bytecodeOffset() == bytecodeOffset)
2888 return &m_arrayProfiles[i];
2893 ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
2895 ArrayProfile* result = getArrayProfile(bytecodeOffset);
2898 return addArrayProfile(bytecodeOffset);
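// Recompute the predicted type for every value profile, and report how many
// non-argument profiles have seen samples and how many samples the profiles
// hold in total. shouldOptimizeNow() uses these counts to judge whether
// profiling has warmed up enough.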
2901 void CodeBlock::updateAllPredictionsAndCountLiveness(
2902 OperationInProgress operation, unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
2904 numberOfLiveNonArgumentValueProfiles = 0;
2905 numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
2906 for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
2907 ValueProfile* profile = getFromAllValueProfiles(i);
2908 unsigned numSamples = profile->totalNumberOfSamples();
2909 if (numSamples > ValueProfile::numberOfBuckets)
2910 numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
2911 numberOfSamplesInProfiles += numSamples;
2912 if (profile->m_bytecodeOffset < 0) {
2913 profile->computeUpdatedPrediction(operation);
2916 if (profile->numberOfSamples() || profile->m_prediction != SpecNone)
2917 numberOfLiveNonArgumentValueProfiles++;
2918 profile->computeUpdatedPrediction(operation);
2922 m_lazyOperandValueProfiles.computeUpdatedPredictions(operation);
2926 void CodeBlock::updateAllValueProfilePredictions(OperationInProgress operation)
2928 unsigned ignoredValue1, ignoredValue2;
2929 updateAllPredictionsAndCountLiveness(operation, ignoredValue1, ignoredValue2);
2932 void CodeBlock::updateAllArrayPredictions(OperationInProgress operation)
2934 for (unsigned i = m_arrayProfiles.size(); i--;)
2935 m_arrayProfiles[i].computeUpdatedPrediction(this, operation);
2937 // Don't count these either, for similar reasons.
2938 for (unsigned i = m_arrayAllocationProfiles.size(); i--;)
2939 m_arrayAllocationProfiles[i].updateIndexingType();
2942 void CodeBlock::updateAllPredictions(OperationInProgress operation)
2944 updateAllValueProfilePredictions(operation);
2945 updateAllArrayPredictions(operation);
2948 bool CodeBlock::shouldOptimizeNow()
2950 #if ENABLE(JIT_VERBOSE_OSR)
2951 dataLog("Considering optimizing %p...\n", this);
2954 #if ENABLE(VERBOSE_VALUE_PROFILE)
2955 dumpValueProfiles();
2958 if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
2961 updateAllArrayPredictions();
2963 unsigned numberOfLiveNonArgumentValueProfiles;
2964 unsigned numberOfSamplesInProfiles;
2965 updateAllPredictionsAndCountLiveness(NoOperation, numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
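// Optimize now only if enough value profiles are live and sufficiently full
// (per Options::desiredProfileLivenessRate() and desiredProfileFullnessRate())
// and we have already been delayed at least minimumOptimizationDelay() times;
// otherwise bump the delay counter and re-arm the warm-up counter.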
2967 #if ENABLE(JIT_VERBOSE_OSR)
2968 dataLog("Profile hotness: %lf (%u / %u), %lf (%u / %u)\n", (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles(), numberOfLiveNonArgumentValueProfiles, numberOfValueProfiles(), (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles(), numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfValueProfiles());
2971 if ((!numberOfValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles() >= Options::desiredProfileLivenessRate())
2972 && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
2973 && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
2976 ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
2977 m_optimizationDelayCounter++;
2978 optimizeAfterWarmUp();
2984 void CodeBlock::tallyFrequentExitSites()
2986 ASSERT(getJITType() == JITCode::DFGJIT);
2987 ASSERT(alternative()->getJITType() == JITCode::BaselineJIT);
2988 ASSERT(!!m_dfgData);
2990 CodeBlock* profiledBlock = alternative();
2992 for (unsigned i = 0; i < m_dfgData->osrExit.size(); ++i) {
2993 DFG::OSRExit& exit = m_dfgData->osrExit[i];
2995 if (!exit.considerAddingAsFrequentExitSite(this, profiledBlock))
2998 #if DFG_ENABLE(DEBUG_VERBOSE)
2999 dataLog("OSR exit #%u (bc#%u, @%u, %s) for code block %p occurred frequently; counting as frequent exit site.\n", i, exit.m_codeOrigin.bytecodeIndex, exit.m_nodeIndex, DFG::exitKindToString(exit.m_kind), this);
3003 #endif // ENABLE(DFG_JIT)
3005 #if ENABLE(VERBOSE_VALUE_PROFILE)
3006 void CodeBlock::dumpValueProfiles()
3008 dataLog("ValueProfile for %p:\n", this);
3009 for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
3010 ValueProfile* profile = getFromAllValueProfiles(i);
3011 if (profile->m_bytecodeOffset < 0) {
3012 ASSERT(profile->m_bytecodeOffset == -1);
3013 dataLog(" arg = %u: ", i);
3015 dataLog(" bc = %d: ", profile->m_bytecodeOffset);
3016 if (!profile->numberOfSamples() && profile->m_prediction == SpecNone) {
3017 dataLog("<empty>\n");
3020 profile->dump(WTF::dataFile());
3023 dataLog("RareCaseProfile for %p:\n", this);
3024 for (unsigned i = 0; i < numberOfRareCaseProfiles(); ++i) {
3025 RareCaseProfile* profile = rareCaseProfile(i);
3026 dataLog(" bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
3028 dataLog("SpecialFastCaseProfile for %p:\n", this);
3029 for (unsigned i = 0; i < numberOfSpecialFastCaseProfiles(); ++i) {
3030 RareCaseProfile* profile = specialFastCaseProfile(i);
3031 dataLog(" bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
3034 #endif // ENABLE(VERBOSE_VALUE_PROFILE)
3036 size_t CodeBlock::predictedMachineCodeSize()
3038 // This will be called from CodeBlock::CodeBlock before either m_globalData or the
3039 // instructions have been initialized. It's OK to return 0 because what will really
3040 // matter is the recomputation of this value when the slow path is triggered.
3044 if (!m_globalData->machineCodeBytesPerBytecodeWordForBaselineJIT)
3045 return 0; // It's as good a prediction as we'll get.
3047 // Be conservative: return a size that will be an overestimation 84% of the time.
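// (The mean plus one standard deviation is roughly the 84th percentile of a
// normal distribution, which is where the 84% figure comes from.)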
3048 double multiplier = m_globalData->machineCodeBytesPerBytecodeWordForBaselineJIT.mean() +
3049 m_globalData->machineCodeBytesPerBytecodeWordForBaselineJIT.standardDeviation();
3051 // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
3052 // here is OK, since this whole method is just a heuristic.
3053 if (multiplier < 0 || multiplier > 1000)
3056 double doubleResult = multiplier * m_instructions.size();
3058 // Be even more paranoid: silently reject values that won't fit into a size_t. If
3059 // the function is so huge that we can't even fit it into virtual memory then we
3060 // should probably have some other guards in place to prevent us from even getting this far.
3062 if (doubleResult > std::numeric_limits<size_t>::max())
3065 return static_cast<size_t>(doubleResult);
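// Walk the bytecode stream one opcode at a time, advancing by each opcode's
// length, and report whether the given opcode occurs anywhere in this block.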
3068 bool CodeBlock::usesOpcode(OpcodeID opcodeID)
3070 Interpreter* interpreter = globalData()->interpreter;
3071 Instruction* instructionsBegin = instructions().begin();
3072 unsigned instructionCount = instructions().size();
3074 for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount; ) {
3075 switch (interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode)) {
3076 #define DEFINE_OP(curOpcode, length) \
3078 if (curOpcode == opcodeID) \
3080 bytecodeOffset += length; \
3082 FOR_EACH_OPCODE_ID(DEFINE_OP)
3085 ASSERT_NOT_REACHED();
3093 String CodeBlock::nameForRegister(int registerNumber)
3095 SymbolTable::iterator end = symbolTable()->end();
3096 for (SymbolTable::iterator ptr = symbolTable()->begin(); ptr != end; ++ptr) {
3097 if (ptr->value.getIndex() == registerNumber)
3098 return String(ptr->key);
3100 if (needsActivation() && registerNumber == activationRegister())
3101 return ASCIILiteral("activation");
3102 if (registerNumber == thisRegister())
3103 return ASCIILiteral("this");
3104 if (usesArguments()) {
3105 if (registerNumber == argumentsRegister())
3106 return ASCIILiteral("arguments");
3107 if (unmodifiedArgumentsRegister(argumentsRegister()) == registerNumber)
3108 return ASCIILiteral("real arguments");
3110 if (registerNumber < 0) {
3111 int argumentPosition = -registerNumber;
3112 argumentPosition -= JSStack::CallFrameHeaderSize + 1;
3113 return String::format("arguments[%3d]", argumentPosition - 1).impl();