CodeBlock compilation and installation should be simplified and rationalized
[WebKit-https.git] / Source / JavaScriptCore / bytecode / CodeBlock.cpp
1 /*
2  * Copyright (C) 2008, 2009, 2010, 2012, 2013 Apple Inc. All rights reserved.
3  * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1.  Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  * 2.  Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
15  *     its contributors may be used to endorse or promote products derived
16  *     from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29
30 #include "config.h"
31 #include "CodeBlock.h"
32
33 #include "BytecodeGenerator.h"
34 #include "CallLinkStatus.h"
35 #include "DFGCapabilities.h"
36 #include "DFGCommon.h"
37 #include "DFGDriver.h"
38 #include "DFGNode.h"
39 #include "DFGRepatch.h"
40 #include "DFGWorklist.h"
41 #include "Debugger.h"
42 #include "Interpreter.h"
43 #include "JIT.h"
44 #include "JITStubs.h"
45 #include "JSActivation.h"
46 #include "JSCJSValue.h"
47 #include "JSFunction.h"
48 #include "JSNameScope.h"
49 #include "LLIntEntrypoints.h"
50 #include "LowLevelInterpreter.h"
51 #include "Operations.h"
52 #include "PolymorphicPutByIdList.h"
53 #include "ReduceWhitespace.h"
54 #include "RepatchBuffer.h"
55 #include "SlotVisitorInlines.h"
56 #include <stdio.h>
57 #include <wtf/CommaPrinter.h>
58 #include <wtf/StringExtras.h>
59 #include <wtf/StringPrintStream.h>
60
61 #if ENABLE(DFG_JIT)
62 #include "DFGOperations.h"
63 #endif
64
65 #if ENABLE(FTL_JIT)
66 #include "FTLJITCode.h"
67 #endif
68
69 #define DUMP_CODE_BLOCK_STATISTICS 0
70
71 namespace JSC {
72
73 CString CodeBlock::inferredName() const
74 {
75     switch (codeType()) {
76     case GlobalCode:
77         return "<global>";
78     case EvalCode:
79         return "<eval>";
80     case FunctionCode:
81         return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
82     default:
83         CRASH();
84         return CString("", 0);
85     }
86 }
87
88 bool CodeBlock::hasHash() const
89 {
90     return !!m_hash;
91 }
92
93 bool CodeBlock::isSafeToComputeHash() const
94 {
95     return !isCompilationThread();
96 }
97
98 CodeBlockHash CodeBlock::hash() const
99 {
100     if (!m_hash) {
101         RELEASE_ASSERT(isSafeToComputeHash());
102         m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind());
103     }
104     return m_hash;
105 }
106
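// Reconstructs this code block's source text for tooling. For function code,
// the unlinked executable's offsets are mapped into the linked source range
// via the start-offset delta and the body is extracted with a "function "
// prefix; other code types simply return the whole source.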
107 CString CodeBlock::sourceCodeForTools() const
108 {
109     if (codeType() != FunctionCode)
110         return ownerExecutable()->source().toUTF8();
111     
112     SourceProvider* provider = source();
113     FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
114     UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
115     unsigned unlinkedStartOffset = unlinked->startOffset();
116     unsigned linkedStartOffset = executable->source().startOffset();
117     int delta = linkedStartOffset - unlinkedStartOffset;
118     unsigned rangeStart = delta + unlinked->functionStartOffset();
119     unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
120     return toCString(
121         "function ",
122         provider->source().impl()->utf8ForRange(rangeStart, rangeEnd - rangeStart));
123 }
124
125 CString CodeBlock::sourceCodeOnOneLine() const
126 {
127     return reduceWhitespace(sourceCodeForTools());
128 }
129
130 void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
131 {
132     if (hasHash() || isSafeToComputeHash())
133         out.print(inferredName(), "#", hash(), ":[", RawPointer(this), "->", RawPointer(ownerExecutable()), ", ", jitType, codeType());
134     else
135         out.print(inferredName(), "#<no-hash>:[", RawPointer(this), "->", RawPointer(ownerExecutable()), ", ", jitType, codeType());
136
137     if (codeType() == FunctionCode)
138         out.print(specializationKind());
139     if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
140         out.print(" (SABI)");
141     if (ownerExecutable()->neverInline())
142         out.print(" (NeverInline)");
143     out.print("]");
144 }
145
146 void CodeBlock::dump(PrintStream& out) const
147 {
148     dumpAssumingJITType(out, jitType());
149 }
150
151 static CString constantName(int k, JSValue value)
152 {
153     return toCString(value, "(@k", k - FirstConstantRegisterIndex, ")");
154 }
155
156 static CString idName(int id0, const Identifier& ident)
157 {
158     return toCString(ident.impl(), "(@id", id0, ")");
159 }
160
161 CString CodeBlock::registerName(int r) const
162 {
163     if (r == missingThisObjectMarker())
164         return "<null>";
165
166     if (isConstantRegisterIndex(r))
167         return constantName(r, getConstant(r));
168
169     return toCString("r", r);
170 }
171
172 static CString regexpToSourceString(RegExp* regExp)
173 {
174     char postfix[5] = { '/', 0, 0, 0, 0 };
175     int index = 1;
176     if (regExp->global())
177         postfix[index++] = 'g';
178     if (regExp->ignoreCase())
179         postfix[index++] = 'i';
180     if (regExp->multiline())
181         postfix[index] = 'm';
182
183     return toCString("/", regExp->pattern().impl(), postfix);
184 }
185
186 static CString regexpName(int re, RegExp* regexp)
187 {
188     return toCString(regexpToSourceString(regexp), "(@re", re, ")");
189 }
190
191 NEVER_INLINE static const char* debugHookName(int debugHookID)
192 {
193     switch (static_cast<DebugHookID>(debugHookID)) {
194         case DidEnterCallFrame:
195             return "didEnterCallFrame";
196         case WillLeaveCallFrame:
197             return "willLeaveCallFrame";
198         case WillExecuteStatement:
199             return "willExecuteStatement";
200         case WillExecuteProgram:
201             return "willExecuteProgram";
202         case DidExecuteProgram:
203             return "didExecuteProgram";
204         case DidReachBreakpoint:
205             return "didReachBreakpoint";
206     }
207
208     RELEASE_ASSERT_NOT_REACHED();
209     return "";
210 }
211
212 void CodeBlock::printUnaryOp(PrintStream& out, ExecState*, int location, const Instruction*& it, const char* op)
213 {
214     int r0 = (++it)->u.operand;
215     int r1 = (++it)->u.operand;
216
217     out.printf("[%4d] %s\t\t %s, %s", location, op, registerName(r0).data(), registerName(r1).data());
218 }
219
220 void CodeBlock::printBinaryOp(PrintStream& out, ExecState*, int location, const Instruction*& it, const char* op)
221 {
222     int r0 = (++it)->u.operand;
223     int r1 = (++it)->u.operand;
224     int r2 = (++it)->u.operand;
225     out.printf("[%4d] %s\t\t %s, %s, %s", location, op, registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
226 }
227
228 void CodeBlock::printConditionalJump(PrintStream& out, ExecState*, const Instruction*, const Instruction*& it, int location, const char* op)
229 {
230     int r0 = (++it)->u.operand;
231     int offset = (++it)->u.operand;
232     out.printf("[%4d] %s\t\t %s, %d(->%d)", location, op, registerName(r0).data(), offset, location + offset);
233 }
234
235 void CodeBlock::printGetByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it)
236 {
237     const char* op;
238     switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
239     case op_get_by_id:
240         op = "get_by_id";
241         break;
242     case op_get_by_id_out_of_line:
243         op = "get_by_id_out_of_line";
244         break;
245     case op_get_by_id_self:
246         op = "get_by_id_self";
247         break;
248     case op_get_by_id_proto:
249         op = "get_by_id_proto";
250         break;
251     case op_get_by_id_chain:
252         op = "get_by_id_chain";
253         break;
254     case op_get_by_id_getter_self:
255         op = "get_by_id_getter_self";
256         break;
257     case op_get_by_id_getter_proto:
258         op = "get_by_id_getter_proto";
259         break;
260     case op_get_by_id_getter_chain:
261         op = "get_by_id_getter_chain";
262         break;
263     case op_get_by_id_custom_self:
264         op = "get_by_id_custom_self";
265         break;
266     case op_get_by_id_custom_proto:
267         op = "get_by_id_custom_proto";
268         break;
269     case op_get_by_id_custom_chain:
270         op = "get_by_id_custom_chain";
271         break;
272     case op_get_by_id_generic:
273         op = "get_by_id_generic";
274         break;
275     case op_get_array_length:
276         op = "array_length";
277         break;
278     case op_get_string_length:
279         op = "string_length";
280         break;
281     default:
282         RELEASE_ASSERT_NOT_REACHED();
283         op = 0;
284     }
285     int r0 = (++it)->u.operand;
286     int r1 = (++it)->u.operand;
287     int id0 = (++it)->u.operand;
288     out.printf("[%4d] %s\t %s, %s, %s", location, op, registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
289     it += 4; // Increment up to the value profiler.
290 }
291
292 #if ENABLE(JIT) || ENABLE(LLINT) // unused in some configurations
293 static void dumpStructure(PrintStream& out, const char* name, ExecState* exec, Structure* structure, const Identifier& ident)
294 {
295     if (!structure)
296         return;
297     
298     out.printf("%s = %p", name, structure);
299     
300     PropertyOffset offset = structure->getConcurrently(exec->vm(), ident.impl());
301     if (offset != invalidOffset)
302         out.printf(" (offset = %d)", offset);
303 }
304 #endif
305
306 #if ENABLE(JIT) // dumpChain() is unused when !ENABLE(JIT), which would otherwise trigger unused-function warnings
307 static void dumpChain(PrintStream& out, ExecState* exec, StructureChain* chain, const Identifier& ident)
308 {
309     out.printf("chain = %p: [", chain);
310     bool first = true;
311     for (WriteBarrier<Structure>* currentStructure = chain->head();
312          *currentStructure;
313          ++currentStructure) {
314         if (first)
315             first = false;
316         else
317             out.printf(", ");
318         dumpStructure(out, "struct", exec, currentStructure->get(), ident);
319     }
320     out.printf("]");
321 }
322 #endif
323
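// Dumps the inline-cache state recorded for the get_by_id-style instruction at
// |location|: the structure cached by the LLInt (if any) and, once a JIT stub
// has been seen, its access type together with the structures, prototype
// structures, chains, or polymorphic lists it references.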
324 void CodeBlock::printGetByIdCacheStatus(PrintStream& out, ExecState* exec, int location)
325 {
326     Instruction* instruction = instructions().begin() + location;
327
328     const Identifier& ident = identifier(instruction[3].u.operand);
329     
330     UNUSED_PARAM(ident); // Suppress unused-variable warnings in configurations where ident is not otherwise used.
331     
332 #if ENABLE(LLINT)
333     if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_array_length)
334         out.printf(" llint(array_length)");
335     else if (Structure* structure = instruction[4].u.structure.get()) {
336         out.printf(" llint(");
337         dumpStructure(out, "struct", exec, structure, ident);
338         out.printf(")");
339     }
340 #endif
341
342 #if ENABLE(JIT)
343     if (numberOfStructureStubInfos()) {
344         StructureStubInfo& stubInfo = getStubInfo(location);
345         if (stubInfo.seen) {
346             out.printf(" jit(");
347             
348             Structure* baseStructure = 0;
349             Structure* prototypeStructure = 0;
350             StructureChain* chain = 0;
351             PolymorphicAccessStructureList* structureList = 0;
352             int listSize = 0;
353             
354             switch (stubInfo.accessType) {
355             case access_get_by_id_self:
356                 out.printf("self");
357                 baseStructure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
358                 break;
359             case access_get_by_id_proto:
360                 out.printf("proto");
361                 baseStructure = stubInfo.u.getByIdProto.baseObjectStructure.get();
362                 prototypeStructure = stubInfo.u.getByIdProto.prototypeStructure.get();
363                 break;
364             case access_get_by_id_chain:
365                 out.printf("chain");
366                 baseStructure = stubInfo.u.getByIdChain.baseObjectStructure.get();
367                 chain = stubInfo.u.getByIdChain.chain.get();
368                 break;
369             case access_get_by_id_self_list:
370                 out.printf("self_list");
371                 structureList = stubInfo.u.getByIdSelfList.structureList;
372                 listSize = stubInfo.u.getByIdSelfList.listSize;
373                 break;
374             case access_get_by_id_proto_list:
375                 out.printf("proto_list");
376                 structureList = stubInfo.u.getByIdProtoList.structureList;
377                 listSize = stubInfo.u.getByIdProtoList.listSize;
378                 break;
379             case access_unset:
380                 out.printf("unset");
381                 break;
382             case access_get_by_id_generic:
383                 out.printf("generic");
384                 break;
385             case access_get_array_length:
386                 out.printf("array_length");
387                 break;
388             case access_get_string_length:
389                 out.printf("string_length");
390                 break;
391             default:
392                 RELEASE_ASSERT_NOT_REACHED();
393                 break;
394             }
395             
396             if (baseStructure) {
397                 out.printf(", ");
398                 dumpStructure(out, "struct", exec, baseStructure, ident);
399             }
400             
401             if (prototypeStructure) {
402                 out.printf(", ");
403                 dumpStructure(out, "prototypeStruct", exec, prototypeStructure, ident);
404             }
405             
406             if (chain) {
407                 out.printf(", ");
408                 dumpChain(out, exec, chain, ident);
409             }
410             
411             if (structureList) {
412                 out.printf(", list = %p: [", structureList);
413                 for (int i = 0; i < listSize; ++i) {
414                     if (i)
415                         out.printf(", ");
416                     out.printf("(");
417                     dumpStructure(out, "base", exec, structureList->list[i].base.get(), ident);
418                     if (structureList->list[i].isChain) {
419                         if (structureList->list[i].u.chain.get()) {
420                             out.printf(", ");
421                             dumpChain(out, exec, structureList->list[i].u.chain.get(), ident);
422                         }
423                     } else {
424                         if (structureList->list[i].u.proto.get()) {
425                             out.printf(", ");
426                             dumpStructure(out, "proto", exec, structureList->list[i].u.proto.get(), ident);
427                         }
428                     }
429                     out.printf(")");
430                 }
431                 out.printf("]");
432             }
433             out.printf(")");
434         }
435     }
436 #endif
437 }
438
439 void CodeBlock::printCallOp(PrintStream& out, ExecState*, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling)
440 {
441     int dst = (++it)->u.operand;
442     int func = (++it)->u.operand;
443     int argCount = (++it)->u.operand;
444     int registerOffset = (++it)->u.operand;
445     out.printf("[%4d] %s %s, %s, %d, %d", location, op, registerName(dst).data(), registerName(func).data(), argCount, registerOffset);
446     if (cacheDumpMode == DumpCaches) {
447 #if ENABLE(LLINT)
448         LLIntCallLinkInfo* callLinkInfo = it[1].u.callLinkInfo;
449         if (callLinkInfo->lastSeenCallee) {
450             out.printf(
451                 " llint(%p, exec %p)",
452                 callLinkInfo->lastSeenCallee.get(),
453                 callLinkInfo->lastSeenCallee->executable());
454         }
455 #endif
456 #if ENABLE(JIT)
457         if (numberOfCallLinkInfos()) {
458             JSFunction* target = getCallLinkInfo(location).lastSeenCallee.get();
459             if (target)
460                 out.printf(" jit(%p, exec %p)", target, target->executable());
461         }
462 #endif
463         out.print(" status(", CallLinkStatus::computeFor(this, location), ")");
464     }
465     ++it;
466     dumpArrayProfiling(out, it, hasPrintedProfiling);
467     dumpValueProfiling(out, it, hasPrintedProfiling);
468 }
469
470 void CodeBlock::printPutByIdOp(PrintStream& out, ExecState*, int location, const Instruction*& it, const char* op)
471 {
472     int r0 = (++it)->u.operand;
473     int id0 = (++it)->u.operand;
474     int r1 = (++it)->u.operand;
475     out.printf("[%4d] %s\t %s, %s, %s", location, op, registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data());
476     it += 5;
477 }
478
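// Prints a human-readable dump of the whole code block: a summary line with
// instruction, parameter, and register counts, followed by every bytecode
// instruction and then the identifier, constant, regexp, exception-handler,
// and switch-jump-table sections that are present.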
479 void CodeBlock::dumpBytecode(PrintStream& out)
480 {
481     // We only use the ExecState* for things that don't actually lead to JS execution,
482     // like converting a JSString to a String. Hence the globalExec is appropriate.
483     ExecState* exec = m_globalObject->globalExec();
484     
485     size_t instructionCount = 0;
486
487     for (size_t i = 0; i < instructions().size(); i += opcodeLengths[exec->interpreter()->getOpcodeID(instructions()[i].u.opcode)])
488         ++instructionCount;
489
490     out.print(*this);
491     out.printf(
492         ": %lu m_instructions; %lu bytes; %d parameter(s); %d callee register(s); %d variable(s)",
493         static_cast<unsigned long>(instructions().size()),
494         static_cast<unsigned long>(instructions().size() * sizeof(Instruction)),
495         m_numParameters, m_numCalleeRegisters, m_numVars);
496     if (symbolTable() && symbolTable()->captureCount()) {
497         out.printf(
498             "; %d captured var(s) (from r%d to r%d, inclusive)",
499             symbolTable()->captureCount(), symbolTable()->captureStart(), symbolTable()->captureEnd() - 1);
500     }
501     if (usesArguments()) {
502         out.printf(
503             "; uses arguments, in r%d, r%d",
504             argumentsRegister(),
505             unmodifiedArgumentsRegister(argumentsRegister()));
506     }
507     if (needsFullScopeChain() && codeType() == FunctionCode)
508         out.printf("; activation in r%d", activationRegister());
509
510     const Instruction* begin = instructions().begin();
511     const Instruction* end = instructions().end();
512     for (const Instruction* it = begin; it != end; ++it)
513         dumpBytecode(out, exec, begin, it);
514
515     if (numberOfIdentifiers()) {
516         out.printf("\nIdentifiers:\n");
517         size_t i = 0;
518         do {
519             out.printf("  id%u = %s\n", static_cast<unsigned>(i), identifier(i).string().utf8().data());
520             ++i;
521         } while (i != numberOfIdentifiers());
522     }
523
524     if (!m_constantRegisters.isEmpty()) {
525         out.printf("\nConstants:\n");
526         size_t i = 0;
527         do {
528             out.printf("   k%u = %s\n", static_cast<unsigned>(i), toCString(m_constantRegisters[i].get()).data());
529             ++i;
530         } while (i < m_constantRegisters.size());
531     }
532
533     if (size_t count = m_unlinkedCode->numberOfRegExps()) {
534         out.printf("\nm_regexps:\n");
535         size_t i = 0;
536         do {
537             out.printf("  re%u = %s\n", static_cast<unsigned>(i), regexpToSourceString(m_unlinkedCode->regexp(i)).data());
538             ++i;
539         } while (i < count);
540     }
541
542 #if ENABLE(JIT)
543     if (!m_structureStubInfos.isEmpty())
544         out.printf("\nStructures:\n");
545 #endif
546
547     if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
548         out.printf("\nException Handlers:\n");
549         unsigned i = 0;
550         do {
551             out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] depth: [%4d] }\n", i + 1, m_rareData->m_exceptionHandlers[i].start, m_rareData->m_exceptionHandlers[i].end, m_rareData->m_exceptionHandlers[i].target, m_rareData->m_exceptionHandlers[i].scopeDepth);
552             ++i;
553         } while (i < m_rareData->m_exceptionHandlers.size());
554     }
555     
556     if (m_rareData && !m_rareData->m_switchJumpTables.isEmpty()) {
557         out.printf("Switch Jump Tables:\n");
558         unsigned i = 0;
559         do {
560             out.printf("  %1d = {\n", i);
561             int entry = 0;
562             Vector<int32_t>::const_iterator end = m_rareData->m_switchJumpTables[i].branchOffsets.end();
563             for (Vector<int32_t>::const_iterator iter = m_rareData->m_switchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) {
564                 if (!*iter)
565                     continue;
566                 out.printf("\t\t%4d => %04d\n", entry + m_rareData->m_switchJumpTables[i].min, *iter);
567             }
568             out.printf("      }\n");
569             ++i;
570         } while (i < m_rareData->m_switchJumpTables.size());
571     }
572     
573     if (m_rareData && !m_rareData->m_stringSwitchJumpTables.isEmpty()) {
574         out.printf("\nString Switch Jump Tables:\n");
575         unsigned i = 0;
576         do {
577             out.printf("  %1d = {\n", i);
578             StringJumpTable::StringOffsetTable::const_iterator end = m_rareData->m_stringSwitchJumpTables[i].offsetTable.end();
579             for (StringJumpTable::StringOffsetTable::const_iterator iter = m_rareData->m_stringSwitchJumpTables[i].offsetTable.begin(); iter != end; ++iter)
580                 out.printf("\t\t\"%s\" => %04d\n", String(iter->key).utf8().data(), iter->value.branchOffset);
581             out.printf("      }\n");
582             ++i;
583         } while (i < m_rareData->m_stringSwitchJumpTables.size());
584     }
585
586     out.printf("\n");
587 }
588
589 void CodeBlock::beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling)
590 {
591     if (hasPrintedProfiling) {
592         out.print("; ");
593         return;
594     }
595     
596     out.print("    ");
597     hasPrintedProfiling = true;
598 }
599
600 void CodeBlock::dumpValueProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
601 {
602     ConcurrentJITLocker locker(m_lock);
603     
604     ++it;
605 #if ENABLE(VALUE_PROFILER)
606     CString description = it->u.profile->briefDescription(locker);
607     if (!description.length())
608         return;
609     beginDumpProfiling(out, hasPrintedProfiling);
610     out.print(description);
611 #else
612     UNUSED_PARAM(out);
613     UNUSED_PARAM(hasPrintedProfiling);
614 #endif
615 }
616
617 void CodeBlock::dumpArrayProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
618 {
619     ConcurrentJITLocker locker(m_lock);
620     
621     ++it;
622 #if ENABLE(VALUE_PROFILER)
623     if (!it->u.arrayProfile)
624         return;
625     CString description = it->u.arrayProfile->briefDescription(locker, this);
626     if (!description.length())
627         return;
628     beginDumpProfiling(out, hasPrintedProfiling);
629     out.print(description);
630 #else
631     UNUSED_PARAM(out);
632     UNUSED_PARAM(hasPrintedProfiling);
633 #endif
634 }
635
636 #if ENABLE(VALUE_PROFILER)
637 void CodeBlock::dumpRareCaseProfile(PrintStream& out, const char* name, RareCaseProfile* profile, bool& hasPrintedProfiling)
638 {
639     if (!profile || !profile->m_counter)
640         return;
641
642     beginDumpProfiling(out, hasPrintedProfiling);
643     out.print(name, profile->m_counter);
644 }
645 #endif
646
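// Decodes and prints the single instruction at |it|, advancing the iterator
// past its operands. Any value/array profiling data, rare-case profiles, and
// frequent OSR exit sites recorded for this bytecode offset are appended to
// the same line.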
647 void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it)
648 {
649     int location = it - begin;
650     bool hasPrintedProfiling = false;
651     switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
652         case op_enter: {
653             out.printf("[%4d] enter", location);
654             break;
655         }
656         case op_create_activation: {
657             int r0 = (++it)->u.operand;
658             out.printf("[%4d] create_activation %s", location, registerName(r0).data());
659             break;
660         }
661         case op_create_arguments: {
662             int r0 = (++it)->u.operand;
663             out.printf("[%4d] create_arguments\t %s", location, registerName(r0).data());
664             break;
665         }
666         case op_init_lazy_reg: {
667             int r0 = (++it)->u.operand;
668             out.printf("[%4d] init_lazy_reg\t %s", location, registerName(r0).data());
669             break;
670         }
671         case op_get_callee: {
672             int r0 = (++it)->u.operand;
673             out.printf("[%4d] get_callee %s", location, registerName(r0).data());
674             ++it;
675             break;
676         }
677         case op_create_this: {
678             int r0 = (++it)->u.operand;
679             int r1 = (++it)->u.operand;
680             unsigned inferredInlineCapacity = (++it)->u.operand;
681             out.printf("[%4d] create_this %s, %s, %u", location, registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity);
682             break;
683         }
684         case op_to_this: {
685             int r0 = (++it)->u.operand;
686             out.printf("[%4d] to_this\t %s", location, registerName(r0).data());
687             ++it; // Skip value profile.
688             break;
689         }
690         case op_new_object: {
691             int r0 = (++it)->u.operand;
692             unsigned inferredInlineCapacity = (++it)->u.operand;
693             out.printf("[%4d] new_object\t %s, %u", location, registerName(r0).data(), inferredInlineCapacity);
694             ++it; // Skip object allocation profile.
695             break;
696         }
697         case op_new_array: {
698             int dst = (++it)->u.operand;
699             int argv = (++it)->u.operand;
700             int argc = (++it)->u.operand;
701             out.printf("[%4d] new_array\t %s, %s, %d", location, registerName(dst).data(), registerName(argv).data(), argc);
702             ++it; // Skip array allocation profile.
703             break;
704         }
705         case op_new_array_with_size: {
706             int dst = (++it)->u.operand;
707             int length = (++it)->u.operand;
708             out.printf("[%4d] new_array_with_size\t %s, %s", location, registerName(dst).data(), registerName(length).data());
709             ++it; // Skip array allocation profile.
710             break;
711         }
712         case op_new_array_buffer: {
713             int dst = (++it)->u.operand;
714             int argv = (++it)->u.operand;
715             int argc = (++it)->u.operand;
716             out.printf("[%4d] new_array_buffer\t %s, %d, %d", location, registerName(dst).data(), argv, argc);
717             ++it; // Skip array allocation profile.
718             break;
719         }
720         case op_new_regexp: {
721             int r0 = (++it)->u.operand;
722             int re0 = (++it)->u.operand;
723             out.printf("[%4d] new_regexp\t %s, ", location, registerName(r0).data());
724             if (re0 >= 0 && re0 < (int)m_unlinkedCode->numberOfRegExps())
725                 out.printf("%s", regexpName(re0, regexp(re0)).data());
726             else
727                 out.printf("bad_regexp(%d)", re0);
728             break;
729         }
730         case op_mov: {
731             int r0 = (++it)->u.operand;
732             int r1 = (++it)->u.operand;
733             out.printf("[%4d] mov\t\t %s, %s", location, registerName(r0).data(), registerName(r1).data());
734             break;
735         }
736         case op_not: {
737             printUnaryOp(out, exec, location, it, "not");
738             break;
739         }
740         case op_eq: {
741             printBinaryOp(out, exec, location, it, "eq");
742             break;
743         }
744         case op_eq_null: {
745             printUnaryOp(out, exec, location, it, "eq_null");
746             break;
747         }
748         case op_neq: {
749             printBinaryOp(out, exec, location, it, "neq");
750             break;
751         }
752         case op_neq_null: {
753             printUnaryOp(out, exec, location, it, "neq_null");
754             break;
755         }
756         case op_stricteq: {
757             printBinaryOp(out, exec, location, it, "stricteq");
758             break;
759         }
760         case op_nstricteq: {
761             printBinaryOp(out, exec, location, it, "nstricteq");
762             break;
763         }
764         case op_less: {
765             printBinaryOp(out, exec, location, it, "less");
766             break;
767         }
768         case op_lesseq: {
769             printBinaryOp(out, exec, location, it, "lesseq");
770             break;
771         }
772         case op_greater: {
773             printBinaryOp(out, exec, location, it, "greater");
774             break;
775         }
776         case op_greatereq: {
777             printBinaryOp(out, exec, location, it, "greatereq");
778             break;
779         }
780         case op_inc: {
781             int r0 = (++it)->u.operand;
782             out.printf("[%4d] pre_inc\t\t %s", location, registerName(r0).data());
783             break;
784         }
785         case op_dec: {
786             int r0 = (++it)->u.operand;
787             out.printf("[%4d] pre_dec\t\t %s", location, registerName(r0).data());
788             break;
789         }
790         case op_to_number: {
791             printUnaryOp(out, exec, location, it, "to_number");
792             break;
793         }
794         case op_negate: {
795             printUnaryOp(out, exec, location, it, "negate");
796             break;
797         }
798         case op_add: {
799             printBinaryOp(out, exec, location, it, "add");
800             ++it;
801             break;
802         }
803         case op_mul: {
804             printBinaryOp(out, exec, location, it, "mul");
805             ++it;
806             break;
807         }
808         case op_div: {
809             printBinaryOp(out, exec, location, it, "div");
810             ++it;
811             break;
812         }
813         case op_mod: {
814             printBinaryOp(out, exec, location, it, "mod");
815             break;
816         }
817         case op_sub: {
818             printBinaryOp(out, exec, location, it, "sub");
819             ++it;
820             break;
821         }
822         case op_lshift: {
823             printBinaryOp(out, exec, location, it, "lshift");
824             break;
825         }
826         case op_rshift: {
827             printBinaryOp(out, exec, location, it, "rshift");
828             break;
829         }
830         case op_urshift: {
831             printBinaryOp(out, exec, location, it, "urshift");
832             break;
833         }
834         case op_bitand: {
835             printBinaryOp(out, exec, location, it, "bitand");
836             ++it;
837             break;
838         }
839         case op_bitxor: {
840             printBinaryOp(out, exec, location, it, "bitxor");
841             ++it;
842             break;
843         }
844         case op_bitor: {
845             printBinaryOp(out, exec, location, it, "bitor");
846             ++it;
847             break;
848         }
849         case op_check_has_instance: {
850             int r0 = (++it)->u.operand;
851             int r1 = (++it)->u.operand;
852             int r2 = (++it)->u.operand;
853             int offset = (++it)->u.operand;
854             out.printf("[%4d] check_has_instance\t\t %s, %s, %s, %d(->%d)", location, registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), offset, location + offset);
855             break;
856         }
857         case op_instanceof: {
858             int r0 = (++it)->u.operand;
859             int r1 = (++it)->u.operand;
860             int r2 = (++it)->u.operand;
861             out.printf("[%4d] instanceof\t\t %s, %s, %s", location, registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
862             break;
863         }
864         case op_typeof: {
865             printUnaryOp(out, exec, location, it, "typeof");
866             break;
867         }
868         case op_is_undefined: {
869             printUnaryOp(out, exec, location, it, "is_undefined");
870             break;
871         }
872         case op_is_boolean: {
873             printUnaryOp(out, exec, location, it, "is_boolean");
874             break;
875         }
876         case op_is_number: {
877             printUnaryOp(out, exec, location, it, "is_number");
878             break;
879         }
880         case op_is_string: {
881             printUnaryOp(out, exec, location, it, "is_string");
882             break;
883         }
884         case op_is_object: {
885             printUnaryOp(out, exec, location, it, "is_object");
886             break;
887         }
888         case op_is_function: {
889             printUnaryOp(out, exec, location, it, "is_function");
890             break;
891         }
892         case op_in: {
893             printBinaryOp(out, exec, location, it, "in");
894             break;
895         }
896         case op_init_global_const_nop: {
897             out.printf("[%4d] init_global_const_nop\t", location);
898             it++;
899             it++;
900             it++;
901             it++;
902             break;
903         }
904         case op_init_global_const: {
905             WriteBarrier<Unknown>* registerPointer = (++it)->u.registerPointer;
906             int r0 = (++it)->u.operand;
907             out.printf("[%4d] init_global_const\t g%d(%p), %s", location, m_globalObject->findRegisterIndex(registerPointer), registerPointer, registerName(r0).data());
908             it++;
909             it++;
910             break;
911         }
912         case op_get_by_id:
913         case op_get_by_id_out_of_line:
914         case op_get_by_id_self:
915         case op_get_by_id_proto:
916         case op_get_by_id_chain:
917         case op_get_by_id_getter_self:
918         case op_get_by_id_getter_proto:
919         case op_get_by_id_getter_chain:
920         case op_get_by_id_custom_self:
921         case op_get_by_id_custom_proto:
922         case op_get_by_id_custom_chain:
923         case op_get_by_id_generic:
924         case op_get_array_length:
925         case op_get_string_length: {
926             printGetByIdOp(out, exec, location, it);
927             printGetByIdCacheStatus(out, exec, location);
928             dumpValueProfiling(out, it, hasPrintedProfiling);
929             break;
930         }
931         case op_get_arguments_length: {
932             printUnaryOp(out, exec, location, it, "get_arguments_length");
933             it++;
934             break;
935         }
936         case op_put_by_id: {
937             printPutByIdOp(out, exec, location, it, "put_by_id");
938             break;
939         }
940         case op_put_by_id_out_of_line: {
941             printPutByIdOp(out, exec, location, it, "put_by_id_out_of_line");
942             break;
943         }
944         case op_put_by_id_replace: {
945             printPutByIdOp(out, exec, location, it, "put_by_id_replace");
946             break;
947         }
948         case op_put_by_id_transition: {
949             printPutByIdOp(out, exec, location, it, "put_by_id_transition");
950             break;
951         }
952         case op_put_by_id_transition_direct: {
953             printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct");
954             break;
955         }
956         case op_put_by_id_transition_direct_out_of_line: {
957             printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct_out_of_line");
958             break;
959         }
960         case op_put_by_id_transition_normal: {
961             printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal");
962             break;
963         }
964         case op_put_by_id_transition_normal_out_of_line: {
965             printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal_out_of_line");
966             break;
967         }
968         case op_put_by_id_generic: {
969             printPutByIdOp(out, exec, location, it, "put_by_id_generic");
970             break;
971         }
972         case op_put_getter_setter: {
973             int r0 = (++it)->u.operand;
974             int id0 = (++it)->u.operand;
975             int r1 = (++it)->u.operand;
976             int r2 = (++it)->u.operand;
977             out.printf("[%4d] put_getter_setter\t %s, %s, %s, %s", location, registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data(), registerName(r2).data());
978             break;
979         }
980         case op_del_by_id: {
981             int r0 = (++it)->u.operand;
982             int r1 = (++it)->u.operand;
983             int id0 = (++it)->u.operand;
984             out.printf("[%4d] del_by_id\t %s, %s, %s", location, registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
985             break;
986         }
987         case op_get_by_val: {
988             int r0 = (++it)->u.operand;
989             int r1 = (++it)->u.operand;
990             int r2 = (++it)->u.operand;
991             out.printf("[%4d] get_by_val\t %s, %s, %s", location, registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
992             dumpArrayProfiling(out, it, hasPrintedProfiling);
993             dumpValueProfiling(out, it, hasPrintedProfiling);
994             break;
995         }
996         case op_get_argument_by_val: {
997             int r0 = (++it)->u.operand;
998             int r1 = (++it)->u.operand;
999             int r2 = (++it)->u.operand;
1000             out.printf("[%4d] get_argument_by_val\t %s, %s, %s", location, registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1001             ++it;
1002             dumpValueProfiling(out, it, hasPrintedProfiling);
1003             break;
1004         }
1005         case op_get_by_pname: {
1006             int r0 = (++it)->u.operand;
1007             int r1 = (++it)->u.operand;
1008             int r2 = (++it)->u.operand;
1009             int r3 = (++it)->u.operand;
1010             int r4 = (++it)->u.operand;
1011             int r5 = (++it)->u.operand;
1012             out.printf("[%4d] get_by_pname\t %s, %s, %s, %s, %s, %s", location, registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data(), registerName(r4).data(), registerName(r5).data());
1013             break;
1014         }
1015         case op_put_by_val: {
1016             int r0 = (++it)->u.operand;
1017             int r1 = (++it)->u.operand;
1018             int r2 = (++it)->u.operand;
1019             out.printf("[%4d] put_by_val\t %s, %s, %s", location, registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1020             dumpArrayProfiling(out, it, hasPrintedProfiling);
1021             break;
1022         }
1023         case op_del_by_val: {
1024             int r0 = (++it)->u.operand;
1025             int r1 = (++it)->u.operand;
1026             int r2 = (++it)->u.operand;
1027             out.printf("[%4d] del_by_val\t %s, %s, %s", location, registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1028             break;
1029         }
1030         case op_put_by_index: {
1031             int r0 = (++it)->u.operand;
1032             unsigned n0 = (++it)->u.operand;
1033             int r1 = (++it)->u.operand;
1034             out.printf("[%4d] put_by_index\t %s, %u, %s", location, registerName(r0).data(), n0, registerName(r1).data());
1035             break;
1036         }
1037         case op_jmp: {
1038             int offset = (++it)->u.operand;
1039             out.printf("[%4d] jmp\t\t %d(->%d)", location, offset, location + offset);
1040             break;
1041         }
1042         case op_jtrue: {
1043             printConditionalJump(out, exec, begin, it, location, "jtrue");
1044             break;
1045         }
1046         case op_jfalse: {
1047             printConditionalJump(out, exec, begin, it, location, "jfalse");
1048             break;
1049         }
1050         case op_jeq_null: {
1051             printConditionalJump(out, exec, begin, it, location, "jeq_null");
1052             break;
1053         }
1054         case op_jneq_null: {
1055             printConditionalJump(out, exec, begin, it, location, "jneq_null");
1056             break;
1057         }
1058         case op_jneq_ptr: {
1059             int r0 = (++it)->u.operand;
1060             Special::Pointer pointer = (++it)->u.specialPointer;
1061             int offset = (++it)->u.operand;
1062             out.printf("[%4d] jneq_ptr\t\t %s, %d (%p), %d(->%d)", location, registerName(r0).data(), pointer, m_globalObject->actualPointerFor(pointer), offset, location + offset);
1063             break;
1064         }
1065         case op_jless: {
1066             int r0 = (++it)->u.operand;
1067             int r1 = (++it)->u.operand;
1068             int offset = (++it)->u.operand;
1069             out.printf("[%4d] jless\t\t %s, %s, %d(->%d)", location, registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1070             break;
1071         }
1072         case op_jlesseq: {
1073             int r0 = (++it)->u.operand;
1074             int r1 = (++it)->u.operand;
1075             int offset = (++it)->u.operand;
1076             out.printf("[%4d] jlesseq\t\t %s, %s, %d(->%d)", location, registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1077             break;
1078         }
1079         case op_jgreater: {
1080             int r0 = (++it)->u.operand;
1081             int r1 = (++it)->u.operand;
1082             int offset = (++it)->u.operand;
1083             out.printf("[%4d] jgreater\t\t %s, %s, %d(->%d)", location, registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1084             break;
1085         }
1086         case op_jgreatereq: {
1087             int r0 = (++it)->u.operand;
1088             int r1 = (++it)->u.operand;
1089             int offset = (++it)->u.operand;
1090             out.printf("[%4d] jgreatereq\t\t %s, %s, %d(->%d)", location, registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1091             break;
1092         }
1093         case op_jnless: {
1094             int r0 = (++it)->u.operand;
1095             int r1 = (++it)->u.operand;
1096             int offset = (++it)->u.operand;
1097             out.printf("[%4d] jnless\t\t %s, %s, %d(->%d)", location, registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1098             break;
1099         }
1100         case op_jnlesseq: {
1101             int r0 = (++it)->u.operand;
1102             int r1 = (++it)->u.operand;
1103             int offset = (++it)->u.operand;
1104             out.printf("[%4d] jnlesseq\t\t %s, %s, %d(->%d)", location, registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1105             break;
1106         }
1107         case op_jngreater: {
1108             int r0 = (++it)->u.operand;
1109             int r1 = (++it)->u.operand;
1110             int offset = (++it)->u.operand;
1111             out.printf("[%4d] jngreater\t\t %s, %s, %d(->%d)", location, registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1112             break;
1113         }
1114         case op_jngreatereq: {
1115             int r0 = (++it)->u.operand;
1116             int r1 = (++it)->u.operand;
1117             int offset = (++it)->u.operand;
1118             out.printf("[%4d] jngreatereq\t\t %s, %s, %d(->%d)", location, registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1119             break;
1120         }
1121         case op_loop_hint: {
1122             out.printf("[%4d] loop_hint", location);
1123             break;
1124         }
1125         case op_switch_imm: {
1126             int tableIndex = (++it)->u.operand;
1127             int defaultTarget = (++it)->u.operand;
1128             int scrutineeRegister = (++it)->u.operand;
1129             out.printf("[%4d] switch_imm\t %d, %d(->%d), %s", location, tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1130             break;
1131         }
1132         case op_switch_char: {
1133             int tableIndex = (++it)->u.operand;
1134             int defaultTarget = (++it)->u.operand;
1135             int scrutineeRegister = (++it)->u.operand;
1136             out.printf("[%4d] switch_char\t %d, %d(->%d), %s", location, tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1137             break;
1138         }
1139         case op_switch_string: {
1140             int tableIndex = (++it)->u.operand;
1141             int defaultTarget = (++it)->u.operand;
1142             int scrutineeRegister = (++it)->u.operand;
1143             out.printf("[%4d] switch_string\t %d, %d(->%d), %s", location, tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1144             break;
1145         }
1146         case op_new_func: {
1147             int r0 = (++it)->u.operand;
1148             int f0 = (++it)->u.operand;
1149             int shouldCheck = (++it)->u.operand;
1150             out.printf("[%4d] new_func\t\t %s, f%d, %s", location, registerName(r0).data(), f0, shouldCheck ? "<Checked>" : "<Unchecked>");
1151             break;
1152         }
1153         case op_new_func_exp: {
1154             int r0 = (++it)->u.operand;
1155             int f0 = (++it)->u.operand;
1156             out.printf("[%4d] new_func_exp\t %s, f%d", location, registerName(r0).data(), f0);
1157             break;
1158         }
1159         case op_call: {
1160             printCallOp(out, exec, location, it, "call", DumpCaches, hasPrintedProfiling);
1161             break;
1162         }
1163         case op_call_eval: {
1164             printCallOp(out, exec, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling);
1165             break;
1166         }
1167         case op_call_varargs: {
1168             int result = (++it)->u.operand;
1169             int callee = (++it)->u.operand;
1170             int thisValue = (++it)->u.operand;
1171             int arguments = (++it)->u.operand;
1172             int firstFreeRegister = (++it)->u.operand;
1173             ++it;
1174             out.printf("[%4d] call_varargs\t %s, %s, %s, %s, %d", location, registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister);
1175             dumpValueProfiling(out, it, hasPrintedProfiling);
1176             break;
1177         }
1178         case op_tear_off_activation: {
1179             int r0 = (++it)->u.operand;
1180             out.printf("[%4d] tear_off_activation\t %s", location, registerName(r0).data());
1181             break;
1182         }
1183         case op_tear_off_arguments: {
1184             int r0 = (++it)->u.operand;
1185             int r1 = (++it)->u.operand;
1186             out.printf("[%4d] tear_off_arguments %s, %s", location, registerName(r0).data(), registerName(r1).data());
1187             break;
1188         }
1189         case op_ret: {
1190             int r0 = (++it)->u.operand;
1191             out.printf("[%4d] ret\t\t %s", location, registerName(r0).data());
1192             break;
1193         }
1194         case op_ret_object_or_this: {
1195             int r0 = (++it)->u.operand;
1196             int r1 = (++it)->u.operand;
1197             out.printf("[%4d] constructor_ret\t\t %s, %s", location, registerName(r0).data(), registerName(r1).data());
1198             break;
1199         }
1200         case op_construct: {
1201             printCallOp(out, exec, location, it, "construct", DumpCaches, hasPrintedProfiling);
1202             break;
1203         }
1204         case op_strcat: {
1205             int r0 = (++it)->u.operand;
1206             int r1 = (++it)->u.operand;
1207             int count = (++it)->u.operand;
1208             out.printf("[%4d] strcat\t\t %s, %s, %d", location, registerName(r0).data(), registerName(r1).data(), count);
1209             break;
1210         }
1211         case op_to_primitive: {
1212             int r0 = (++it)->u.operand;
1213             int r1 = (++it)->u.operand;
1214             out.printf("[%4d] to_primitive\t %s, %s", location, registerName(r0).data(), registerName(r1).data());
1215             break;
1216         }
1217         case op_get_pnames: {
1218             int r0 = it[1].u.operand;
1219             int r1 = it[2].u.operand;
1220             int r2 = it[3].u.operand;
1221             int r3 = it[4].u.operand;
1222             int offset = it[5].u.operand;
1223             out.printf("[%4d] get_pnames\t %s, %s, %s, %s, %d(->%d)", location, registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data(), offset, location + offset);
1224             it += OPCODE_LENGTH(op_get_pnames) - 1;
1225             break;
1226         }
1227         case op_next_pname: {
1228             int dest = it[1].u.operand;
1229             int base = it[2].u.operand;
1230             int i = it[3].u.operand;
1231             int size = it[4].u.operand;
1232             int iter = it[5].u.operand;
1233             int offset = it[6].u.operand;
1234             out.printf("[%4d] next_pname\t %s, %s, %s, %s, %s, %d(->%d)", location, registerName(dest).data(), registerName(base).data(), registerName(i).data(), registerName(size).data(), registerName(iter).data(), offset, location + offset);
1235             it += OPCODE_LENGTH(op_next_pname) - 1;
1236             break;
1237         }
1238         case op_push_with_scope: {
1239             int r0 = (++it)->u.operand;
1240             out.printf("[%4d] push_with_scope\t %s", location, registerName(r0).data());
1241             break;
1242         }
1243         case op_pop_scope: {
1244             out.printf("[%4d] pop_scope", location);
1245             break;
1246         }
1247         case op_push_name_scope: {
1248             int id0 = (++it)->u.operand;
1249             int r1 = (++it)->u.operand;
1250             unsigned attributes = (++it)->u.operand;
1251             out.printf("[%4d] push_name_scope \t%s, %s, %u", location, idName(id0, identifier(id0)).data(), registerName(r1).data(), attributes);
1252             break;
1253         }
1254         case op_catch: {
1255             int r0 = (++it)->u.operand;
1256             out.printf("[%4d] catch\t\t %s", location, registerName(r0).data());
1257             break;
1258         }
1259         case op_throw: {
1260             int r0 = (++it)->u.operand;
1261             out.printf("[%4d] throw\t\t %s", location, registerName(r0).data());
1262             break;
1263         }
1264         case op_throw_static_error: {
1265             int k0 = (++it)->u.operand;
1266             int k1 = (++it)->u.operand;
1267             out.printf("[%4d] throw_static_error\t %s, %s", location, constantName(k0, getConstant(k0)).data(), k1 ? "true" : "false");
1268             break;
1269         }
1270         case op_debug: {
1271             int debugHookID = (++it)->u.operand;
1272             int firstLine = (++it)->u.operand;
1273             int lastLine = (++it)->u.operand;
1274             int column = (++it)->u.operand;
1275             out.printf("[%4d] debug\t\t %s, %d, %d, %d", location, debugHookName(debugHookID), firstLine, lastLine, column);
1276             break;
1277         }
1278         case op_profile_will_call: {
1279             int function = (++it)->u.operand;
1280             out.printf("[%4d] profile_will_call %s", location, registerName(function).data());
1281             break;
1282         }
1283         case op_profile_did_call: {
1284             int function = (++it)->u.operand;
1285             out.printf("[%4d] profile_did_call\t %s", location, registerName(function).data());
1286             break;
1287         }
1288         case op_end: {
1289             int r0 = (++it)->u.operand;
1290             out.printf("[%4d] end\t\t %s", location, registerName(r0).data());
1291             break;
1292         }
1293         case op_resolve_scope: {
1294             int r0 = (++it)->u.operand;
1295             int id0 = (++it)->u.operand;
1296             int resolveModeAndType = (++it)->u.operand;
1297             ++it; // depth
1298             out.printf("[%4d] resolve_scope\t %s, %s, %d", location, registerName(r0).data(), idName(id0, identifier(id0)).data(), resolveModeAndType);
1299             break;
1300         }
1301         case op_get_from_scope: {
1302             int r0 = (++it)->u.operand;
1303             int r1 = (++it)->u.operand;
1304             int id0 = (++it)->u.operand;
1305             int resolveModeAndType = (++it)->u.operand;
1306             ++it; // Structure
1307             ++it; // Operand
1308             ++it; // Skip value profile.
1309             out.printf("[%4d] get_from_scope\t %s, %s, %s, %d", location, registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data(), resolveModeAndType);
1310             break;
1311         }
1312         case op_put_to_scope: {
1313             int r0 = (++it)->u.operand;
1314             int id0 = (++it)->u.operand;
1315             int r1 = (++it)->u.operand;
1316             int resolveModeAndType = (++it)->u.operand;
1317             ++it; // Structure
1318             ++it; // Operand
1319             out.printf("[%4d] put_to_scope\t %s, %s, %s, %d", location, registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data(), resolveModeAndType);
1320             break;
1321         }
1322 #if ENABLE(LLINT_C_LOOP)
1323         default:
1324             RELEASE_ASSERT_NOT_REACHED();
1325 #endif
1326     }
1327
1328 #if ENABLE(VALUE_PROFILER)
1329     dumpRareCaseProfile(out, "rare case: ", rareCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
1330     dumpRareCaseProfile(out, "special fast case: ", specialFastCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
1331 #endif
1332     
1333 #if ENABLE(DFG_JIT)
1334     Vector<DFG::FrequentExitSite> exitSites = exitProfile().exitSitesFor(location);
1335     if (!exitSites.isEmpty()) {
1336         out.print(" !! frequent exits: ");
1337         CommaPrinter comma;
1338         for (unsigned i = 0; i < exitSites.size(); ++i)
1339             out.print(comma, exitSites[i].kind());
1340     }
1341 #else // ENABLE(DFG_JIT)
1342     UNUSED_PARAM(location);
1343 #endif // ENABLE(DFG_JIT)
1344     out.print("\n");
1345 }
1346
1347 void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset)
1348 {
1349     ExecState* exec = m_globalObject->globalExec();
1350     const Instruction* it = instructions().begin() + bytecodeOffset;
1351     dumpBytecode(out, exec, instructions().begin(), it);
1352 }
1353
1354 #if DUMP_CODE_BLOCK_STATISTICS
1355 static HashSet<CodeBlock*> liveCodeBlockSet;
1356 #endif
1357
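     // X-macro lists of the member vectors whose occupancy and total size are tallied by
     // dumpStatistics() below. For example, DEFINE_VARS(instructions) there expands to:
     //     size_t instructionsIsNotEmpty = 0; size_t instructionsTotalSize = 0;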
1358 #define FOR_EACH_MEMBER_VECTOR(macro) \
1359     macro(instructions) \
1360     macro(structureStubInfos) \
1361     macro(callLinkInfos) \
1362     macro(linkedCallerList) \
1363     macro(identifiers) \
1364     macro(functionExpressions) \
1365     macro(constantRegisters)
1366
1367 #define FOR_EACH_MEMBER_VECTOR_RARE_DATA(macro) \
1368     macro(regexps) \
1369     macro(functions) \
1370     macro(exceptionHandlers) \
1371     macro(switchJumpTables) \
1372     macro(stringSwitchJumpTables) \
1373     macro(evalCodeCache) \
1374     macro(expressionInfo) \
1375     macro(lineInfo) \
1376     macro(callReturnIndexVector)
1377
1378 template<typename T>
1379 static size_t sizeInBytes(const Vector<T>& vector)
1380 {
1381     return vector.capacity() * sizeof(T);
1382 }
1383
1384 void CodeBlock::dumpStatistics()
1385 {
1386 #if DUMP_CODE_BLOCK_STATISTICS
1387     #define DEFINE_VARS(name) size_t name##IsNotEmpty = 0; size_t name##TotalSize = 0;
1388         FOR_EACH_MEMBER_VECTOR(DEFINE_VARS)
1389         FOR_EACH_MEMBER_VECTOR_RARE_DATA(DEFINE_VARS)
1390     #undef DEFINE_VARS
1391
1392     // Non-vector data members
1393     size_t evalCodeCacheIsNotEmpty = 0;
1394
1395     size_t symbolTableIsNotEmpty = 0;
1396     size_t symbolTableTotalSize = 0;
1397
1398     size_t hasRareData = 0;
1399
1400     size_t isFunctionCode = 0;
1401     size_t isGlobalCode = 0;
1402     size_t isEvalCode = 0;
1403
1404     HashSet<CodeBlock*>::const_iterator end = liveCodeBlockSet.end();
1405     for (HashSet<CodeBlock*>::const_iterator it = liveCodeBlockSet.begin(); it != end; ++it) {
1406         CodeBlock* codeBlock = *it;
1407
1408         #define GET_STATS(name) if (!codeBlock->m_##name.isEmpty()) { name##IsNotEmpty++; name##TotalSize += sizeInBytes(codeBlock->m_##name); }
1409             FOR_EACH_MEMBER_VECTOR(GET_STATS)
1410         #undef GET_STATS
1411
1412         if (codeBlock->symbolTable() && !codeBlock->symbolTable()->isEmpty()) {
1413             symbolTableIsNotEmpty++;
1414             symbolTableTotalSize += (codeBlock->symbolTable()->capacity() * (sizeof(SymbolTable::KeyType) + sizeof(SymbolTable::MappedType)));
1415         }
1416
1417         if (codeBlock->m_rareData) {
1418             hasRareData++;
1419             #define GET_STATS(name) if (!codeBlock->m_rareData->m_##name.isEmpty()) { name##IsNotEmpty++; name##TotalSize += sizeInBytes(codeBlock->m_rareData->m_##name); }
1420                 FOR_EACH_MEMBER_VECTOR_RARE_DATA(GET_STATS)
1421             #undef GET_STATS
1422
1423             if (!codeBlock->m_rareData->m_evalCodeCache.isEmpty())
1424                 evalCodeCacheIsNotEmpty++;
1425         }
1426
1427         switch (codeBlock->codeType()) {
1428             case FunctionCode:
1429                 ++isFunctionCode;
1430                 break;
1431             case GlobalCode:
1432                 ++isGlobalCode;
1433                 break;
1434             case EvalCode:
1435                 ++isEvalCode;
1436                 break;
1437         }
1438     }
1439
1440     size_t totalSize = 0;
1441
1442     #define GET_TOTAL_SIZE(name) totalSize += name##TotalSize;
1443         FOR_EACH_MEMBER_VECTOR(GET_TOTAL_SIZE)
1444         FOR_EACH_MEMBER_VECTOR_RARE_DATA(GET_TOTAL_SIZE)
1445     #undef GET_TOTAL_SIZE
1446
1447     totalSize += symbolTableTotalSize;
1448     totalSize += (liveCodeBlockSet.size() * sizeof(CodeBlock));
1449
1450     dataLogF("Number of live CodeBlocks: %u\n", liveCodeBlockSet.size());
1451     dataLogF("Size of a single CodeBlock [sizeof(CodeBlock)]: %zu\n", sizeof(CodeBlock));
1452     dataLogF("Size of all CodeBlocks: %zu\n", totalSize);
1453     dataLogF("Average size of a CodeBlock: %zu\n", totalSize / liveCodeBlockSet.size());
1454
1455     dataLogF("Number of FunctionCode CodeBlocks: %zu (%.3f%%)\n", isFunctionCode, static_cast<double>(isFunctionCode) * 100.0 / liveCodeBlockSet.size());
1456     dataLogF("Number of GlobalCode CodeBlocks: %zu (%.3f%%)\n", isGlobalCode, static_cast<double>(isGlobalCode) * 100.0 / liveCodeBlockSet.size());
1457     dataLogF("Number of EvalCode CodeBlocks: %zu (%.3f%%)\n", isEvalCode, static_cast<double>(isEvalCode) * 100.0 / liveCodeBlockSet.size());
1458
1459     dataLogF("Number of CodeBlocks with rare data: %zu (%.3f%%)\n", hasRareData, static_cast<double>(hasRareData) * 100.0 / liveCodeBlockSet.size());
1460
1461     #define PRINT_STATS(name) dataLogF("Number of CodeBlocks with " #name ": %zu\n", name##IsNotEmpty); dataLogF("Size of all " #name ": %zu\n", name##TotalSize); 
1462         FOR_EACH_MEMBER_VECTOR(PRINT_STATS)
1463         FOR_EACH_MEMBER_VECTOR_RARE_DATA(PRINT_STATS)
1464     #undef PRINT_STATS
1465
1466     dataLogF("Number of CodeBlocks with evalCodeCache: %zu\n", evalCodeCacheIsNotEmpty);
1467     dataLogF("Number of CodeBlocks with symbolTable: %zu\n", symbolTableIsNotEmpty);
1468
1469     dataLogF("Size of all symbolTables: %zu\n", symbolTableTotalSize);
1470
1471 #else
1472     dataLogF("Dumping CodeBlock statistics is not enabled.\n");
1473 #endif
1474 }
1475
1476 CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
1477     : m_globalObject(other.m_globalObject)
1478     , m_heap(other.m_heap)
1479     , m_numCalleeRegisters(other.m_numCalleeRegisters)
1480     , m_numVars(other.m_numVars)
1481     , m_isConstructor(other.m_isConstructor)
1482     , m_shouldAlwaysBeInlined(true)
1483     , m_unlinkedCode(*other.m_vm, other.m_ownerExecutable.get(), other.m_unlinkedCode.get())
1484     , m_ownerExecutable(*other.m_vm, other.m_ownerExecutable.get(), other.m_ownerExecutable.get())
1485     , m_vm(other.m_vm)
1486     , m_instructions(other.m_instructions)
1487     , m_thisRegister(other.m_thisRegister)
1488     , m_argumentsRegister(other.m_argumentsRegister)
1489     , m_activationRegister(other.m_activationRegister)
1490     , m_isStrictMode(other.m_isStrictMode)
1491     , m_needsActivation(other.m_needsActivation)
1492     , m_source(other.m_source)
1493     , m_sourceOffset(other.m_sourceOffset)
1494     , m_firstLineColumnOffset(other.m_firstLineColumnOffset)
1495     , m_codeType(other.m_codeType)
1496     , m_additionalIdentifiers(other.m_additionalIdentifiers)
1497     , m_constantRegisters(other.m_constantRegisters)
1498     , m_functionDecls(other.m_functionDecls)
1499     , m_functionExprs(other.m_functionExprs)
1500     , m_osrExitCounter(0)
1501     , m_optimizationDelayCounter(0)
1502     , m_reoptimizationRetryCounter(0)
1503     , m_hash(other.m_hash)
1504 #if ENABLE(JIT)
1505     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
1506 #endif
1507 {
1508     setNumParameters(other.numParameters());
1509     optimizeAfterWarmUp();
1510     jitAfterWarmUp();
1511
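         // Copy the rare data produced for the parsed block, if any: exception handlers,
         // constant buffers, and the (string) switch jump tables.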
1512     if (other.m_rareData) {
1513         createRareDataIfNecessary();
1514         
1515         m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
1516         m_rareData->m_constantBuffers = other.m_rareData->m_constantBuffers;
1517         m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
1518         m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
1519     }
1520 }
1521
1522 CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
1523     : m_globalObject(scope->globalObject()->vm(), ownerExecutable, scope->globalObject())
1524     , m_heap(&m_globalObject->vm().heap)
1525     , m_numCalleeRegisters(unlinkedCodeBlock->m_numCalleeRegisters)
1526     , m_numVars(unlinkedCodeBlock->m_numVars)
1527     , m_isConstructor(unlinkedCodeBlock->isConstructor())
1528     , m_shouldAlwaysBeInlined(true)
1529     , m_unlinkedCode(m_globalObject->vm(), ownerExecutable, unlinkedCodeBlock)
1530     , m_ownerExecutable(m_globalObject->vm(), ownerExecutable, ownerExecutable)
1531     , m_vm(unlinkedCodeBlock->vm())
1532     , m_thisRegister(unlinkedCodeBlock->thisRegister())
1533     , m_argumentsRegister(unlinkedCodeBlock->argumentsRegister())
1534     , m_activationRegister(unlinkedCodeBlock->activationRegister())
1535     , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
1536     , m_needsActivation(unlinkedCodeBlock->needsFullScopeChain() && unlinkedCodeBlock->codeType() == FunctionCode)
1537     , m_source(sourceProvider)
1538     , m_sourceOffset(sourceOffset)
1539     , m_firstLineColumnOffset(firstLineColumnOffset)
1540     , m_codeType(unlinkedCodeBlock->codeType())
1541     , m_osrExitCounter(0)
1542     , m_optimizationDelayCounter(0)
1543     , m_reoptimizationRetryCounter(0)
1544 #if ENABLE(JIT)
1545     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
1546 #endif
1547 {
1548     m_vm->startedCompiling(this);
1549
1550     ASSERT(m_source);
1551     setNumParameters(unlinkedCodeBlock->numParameters());
1552
1553 #if DUMP_CODE_BLOCK_STATISTICS
1554     liveCodeBlockSet.add(this);
1555 #endif
1556
1557     setConstantRegisters(unlinkedCodeBlock->constantRegisters());
1558     if (unlinkedCodeBlock->usesGlobalObject())
1559         m_constantRegisters[unlinkedCodeBlock->globalObjectRegister()].set(*m_vm, ownerExecutable, m_globalObject.get());
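         // Link each function declaration and expression: wrap its UnlinkedFunctionExecutable in a
         // FunctionExecutable that knows its absolute source range, first line, and start column.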
1560     m_functionDecls.grow(unlinkedCodeBlock->numberOfFunctionDecls());
1561     for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
1562         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
1563         unsigned lineCount = unlinkedExecutable->lineCount();
1564         unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset();
1565         unsigned startColumn = unlinkedExecutable->functionStartColumn();
1566         startColumn += (unlinkedExecutable->firstLineOffset() ? 1 : ownerExecutable->startColumn());
1567         unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset();
1568         unsigned sourceLength = unlinkedExecutable->sourceLength();
1569         SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine, startColumn);
1570         FunctionExecutable* executable = FunctionExecutable::create(*m_vm, code, unlinkedExecutable, firstLine, firstLine + lineCount, startColumn);
1571         m_functionDecls[i].set(*m_vm, ownerExecutable, executable);
1572     }
1573
1574     m_functionExprs.grow(unlinkedCodeBlock->numberOfFunctionExprs());
1575     for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
1576         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
1577         unsigned lineCount = unlinkedExecutable->lineCount();
1578         unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset();
1579         unsigned startColumn = unlinkedExecutable->functionStartColumn();
1580         startColumn += (unlinkedExecutable->firstLineOffset() ? 1 : ownerExecutable->startColumn());
1581         unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset();
1582         unsigned sourceLength = unlinkedExecutable->sourceLength();
1583         SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine, startColumn);
1584         FunctionExecutable* executable = FunctionExecutable::create(*m_vm, code, unlinkedExecutable, firstLine, firstLine + lineCount, startColumn);
1585         m_functionExprs[i].set(*m_vm, ownerExecutable, executable);
1586     }
1587
1588     if (unlinkedCodeBlock->hasRareData()) {
1589         createRareDataIfNecessary();
1590         if (size_t count = unlinkedCodeBlock->constantBufferCount()) {
1591             m_rareData->m_constantBuffers.grow(count);
1592             for (size_t i = 0; i < count; i++) {
1593                 const UnlinkedCodeBlock::ConstantBuffer& buffer = unlinkedCodeBlock->constantBuffer(i);
1594                 m_rareData->m_constantBuffers[i] = buffer;
1595             }
1596         }
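             // Exception handlers are copied with their scope depth rebased against the depth of
             // the scope this block is being linked into.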
1597         if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
1598             m_rareData->m_exceptionHandlers.grow(count);
1599             size_t nonLocalScopeDepth = scope->depth();
1600             for (size_t i = 0; i < count; i++) {
1601                 const UnlinkedHandlerInfo& handler = unlinkedCodeBlock->exceptionHandler(i);
1602                 m_rareData->m_exceptionHandlers[i].start = handler.start;
1603                 m_rareData->m_exceptionHandlers[i].end = handler.end;
1604                 m_rareData->m_exceptionHandlers[i].target = handler.target;
1605                 m_rareData->m_exceptionHandlers[i].scopeDepth = nonLocalScopeDepth + handler.scopeDepth;
1606 #if ENABLE(JIT) && ENABLE(LLINT)
1607                 m_rareData->m_exceptionHandlers[i].nativeCode = CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(llint_op_catch)));
1608 #endif
1609             }
1610         }
1611
1612         if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
1613             m_rareData->m_stringSwitchJumpTables.grow(count);
1614             for (size_t i = 0; i < count; i++) {
1615                 UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
1616                 UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
1617                 for (; ptr != end; ++ptr) {
1618                     OffsetLocation offset;
1619                     offset.branchOffset = ptr->value;
1620                     m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
1621                 }
1622             }
1623         }
1624
1625         if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
1626             m_rareData->m_switchJumpTables.grow(count);
1627             for (size_t i = 0; i < count; i++) {
1628                 UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
1629                 SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
1630                 destTable.branchOffsets = sourceTable.branchOffsets;
1631                 destTable.min = sourceTable.min;
1632             }
1633         }
1634     }
1635
1636     // Allocate metadata buffers for the bytecode
1637 #if ENABLE(LLINT)
1638     if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
1639         m_llintCallLinkInfos.grow(size);
1640 #endif
1641 #if ENABLE(DFG_JIT)
1642     if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
1643         m_arrayProfiles.grow(size);
1644     if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
1645         m_arrayAllocationProfiles.grow(size);
1646     if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
1647         m_valueProfiles.grow(size);
1648 #endif
1649     if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
1650         m_objectAllocationProfiles.grow(size);
1651
1652     // Copy and translate the UnlinkedInstructions
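     // Each opcode is converted to the interpreter's dispatch form via getOpcode(), and the
     // profiling / caching operands are rewritten below to point into this block's metadata arrays.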
1653     size_t instructionCount = unlinkedCodeBlock->instructions().size();
1654     UnlinkedInstruction* pc = unlinkedCodeBlock->instructions().data();
1655     Vector<Instruction, 0, UnsafeVectorOverflow> instructions(instructionCount);
1656     for (size_t i = 0; i < unlinkedCodeBlock->instructions().size(); ) {
1657         unsigned opLength = opcodeLength(pc[i].u.opcode);
1658         instructions[i] = vm()->interpreter->getOpcode(pc[i].u.opcode);
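             // Copy the operands; on 64-bit targets, zero the pointer-sized slot first so the
             // high bits are well defined before the 32-bit operand is stored.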
1659         for (size_t j = 1; j < opLength; ++j) {
1660             if (sizeof(int32_t) != sizeof(intptr_t))
1661                 instructions[i + j].u.pointer = 0;
1662             instructions[i + j].u.operand = pc[i + j].u.operand;
1663         }
1664         switch (pc[i].u.opcode) {
1665 #if ENABLE(DFG_JIT)
1666         case op_get_by_val:
1667         case op_get_argument_by_val: {
1668             int arrayProfileIndex = pc[i + opLength - 2].u.operand;
1669             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1670
1671             instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
1672             // Fall through so the value profile below gets attached as well.
1673         }
1674         case op_to_this:
1675         case op_get_by_id:
1676         case op_call_varargs:
1677         case op_get_callee: {
1678             ValueProfile* profile = &m_valueProfiles[pc[i + opLength - 1].u.operand];
1679             ASSERT(profile->m_bytecodeOffset == -1);
1680             profile->m_bytecodeOffset = i;
1681             instructions[i + opLength - 1] = profile;
1682             break;
1683         }
1684         case op_put_by_val: {
1685             int arrayProfileIndex = pc[i + opLength - 1].u.operand;
1686             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1687             instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
1688             break;
1689         }
1690
1691         case op_new_array:
1692         case op_new_array_buffer:
1693         case op_new_array_with_size: {
1694             int arrayAllocationProfileIndex = pc[i + opLength - 1].u.operand;
1695             instructions[i + opLength - 1] = &m_arrayAllocationProfiles[arrayAllocationProfileIndex];
1696             break;
1697         }
1698 #endif
1699         case op_new_object: {
1700             int objectAllocationProfileIndex = pc[i + opLength - 1].u.operand;
1701             ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex];
1702             int inferredInlineCapacity = pc[i + opLength - 2].u.operand;
1703
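                 // The allocation profile is seeded with the prototype and the inferred inline
                 // capacity so op_new_object can allocate objects sized appropriately for this site.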
1704             instructions[i + opLength - 1] = objectAllocationProfile;
1705             objectAllocationProfile->initialize(*vm(),
1706                 m_ownerExecutable.get(), m_globalObject->objectPrototype(), inferredInlineCapacity);
1707             break;
1708         }
1709
1710         case op_call:
1711         case op_call_eval: {
1712 #if ENABLE(DFG_JIT)
1713             ValueProfile* profile = &m_valueProfiles[pc[i + opLength - 1].u.operand];
1714             ASSERT(profile->m_bytecodeOffset == -1);
1715             profile->m_bytecodeOffset = i;
1716             instructions[i + opLength - 1] = profile;
1717             int arrayProfileIndex = pc[i + opLength - 2].u.operand;
1718             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1719             instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
1720 #endif
1721 #if ENABLE(LLINT)
1722             instructions[i + 5] = &m_llintCallLinkInfos[pc[i + 5].u.operand];
1723 #endif
1724             break;
1725         }
1726         case op_construct: {
1727 #if ENABLE(LLINT)
1728             instructions[i + 5] = &m_llintCallLinkInfos[pc[i + 5].u.operand];
1729 #endif
1730 #if ENABLE(DFG_JIT)
1731             ValueProfile* profile = &m_valueProfiles[pc[i + opLength - 1].u.operand];
1732             ASSERT(profile->m_bytecodeOffset == -1);
1733             profile->m_bytecodeOffset = i;
1734             instructions[i + opLength - 1] = profile;
1735 #endif
1736             break;
1737         }
1738         case op_get_by_id_out_of_line:
1739         case op_get_by_id_self:
1740         case op_get_by_id_proto:
1741         case op_get_by_id_chain:
1742         case op_get_by_id_getter_self:
1743         case op_get_by_id_getter_proto:
1744         case op_get_by_id_getter_chain:
1745         case op_get_by_id_custom_self:
1746         case op_get_by_id_custom_proto:
1747         case op_get_by_id_custom_chain:
1748         case op_get_by_id_generic:
1749         case op_get_array_length:
1750         case op_get_string_length:
1751             CRASH();
1752
1753         case op_init_global_const_nop: {
1754             ASSERT(codeType() == GlobalCode);
1755             Identifier ident = identifier(pc[i + 4].u.operand);
1756             SymbolTableEntry entry = m_globalObject->symbolTable()->get(ident.impl());
1757             if (entry.isNull())
1758                 break;
1759
1760             // It's likely that we'll write to this var, so notify now and avoid the overhead of doing so at runtime.
1761             entry.notifyWrite();
1762
1763             instructions[i + 0] = vm()->interpreter->getOpcode(op_init_global_const);
1764             instructions[i + 1] = &m_globalObject->registerAt(entry.getIndex());
1765             break;
1766         }
1767
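             // resolve_scope is resolved against the linking scope right here: abstractResolve()
             // computes the resolve type and depth, which are baked into the instruction stream.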
1768         case op_resolve_scope: {
1769             const Identifier& ident = identifier(pc[i + 2].u.operand);
1770             ResolveType type = static_cast<ResolveType>(pc[i + 3].u.operand);
1771
1772             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), scope, ident, Get, type);
1773             instructions[i + 3].u.operand = op.type;
1774             instructions[i + 4].u.operand = op.depth;
1775             break;
1776         }
1777
1778         case op_get_from_scope: {
1779 #if ENABLE(VALUE_PROFILER)
1780             ValueProfile* profile = &m_valueProfiles[pc[i + opLength - 1].u.operand];
1781             ASSERT(profile->m_bytecodeOffset == -1);
1782             profile->m_bytecodeOffset = i;
1783             instructions[i + opLength - 1] = profile;
1784 #endif
1785
1786             // get_from_scope dst, scope, id, ResolveModeAndType, Structure, Operand
1787             const Identifier& ident = identifier(pc[i + 3].u.operand);
1788             ResolveModeAndType modeAndType = ResolveModeAndType(pc[i + 4].u.operand);
1789             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), scope, ident, Get, modeAndType.type());
1790
1791             instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
1792             if (op.structure)
1793                 instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
1794             instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
1795             break;
1796         }
1797
1798         case op_put_to_scope: {
1799             // put_to_scope scope, id, value, ResolveModeAndType, Structure, Operand
1800             const Identifier& ident = identifier(pc[i + 2].u.operand);
1801             ResolveModeAndType modeAndType = ResolveModeAndType(pc[i + 4].u.operand);
1802             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), scope, ident, Put, modeAndType.type());
1803
1804             instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
1805             if (op.structure)
1806                 instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
1807             instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
1808             break;
1809         }
1810
1811         case op_debug: {
1812             instructions[i + 4] = columnNumberForBytecodeOffset(i);
1813             break;
1814         }
1815
1816         default:
1817             break;
1818         }
1819         i += opLength;
1820     }
1821     m_instructions = WTF::RefCountedArray<Instruction>(instructions);
1822
1823     // Set optimization thresholds only after m_instructions is initialized, since these
1824     // rely on the instruction count (and are in theory permitted to also inspect the
1825     // instruction stream to more accurately assess the cost of tier-up).
1826     optimizeAfterWarmUp();
1827     jitAfterWarmUp();
1828
1829     // If the concurrent thread will want the code block's hash, then compute it here
1830     // synchronously.
1831     if (Options::showDisassembly()
1832         || Options::showDFGDisassembly()
1833         || Options::dumpBytecodeAtDFGTime()
1834         || Options::verboseCompilation()
1835         || Options::logCompilationChanges()
1836         || Options::validateGraph()
1837         || Options::validateGraphAtEachPhase()
1838         || Options::verboseOSR()
1839         || Options::verboseCompilationQueue()
1840         || Options::reportCompileTimes()
1841         || Options::verboseCFA())
1842         hash();
1843
1844     if (Options::dumpGeneratedBytecodes())
1845         dumpBytecode();
1846     m_vm->finishedCompiling(this);
1847 }
1848
1849 CodeBlock::~CodeBlock()
1850 {
1851     if (m_vm->m_perBytecodeProfiler)
1852         m_vm->m_perBytecodeProfiler->notifyDestruction(this);
1853     
1854 #if ENABLE(DFG_JIT)
1855     // Remove myself from the set of DFG code blocks. Note that I may not be in this set
1856     // (because I'm not a DFG code block), in which case this is a no-op anyway.
1857     m_vm->heap.m_dfgCodeBlocks.m_set.remove(this);
1858 #endif
1859     
1860 #if ENABLE(VERBOSE_VALUE_PROFILE)
1861     dumpValueProfiles();
1862 #endif
1863
1864 #if ENABLE(LLINT)    
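     // Unlink all incoming LLInt calls so that callers are not left holding nodes in a linked
     // list that is about to go away.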
1865     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
1866         m_incomingLLIntCalls.begin()->remove();
1867 #endif // ENABLE(LLINT)
1868 #if ENABLE(JIT)
1869     // We may be destroyed before any CodeBlocks that refer to us are destroyed.
1870     // Consider that two CodeBlocks become unreachable at the same time. There
1871     // is no guarantee about the order in which the CodeBlocks are destroyed.
1872     // So, if we don't remove incoming calls, and get destroyed before the
1873     // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
1874     // destructor will try to remove nodes from our (no longer valid) linked list.
1875     while (m_incomingCalls.begin() != m_incomingCalls.end())
1876         m_incomingCalls.begin()->remove();
1877     
1878     // Note that our outgoing calls will be removed from other CodeBlocks'
1879     // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
1880     // destructors.
1881
1882     for (size_t size = m_structureStubInfos.size(), i = 0; i < size; ++i)
1883         m_structureStubInfos[i].deref();
1884 #endif // ENABLE(JIT)
1885
1886 #if DUMP_CODE_BLOCK_STATISTICS
1887     liveCodeBlockSet.remove(this);
1888 #endif
1889 }
1890
1891 void CodeBlock::setNumParameters(int newValue)
1892 {
1893     m_numParameters = newValue;
1894
1895 #if ENABLE(VALUE_PROFILER)
1896     m_argumentValueProfiles.resizeToFit(newValue);
1897 #endif
1898 }
1899
1900 void EvalCodeCache::visitAggregate(SlotVisitor& visitor)
1901 {
1902     EvalCacheMap::iterator end = m_cacheMap.end();
1903     for (EvalCacheMap::iterator ptr = m_cacheMap.begin(); ptr != end; ++ptr)
1904         visitor.append(&ptr->value);
1905 }
1906
1907 void CodeBlock::visitAggregate(SlotVisitor& visitor)
1908 {
1909 #if ENABLE(PARALLEL_GC) && ENABLE(DFG_JIT)
1910     if (JITCode::isOptimizingJIT(jitType())) {
1911         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1912         
1913         // I may be asked to scan myself more than once, and it may even happen concurrently.
1914         // To this end, use a CAS loop to check if I've been called already. Only one thread
1915         // may proceed past this point - whichever one wins the CAS race.
1916         unsigned oldValue;
1917         do {
1918             oldValue = dfgCommon->visitAggregateHasBeenCalled;
1919             if (oldValue) {
1920                 // Looks like someone else won! Return immediately to ensure that we don't
1921                 // trace the same CodeBlock concurrently. Doing so is hazardous since we will
1922                 // be mutating the state of ValueProfiles, which contain JSValues, which can
1923                 // have word-tearing on 32-bit, leading to awesome timing-dependent crashes
1924                 // that are nearly impossible to track down.
1925                 
1926                 // Also note that it must be safe to return early as soon as we see the
1927                 // value true (well, (unsigned)1), since once a GC thread is in this method
1928                 // and has won the CAS race (i.e. was responsible for setting the value true)
1929                 // it will definitely complete the rest of this method before declaring
1930                 // termination.
1931                 return;
1932             }
1933         } while (!WTF::weakCompareAndSwap(&dfgCommon->visitAggregateHasBeenCalled, 0, 1));
1934     }
1935 #endif // ENABLE(PARALLEL_GC) && ENABLE(DFG_JIT)
1936     
1937     if (!!m_alternative)
1938         m_alternative->visitAggregate(visitor);
1939
1940     visitor.append(&m_unlinkedCode);
1941
1942     // There are three things that may use unconditional finalizers: lazy bytecode freeing,
1943     // inline cache clearing, and jettisoning. The probability that we want to do at
1944     // least one of those things is quite close to 1, so we add one no matter what;
1945     // when it runs, it figures out whether it has any work to do.
1946     visitor.addUnconditionalFinalizer(this);
1947     
1948     // There are two things that we use weak reference harvesters for: DFG fixpoint for
1949     // jettisoning, and trying to find structures that would be live based on some
1950     // inline cache. So it makes sense to register them regardless.
1951     visitor.addWeakReferenceHarvester(this);
1952     m_allTransitionsHaveBeenMarked = false;
1953     
1954     if (shouldImmediatelyAssumeLivenessDuringScan()) {
1955         // This code block is live, so scan all references strongly and return.
1956         stronglyVisitStrongReferences(visitor);
1957         stronglyVisitWeakReferences(visitor);
1958         propagateTransitions(visitor);
1959         return;
1960     }
1961     
1962 #if ENABLE(DFG_JIT)
1963     // We get here if we're live in the sense that our owner executable is live,
1964     // but we're not yet live for sure in another sense: we may yet decide that this
1965     // code block should be jettisoned based on its outgoing weak references being
1966     // stale. Set a flag to indicate that we're still assuming that we're dead, and
1967     // perform one round of determining if we're live. The GC may determine, based on
1968     // either us marking additional objects, or by other objects being marked for
1969     // other reasons, that this iteration should run again; it will notify us of this
1970     // decision by calling harvestWeakReferences().
1971     
1972     m_jitCode->dfgCommon()->livenessHasBeenProved = false;
1973     
1974     propagateTransitions(visitor);
1975     determineLiveness(visitor);
1976 #else // ENABLE(DFG_JIT)
1977     RELEASE_ASSERT_NOT_REACHED();
1978 #endif // ENABLE(DFG_JIT)
1979 }
1980
1981 void CodeBlock::propagateTransitions(SlotVisitor& visitor)
1982 {
1983     UNUSED_PARAM(visitor);
1984
1985     if (m_allTransitionsHaveBeenMarked)
1986         return;
1987
1988     bool allAreMarkedSoFar = true;
1989         
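         // A cached put_by_id transition (LLInt and baseline caches below, DFG transitions further
         // down) keeps its target structure alive only if its source structure is already marked;
         // otherwise, record that not every transition has been handled yet so we get revisited.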
1990 #if ENABLE(LLINT)
1991     Interpreter* interpreter = m_vm->interpreter;
1992     if (jitType() == JITCode::InterpreterThunk) {
1993         const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
1994         for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
1995             Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
1996             switch (interpreter->getOpcodeID(instruction[0].u.opcode)) {
1997             case op_put_by_id_transition_direct:
1998             case op_put_by_id_transition_normal:
1999             case op_put_by_id_transition_direct_out_of_line:
2000             case op_put_by_id_transition_normal_out_of_line: {
2001                 if (Heap::isMarked(instruction[4].u.structure.get()))
2002                     visitor.append(&instruction[6].u.structure);
2003                 else
2004                     allAreMarkedSoFar = false;
2005                 break;
2006             }
2007             default:
2008                 break;
2009             }
2010         }
2011     }
2012 #endif // ENABLE(LLINT)
2013
2014 #if ENABLE(JIT)
2015     if (JITCode::isJIT(jitType())) {
2016         for (unsigned i = 0; i < m_structureStubInfos.size(); ++i) {
2017             StructureStubInfo& stubInfo = m_structureStubInfos[i];
2018             switch (stubInfo.accessType) {
2019             case access_put_by_id_transition_normal:
2020             case access_put_by_id_transition_direct: {
2021                 JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
2022                 if ((!origin || Heap::isMarked(origin))
2023                     && Heap::isMarked(stubInfo.u.putByIdTransition.previousStructure.get()))
2024                     visitor.append(&stubInfo.u.putByIdTransition.structure);
2025                 else
2026                     allAreMarkedSoFar = false;
2027                 break;
2028             }
2029
2030             case access_put_by_id_list: {
2031                 PolymorphicPutByIdList* list = stubInfo.u.putByIdList.list;
2032                 JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
2033                 if (origin && !Heap::isMarked(origin)) {
2034                     allAreMarkedSoFar = false;
2035                     break;
2036                 }
2037                 for (unsigned j = list->size(); j--;) {
2038                     PutByIdAccess& access = list->m_list[j];
2039                     if (!access.isTransition())
2040                         continue;
2041                     if (Heap::isMarked(access.oldStructure()))
2042                         visitor.append(&access.m_newStructure);
2043                     else
2044                         allAreMarkedSoFar = false;
2045                 }
2046                 break;
2047             }
2048             
2049             default:
2050                 break;
2051             }
2052         }
2053     }
2054 #endif // ENABLE(JIT)
2055     
2056 #if ENABLE(DFG_JIT)
2057     if (JITCode::isOptimizingJIT(jitType())) {
2058         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2059         for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
2060             if ((!dfgCommon->transitions[i].m_codeOrigin
2061                  || Heap::isMarked(dfgCommon->transitions[i].m_codeOrigin.get()))
2062                 && Heap::isMarked(dfgCommon->transitions[i].m_from.get())) {
2063                 // If the following three things are live, then the target of the
2064                 // transition is also live:
2065                 // - This code block. We know it's live already because otherwise
2066                 //   we wouldn't be scanning ourselves.
2067                 // - The code origin of the transition. Transitions may arise from
2068                 //   code that was inlined. They are not relevant if the user's
2069                 //   object that is required for the inlinee to run is no longer
2070                 //   live.
2071                 // - The source of the transition. The transition checks if some
2072                 //   heap location holds the source, and if so, stores the target.
2073                 //   Hence the source must be live for the transition to be live.
2074                 visitor.append(&dfgCommon->transitions[i].m_to);
2075             } else
2076                 allAreMarkedSoFar = false;
2077         }
2078     }
2079 #endif // ENABLE(DFG_JIT)
2080     
2081     if (allAreMarkedSoFar)
2082         m_allTransitionsHaveBeenMarked = true;
2083 }
2084
2085 void CodeBlock::determineLiveness(SlotVisitor& visitor)
2086 {
2087     UNUSED_PARAM(visitor);
2088     
2089     if (shouldImmediatelyAssumeLivenessDuringScan())
2090         return;
2091     
2092 #if ENABLE(DFG_JIT)
2093     // Check if we have any remaining work to do.
2094     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2095     if (dfgCommon->livenessHasBeenProved)
2096         return;
2097     
2098     // Now check all of our weak references. If all of them are live, then we
2099     // have proved liveness and so we scan our strong references. If at end of
2100     // GC we still have not proved liveness, then this code block is toast.
2101     bool allAreLiveSoFar = true;
2102     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
2103         if (!Heap::isMarked(dfgCommon->weakReferences[i].get())) {
2104             allAreLiveSoFar = false;
2105             break;
2106         }
2107     }
2108     
2109     // If some weak references are dead, then this fixpoint iteration was
2110     // unsuccessful.
2111     if (!allAreLiveSoFar)
2112         return;
2113     
2114     // All weak references are live. Record this information so we don't
2115     // come back here again, and scan the strong references.
2116     dfgCommon->livenessHasBeenProved = true;
2117     stronglyVisitStrongReferences(visitor);
2118 #endif // ENABLE(DFG_JIT)
2119 }
2120
2121 void CodeBlock::visitWeakReferences(SlotVisitor& visitor)
2122 {
2123     propagateTransitions(visitor);
2124     determineLiveness(visitor);
2125 }
2126
2127 void CodeBlock::finalizeUnconditionally()
2128 {
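     // First, clear any LLInt inline caches and call links whose structures or callees are about
     // to be collected, so the interpreter never sees a stale pointer.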
2129 #if ENABLE(LLINT)
2130     Interpreter* interpreter = m_vm->interpreter;
2131     if (JITCode::couldBeInterpreted(jitType())) {
2132         const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
2133         for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
2134             Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
2135             switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) {
2136             case op_get_by_id:
2137             case op_get_by_id_out_of_line:
2138             case op_put_by_id:
2139             case op_put_by_id_out_of_line:
2140                 if (!curInstruction[4].u.structure || Heap::isMarked(curInstruction[4].u.structure.get()))
2141                     break;
2142                 if (Options::verboseOSR())
2143                     dataLogF("Clearing LLInt property access with structure %p.\n", curInstruction[4].u.structure.get());
2144                 curInstruction[4].u.structure.clear();
2145                 curInstruction[5].u.operand = 0;
2146                 break;
2147             case op_put_by_id_transition_direct:
2148             case op_put_by_id_transition_normal:
2149             case op_put_by_id_transition_direct_out_of_line:
2150             case op_put_by_id_transition_normal_out_of_line:
2151                 if (Heap::isMarked(curInstruction[4].u.structure.get())
2152                     && Heap::isMarked(curInstruction[6].u.structure.get())
2153                     && Heap::isMarked(curInstruction[7].u.structureChain.get()))
2154                     break;
2155                 if (Options::verboseOSR()) {
2156                     dataLogF("Clearing LLInt put transition with structures %p -> %p, chain %p.\n",
2157                             curInstruction[4].u.structure.get(),
2158                             curInstruction[6].u.structure.get(),
2159                             curInstruction[7].u.structureChain.get());
2160                 }
2161                 curInstruction[4].u.structure.clear();
2162                 curInstruction[6].u.structure.clear();
2163                 curInstruction[7].u.structureChain.clear();
2164                 curInstruction[0].u.opcode = interpreter->getOpcode(op_put_by_id);
2165                 break;
2166             case op_get_array_length:
2167                 break;
2168             case op_get_from_scope:
2169             case op_put_to_scope: {
2170                 WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
2171                 if (!structure || Heap::isMarked(structure.get()))
2172                     break;
2173                 if (Options::verboseOSR())
2174                     dataLogF("Clearing LLInt scope access with structure %p.\n", structure.get());
2175                 structure.clear();
2176                 break;
2177             }
2178             default:
2179                 RELEASE_ASSERT_NOT_REACHED();
2180             }
2181         }
2182
2183         for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
2184             if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
2185                 if (Options::verboseOSR())
2186                     dataLog("Clearing LLInt call from ", *this, "\n");
2187                 m_llintCallLinkInfos[i].unlink();
2188             }
2189             if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
2190                 m_llintCallLinkInfos[i].lastSeenCallee.clear();
2191         }
2192     }
2193 #endif // ENABLE(LLINT)
2194
2195 #if ENABLE(DFG_JIT)
2196     // Check if we're not live. If we are, then jettison.
2197     if (!(shouldImmediatelyAssumeLivenessDuringScan() || m_jitCode->dfgCommon()->livenessHasBeenProved)) {
2198         if (Options::verboseOSR())
2199             dataLog(*this, " has dead weak references, jettisoning during GC.\n");
2200
2201         if (DFG::shouldShowDisassembly()) {
2202             dataLog(*this, " will be jettisoned because of the following dead references:\n");
2203             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2204             for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
2205                 DFG::WeakReferenceTransition& transition = dfgCommon->transitions[i];
2206                 JSCell* origin = transition.m_codeOrigin.get();
2207                 JSCell* from = transition.m_from.get();
2208                 JSCell* to = transition.m_to.get();
2209                 if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
2210                     continue;
2211                 dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
2212             }
2213             for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
2214                 JSCell* weak = dfgCommon->weakReferences[i].get();
2215                 if (Heap::isMarked(weak))
2216                     continue;
2217                 dataLog("    Weak reference ", RawPointer(weak), ".\n");
2218             }
2219         }
2220         
2221         jettison();
2222         return;
2223     }
2224 #endif // ENABLE(DFG_JIT)
2225
2226 #if ENABLE(JIT)
2227     // Handle inline caches.
2228     if (!!jitCode()) {
2229         RepatchBuffer repatchBuffer(this);
2230         for (unsigned i = 0; i < numberOfCallLinkInfos(); ++i) {
2231             if (callLinkInfo(i).isLinked()) {
2232                 if (ClosureCallStubRoutine* stub = callLinkInfo(i).stub.get()) {
2233                     if (!Heap::isMarked(stub->structure())
2234                         || !Heap::isMarked(stub->executable())) {
2235                         if (Options::verboseOSR()) {
2236                             dataLog(
2237                                 "Clearing closure call from ", *this, " to ",
2238                                 stub->executable()->hashFor(callLinkInfo(i).specializationKind()),
2239                                 ", stub routine ", RawPointer(stub), ".\n");
2240                         }
2241                         callLinkInfo(i).unlink(*m_vm, repatchBuffer);
2242                     }
2243                 } else if (!Heap::isMarked(callLinkInfo(i).callee.get())) {
2244                     if (Options::verboseOSR()) {
2245                         dataLog(
2246                             "Clearing call from ", *this, " to ",
2247                             RawPointer(callLinkInfo(i).callee.get()), " (",
2248                             callLinkInfo(i).callee.get()->executable()->hashFor(
2249                                 callLinkInfo(i).specializationKind()),
2250                             ").\n");
2251                     }
2252                     callLinkInfo(i).unlink(*m_vm, repatchBuffer);
2253                 }
2254             }
2255             if (!!callLinkInfo(i).lastSeenCallee
2256                 && !Heap::isMarked(callLinkInfo(i).lastSeenCallee.get()))
2257                 callLinkInfo(i).lastSeenCallee.clear();
2258         }
2259         for (size_t size = m_structureStubInfos.size(), i = 0; i < size; ++i) {
2260             StructureStubInfo& stubInfo = m_structureStubInfos[i];
2261             
2262             if (stubInfo.visitWeakReferences())
2263                 continue;
2264             
2265             resetStubDuringGCInternal(repatchBuffer, stubInfo);
2266         }
2267     }
2268 #endif
2269 }
2270
2271 #if ENABLE(JIT)
2272 void CodeBlock::resetStub(StructureStubInfo& stubInfo)
2273 {
2274     if (stubInfo.accessType == access_unset)
2275         return;
2276     
2277     RepatchBuffer repatchBuffer(this);
2278     resetStubInternal(repatchBuffer, stubInfo);
2279 }
2280
2281 void CodeBlock::resetStubInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
2282 {
2283     AccessType accessType = static_cast<AccessType>(stubInfo.accessType);
2284     
2285     if (Options::verboseOSR()) {
2286         // This can be called from destructors that run during GC, so we don't try to do a
2287         // full dump of the CodeBlock.
2288         dataLog("Clearing structure cache (kind ", static_cast<int>(stubInfo.accessType), ") in ", RawPointer(this), ".\n");
2289     }
2290
2291     switch (jitType()) {
2292     case JITCode::BaselineJIT:
2293         if (isGetByIdAccess(accessType))
2294             JIT::resetPatchGetById(repatchBuffer, &stubInfo);
2295         else {
2296             RELEASE_ASSERT(isPutByIdAccess(accessType));
2297             JIT::resetPatchPutById(repatchBuffer, &stubInfo);
2298         }
2299         break;
2300     case JITCode::DFGJIT:
2301         if (isGetByIdAccess(accessType))
2302             DFG::dfgResetGetByID(repatchBuffer, stubInfo);
2303         else if (isPutByIdAccess(accessType))
2304             DFG::dfgResetPutByID(repatchBuffer, stubInfo);
2305         else {
2306             RELEASE_ASSERT(isInAccess(accessType));
2307             DFG::dfgResetIn(repatchBuffer, stubInfo);
2308         }
2309         break;
2310     default:
2311         RELEASE_ASSERT_NOT_REACHED();
2312         break;
2313     }
2314     
2315     stubInfo.reset();
2316 }
2317
2318 void CodeBlock::resetStubDuringGCInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
2319 {
2320     resetStubInternal(repatchBuffer, stubInfo);
2321     stubInfo.resetByGC = true;
2322 }
2323 #endif
2324
2325 void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
2326 {
2327     visitor.append(&m_globalObject);
2328     visitor.append(&m_ownerExecutable);
2329     visitor.append(&m_unlinkedCode);
2330     if (m_rareData)
2331         m_rareData->m_evalCodeCache.visitAggregate(visitor);
2332     visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
2333     for (size_t i = 0; i < m_functionExprs.size(); ++i)
2334         visitor.append(&m_functionExprs[i]);
2335     for (size_t i = 0; i < m_functionDecls.size(); ++i)
2336         visitor.append(&m_functionDecls[i]);
2337     for (unsigned i = 0; i < m_objectAllocationProfiles.size(); ++i)
2338         m_objectAllocationProfiles[i].visitAggregate(visitor);
2339
2340     updateAllPredictions(Collection);
2341 }
2342
2343 void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor)
2344 {
2345     UNUSED_PARAM(visitor);
2346
2347 #if ENABLE(DFG_JIT)
2348     if (!JITCode::isOptimizingJIT(jitType()))
2349         return;
2350     
2351     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2352
2353     for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
2354         if (!!dfgCommon->transitions[i].m_codeOrigin)
2355             visitor.append(&dfgCommon->transitions[i].m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
2356         visitor.append(&dfgCommon->transitions[i].m_from);
2357         visitor.append(&dfgCommon->transitions[i].m_to);
2358     }
2359     
2360     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i)
2361         visitor.append(&dfgCommon->weakReferences[i]);
2362 #endif    
2363 }
2364
2365 CodeBlock* CodeBlock::baselineVersion()
2366 {
2367 #if ENABLE(JIT)
2368     // When we're initializing the original baseline code block, we won't be able
2369     // to get its replacement. But we'll know that it's the original baseline code
2370     // block because it won't have JIT code yet and it won't have an alternative.
2371     if (jitType() == JITCode::None && !alternative())
2372         return this;
2373     
2374     CodeBlock* result = replacement();
2375     ASSERT(result);
2376     while (result->alternative())
2377         result = result->alternative();
2378     ASSERT(result);
2379     ASSERT(JITCode::isBaselineCode(result->jitType()));
2380     return result;
2381 #else
2382     return this;
2383 #endif
2384 }
2385
2386 #if ENABLE(JIT)
2387 bool CodeBlock::hasOptimizedReplacement()
2388 {
2389     ASSERT(JITCode::isBaselineCode(jitType()));
2390     bool result = JITCode::isHigherTier(replacement()->jitType(), jitType());
2391     if (result)
2392         ASSERT(JITCode::isOptimizingJIT(replacement()->jitType()));
2393     else {
2394         ASSERT(JITCode::isBaselineCode(replacement()->jitType()));
2395         ASSERT(replacement() == this);
2396     }
2397     return result;
2398 }
2399 #endif
2400
2401 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset)
2402 {
2403     RELEASE_ASSERT(bytecodeOffset < instructions().size());
2404
2405     if (!m_rareData)
2406         return 0;
2407     
2408     Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
2409     for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
2410         // Handlers are ordered innermost first, so the first handler we encounter
2411         // that contains the source address is the correct handler to use.
2412         if (exceptionHandlers[i].start <= bytecodeOffset && exceptionHandlers[i].end > bytecodeOffset)
2413             return &exceptionHandlers[i];
2414     }
2415
2416     return 0;
2417 }
2418
2419 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
2420 {
2421     RELEASE_ASSERT(bytecodeOffset < instructions().size());
2422     return m_ownerExecutable->lineNo() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
2423 }
2424
2425 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
2426 {
2427     int divot;
2428     int startOffset;
2429     int endOffset;
2430     unsigned line;
2431     unsigned column;
2432     expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
2433     return column;
2434 }
2435
2436 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column)
2437 {
2438     m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
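     // Translate the block-relative position into absolute source coordinates: expressions on the
     // block's first line are shifted by firstLineColumnOffset(), later lines by one column.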
2439     divot += m_sourceOffset;
2440     column += line ? 1 : firstLineColumnOffset();
2441     line += m_ownerExecutable->lineNo();
2442 }
2443
2444 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
2445 {
2446 #if ENABLE(LLINT)
2447     m_llintCallLinkInfos.shrinkToFit();
2448 #endif
2449 #if ENABLE(JIT)
2450     m_structureStubInfos.shrinkToFit();
2451     m_callLinkInfos.shrinkToFit();
2452 #endif
2453 #if ENABLE(VALUE_PROFILER)
2454     m_rareCaseProfiles.shrinkToFit();
2455     m_specialFastCaseProfiles.shrinkToFit();
2456 #endif
2457     
2458     if (shrinkMode == EarlyShrink) {
2459         m_additionalIdentifiers.shrinkToFit();
2460         m_functionDecls.shrinkToFit();
2461         m_functionExprs.shrinkToFit();
2462         m_constantRegisters.shrinkToFit();
2463         
2464         if (m_rareData) {
2465             m_rareData->m_switchJumpTables.shrinkToFit();
2466             m_rareData->m_stringSwitchJumpTables.shrinkToFit();
2467         }
2468     } // else don't shrink these, because we may already have handed out pointers into these tables.
2469
2470     if (m_rareData) {
2471         m_rareData->m_exceptionHandlers.shrinkToFit();
2472 #if ENABLE(JIT)
2473         m_rareData->m_callReturnIndexVector.shrinkToFit();
2474 #endif
2475 #if ENABLE(DFG_JIT)
2476         m_rareData->m_inlineCallFrames.shrinkToFit();
2477         m_rareData->m_codeOrigins.shrinkToFit();
2478 #endif
2479     }
2480 }
2481
2482 void CodeBlock::createActivation(CallFrame* callFrame)
2483 {
2484     ASSERT(codeType() == FunctionCode);
2485     ASSERT(needsFullScopeChain());
2486     ASSERT(!callFrame->uncheckedR(activationRegister()).jsValue());
2487     JSActivation* activation = JSActivation::create(callFrame->vm(), callFrame, this);
2488     callFrame->uncheckedR(activationRegister()) = JSValue(activation);
2489     callFrame->setScope(activation);
2490 }
2491
2492 unsigned CodeBlock::addOrFindConstant(JSValue v)
2493 {
2494     unsigned result;
2495     if (findConstant(v, result))
2496         return result;
2497     return addConstant(v);
2498 }
2499
2500 bool CodeBlock::findConstant(JSValue v, unsigned& index)
2501 {
2502     unsigned numberOfConstants = numberOfConstantRegisters();
2503     for (unsigned i = 0; i < numberOfConstants; ++i) {
2504         if (getConstant(FirstConstantRegisterIndex + i) == v) {
2505             index = i;
2506             return true;
2507         }
2508     }
2509     index = numberOfConstants;
2510     return false;
2511 }
2512
2513 #if ENABLE(JIT)
2514 void CodeBlock::unlinkCalls()
2515 {
2516     if (!!m_alternative)
2517         m_alternative->unlinkCalls();
2518 #if ENABLE(LLINT)
2519     for (size_t i = 0; i < m_llintCallLinkInfos.size(); ++i) {
2520         if (m_llintCallLinkInfos[i].isLinked())
2521             m_llintCallLinkInfos[i].unlink();
2522     }
2523 #endif
2524     if (!m_callLinkInfos.size())
2525         return;
2526     if (!m_vm->canUseJIT())
2527         return;
2528     RepatchBuffer repatchBuffer(this);
2529     for (size_t i = 0; i < m_callLinkInfos.size(); i++) {
2530         if (!m_callLinkInfos[i].isLinked())
2531             continue;
2532         m_callLinkInfos[i].unlink(*m_vm, repatchBuffer);
2533     }
2534 }
2535
2536 void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
2537 {
2538     noticeIncomingCall(callerFrame);
2539     m_incomingCalls.push(incoming);
2540 }
2541 #endif // ENABLE(JIT)
2542
2543 void CodeBlock::unlinkIncomingCalls()
2544 {
2545 #if ENABLE(LLINT)
2546     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
2547         m_incomingLLIntCalls.begin()->unlink();
2548 #endif // ENABLE(LLINT)
2549 #if ENABLE(JIT)
2550     if (m_incomingCalls.isEmpty())
2551         return;
2552     RepatchBuffer repatchBuffer(this);
2553     while (m_incomingCalls.begin() != m_incomingCalls.end())
2554         m_incomingCalls.begin()->unlink(*m_vm, repatchBuffer);
2555 #endif // ENABLE(JIT)
2556 }
2557
2558 #if ENABLE(LLINT)
2559 void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
2560 {
2561     noticeIncomingCall(callerFrame);
2562     m_incomingLLIntCalls.push(incoming);
2563 }
2564 #endif // ENABLE(LLINT)
2565
2566 #if ENABLE(JIT)
2567 ClosureCallStubRoutine* CodeBlock::findClosureCallForReturnPC(ReturnAddressPtr returnAddress)
2568 {
2569     for (unsigned i = m_callLinkInfos.size(); i--;) {
2570         CallLinkInfo& info = m_callLinkInfos[i];
2571         if (!info.stub)
2572             continue;
2573         if (!info.stub->code().executableMemory()->contains(returnAddress.value()))
2574             continue;
2575
2576         RELEASE_ASSERT(info.stub->codeOrigin().bytecodeIndex != CodeOrigin::invalidBytecodeIndex);
2577         return info.stub.get();
2578     }
2579     
2580     // The stub routine may have been jettisoned. This is rare, but we have to handle it.
2581     const JITStubRoutineSet& set = m_vm->heap.jitStubRoutines();
2582     for (unsigned i = set.size(); i--;) {
2583         GCAwareJITStubRoutine* genericStub = set.at(i);
2584         if (!genericStub->isClosureCall())
2585             continue;
2586         ClosureCallStubRoutine* stub = static_cast<ClosureCallStubRoutine*>(genericStub);
2587         if (!stub->code().executableMemory()->contains(returnAddress.value()))
2588             continue;
2589         RELEASE_ASSERT(stub->codeOrigin().bytecodeIndex != CodeOrigin::invalidBytecodeIndex);
2590         return stub;
2591     }
2592     
2593     return 0;
2594 }
2595 #endif
2596
2597 unsigned CodeBlock::bytecodeOffset(ExecState* exec, ReturnAddressPtr returnAddress)
2598 {
2599     UNUSED_PARAM(exec);
2600     UNUSED_PARAM(returnAddress);
2601 #if ENABLE(LLINT)
2602 #if !ENABLE(LLINT_C_LOOP)
2603     // When using the JIT, we could have return addresses that are not bytecode
2604     // addresses. We check that the return address is in the LLInt glue and
2605     // opcode handler range here to ensure that we are looking at bytecode
2606     // before attempting to convert the return address into a bytecode offset.
2607     //
2608     // In the case of the C Loop LLInt, the JIT is disabled, and the only
2609     // valid return addresses should be bytecode PCs. So, we can and must
2610     // forgo this check: when we do not ENABLE(COMPUTED_GOTO_OPCODES), the
2611     // bytecode "PC"s are actually opcodeIDs and are not bounded by
2612     // llint_begin and llint_end.
2613     if (returnAddress.value() >= LLInt::getCodePtr(llint_begin)
2614         && returnAddress.value() <= LLInt::getCodePtr(llint_end))
2615 #endif
2616     {
2617         RELEASE_ASSERT(exec->codeBlock());
2618         RELEASE_ASSERT(exec->codeBlock() == this);
2619         RELEASE_ASSERT(JITCode::isBaselineCode(jitType()));
2620         Instruction* instruction = exec->currentVPC();
2621         RELEASE_ASSERT(instruction);
2622
2623         return bytecodeOffset(instruction);
2624     }
2625 #endif // ENABLE(LLINT)
2626
2627 #if ENABLE(JIT)
2628     if (!m_rareData)
2629         return 1;
2630     Vector<CallReturnOffsetToBytecodeOffset, 0, UnsafeVectorOverflow>& callIndices = m_rareData->m_callReturnIndexVector;
2631     if (!callIndices.size())
2632         return 1;
2633     
2634     if (jitCode()->contains(returnAddress.value())) {
2635         unsigned callReturnOffset = jitCode()->offsetOf(returnAddress.value());
2636         CallReturnOffsetToBytecodeOffset* result =
2637             binarySearch<CallReturnOffsetToBytecodeOffset, unsigned>(
2638                 callIndices, callIndices.size(), callReturnOffset, getCallReturnOffset);
2639         RELEASE_ASSERT(result->callReturnOffset == callReturnOffset);
2640         RELEASE_ASSERT(result->bytecodeOffset < instructionCount());
2641         return result->bytecodeOffset;
2642     }
2643     ClosureCallStubRoutine* closureInfo = findClosureCallForReturnPC(returnAddress);
2644     CodeOrigin origin = closureInfo->codeOrigin();
2645     while (InlineCallFrame* inlineCallFrame = origin.inlineCallFrame) {
2646         if (inlineCallFrame->baselineCodeBlock() == this)
2647             break;
2648         origin = inlineCallFrame->caller;
2649         RELEASE_ASSERT(origin.bytecodeIndex != CodeOrigin::invalidBytecodeIndex);
2650     }
2651     RELEASE_ASSERT(origin.bytecodeIndex != CodeOrigin::invalidBytecodeIndex);
2652     unsigned bytecodeIndex = origin.bytecodeIndex;
2653     RELEASE_ASSERT(bytecodeIndex < instructionCount());
2654     return bytecodeIndex;
2655 #endif // ENABLE(JIT)
2656
2657 #if !ENABLE(LLINT) && !ENABLE(JIT)
2658     return 1;
2659 #endif
2660 }
2661
2662 void CodeBlock::clearEvalCache()
2663 {
2664     if (!!m_alternative)
2665         m_alternative->clearEvalCache();
2666     if (!m_rareData)
2667         return;
2668     m_rareData->m_evalCodeCache.clear();
2669 }
2670
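// Helper for copyPostParseDataFrom() below: overwrite the first target.size() entries
// of target with the corresponding entries of source (source must be at least as large).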
2671 template<typename T, size_t inlineCapacity, typename U, typename V>
2672 inline void replaceExistingEntries(Vector<T, inlineCapacity, U>& target, Vector<T, inlineCapacity, V>& source)
2673 {
2674     ASSERT(target.size() <= source.size());
2675     for (size_t i = 0; i < target.size(); ++i)
2676         target[i] = source[i];
2677 }
2678
2679 void CodeBlock::copyPostParseDataFrom(CodeBlock* alternative)
2680 {
2681     if (!alternative)
2682         return;
2683     
2684     replaceExistingEntries(m_constantRegisters, alternative->m_constantRegisters);
2685     replaceExistingEntries(m_functionDecls, alternative->m_functionDecls);
2686     replaceExistingEntries(m_functionExprs, alternative->m_functionExprs);
2687     if (!!m_rareData && !!alternative->m_rareData)
2688         replaceExistingEntries(m_rareData->m_constantBuffers, alternative->m_rareData->m_constantBuffers);
2689 }
2690
2691 void CodeBlock::copyPostParseDataFromAlternative()
2692 {
2693     copyPostParseDataFrom(m_alternative.get());
2694 }
2695
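// Shared implementation of the synchronous and asynchronous prepare-for-execution paths:
// InterpreterThunk requests install the LLInt entrypoints directly, optimizing tiers are
// handed to DFG::tryCompile() (which may defer the work and invoke the callback later),
// and everything else is compiled synchronously with the baseline JIT.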
2696 CompilationResult CodeBlock::prepareForExecutionImpl(
2697     ExecState* exec, JITCode::JITType jitType, JITCompilationEffort effort,
2698     unsigned bytecodeIndex, PassRefPtr<DeferredCompilationCallback> callback)
2699 {
2700     VM& vm = exec->vm();
2701     
2702     if (jitType == JITCode::InterpreterThunk) {
2703         switch (codeType()) {
2704         case GlobalCode:
2705             LLInt::setProgramEntrypoint(vm, static_cast<ProgramCodeBlock*>(this));
2706             break;
2707         case EvalCode:
2708             LLInt::setEvalEntrypoint(vm, static_cast<EvalCodeBlock*>(this));
2709             break;
2710         case FunctionCode:
2711             LLInt::setFunctionEntrypoint(vm, static_cast<FunctionCodeBlock*>(this));
2712             break;
2713         }
2714         return CompilationSuccessful;
2715     }
2716     
2717 #if ENABLE(JIT)
2718     if (JITCode::isOptimizingJIT(jitType)) {
2719         ASSERT(effort == JITCompilationCanFail);
2720         bool hadCallback = !!callback;
2721         CompilationResult result = DFG::tryCompile(exec, this, bytecodeIndex, callback);
2722         ASSERT_UNUSED(hadCallback, result != CompilationDeferred || hadCallback);
2723         return result;
2724     }
2725     
2726     MacroAssemblerCodePtr jitCodeWithArityCheck;
2727     RefPtr<JITCode> jitCode = JIT::compile(&vm, this, effort, &jitCodeWithArityCheck);
2728     if (!jitCode)
2729         return CompilationFailed;
2730     setJITCode(jitCode, jitCodeWithArityCheck);
2731     return CompilationSuccessful;
2732 #else
2733     UNUSED_PARAM(effort);
2734     UNUSED_PARAM(bytecodeIndex);
2735     UNUSED_PARAM(callback);
2736     return CompilationFailed;
2737 #endif // ENABLE(JIT)
2738 }
2739
2740 CompilationResult CodeBlock::prepareForExecution(
2741     ExecState* exec, JITCode::JITType jitType,
2742     JITCompilationEffort effort, unsigned bytecodeIndex)
2743 {
2744     CompilationResult result =
2745         prepareForExecutionImpl(exec, jitType, effort, bytecodeIndex, 0);
2746     ASSERT(result != CompilationDeferred);
2747     return result;
2748 }
2749
2750 CompilationResult CodeBlock::prepareForExecutionAsynchronously(
2751     ExecState* exec, JITCode::JITType jitType,
2752     PassRefPtr<DeferredCompilationCallback> passedCallback,
2753     JITCompilationEffort effort, unsigned bytecodeIndex)
2754 {
2755     RefPtr<DeferredCompilationCallback> callback = passedCallback;
2756     CompilationResult result =
2757         prepareForExecutionImpl(exec, jitType, effort, bytecodeIndex, callback);
2758     if (result != CompilationDeferred)
2759         callback->compilationDidComplete(this, result);
2760     return result;
2761 }
2762
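// install() makes this CodeBlock the one its owner executable actually runs;
// newReplacement() asks the owner for a fresh CodeBlock of the same specialization kind.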
2763 void CodeBlock::install()
2764 {
2765     ownerExecutable()->installCode(this);
2766 }
2767
2768 PassRefPtr<CodeBlock> CodeBlock::newReplacement()
2769 {
2770     return ownerExecutable()->newReplacementCodeBlockFor(specializationKind());
2771 }
2772
2773 #if ENABLE(JIT)
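// Throw away this block's optimized replacement and record the reoptimization so that
// future tier-up thresholds back off (see the counterValueFor* helpers below).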
2774 void CodeBlock::reoptimize()
2775 {
2776     ASSERT(replacement() != this);
2777     ASSERT(replacement()->alternative() == this);
2778     if (DFG::shouldShowDisassembly())
2779         dataLog(*replacement(), " will be jettisoned due to reoptimization of ", *this, ".\n");
2780     replacement()->jettison();
2781     countReoptimization();
2782 }
2783
2784 CodeBlock* ProgramCodeBlock::replacement()
2785 {
2786     return &static_cast<ProgramExecutable*>(ownerExecutable())->generatedBytecode();
2787 }
2788
2789 CodeBlock* EvalCodeBlock::replacement()
2790 {
2791     return &static_cast<EvalExecutable*>(ownerExecutable())->generatedBytecode();
2792 }
2793
2794 CodeBlock* FunctionCodeBlock::replacement()
2795 {
2796     return &static_cast<FunctionExecutable*>(ownerExecutable())->generatedBytecodeFor(m_isConstructor ? CodeForConstruct : CodeForCall);
2797 }
2798
2799 DFG::CapabilityLevel ProgramCodeBlock::capabilityLevelInternal()
2800 {
2801     return DFG::programCapabilityLevel(this);
2802 }
2803
2804 DFG::CapabilityLevel EvalCodeBlock::capabilityLevelInternal()
2805 {
2806     return DFG::evalCapabilityLevel(this);
2807 }
2808
2809 DFG::CapabilityLevel FunctionCodeBlock::capabilityLevelInternal()
2810 {
2811     if (m_isConstructor)
2812         return DFG::functionForConstructCapabilityLevel(this);
2813     return DFG::functionForCallCapabilityLevel(this);
2814 }
2815
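// Called on an optimized replacement block: re-arm the baseline alternative's warm-up
// counter, tally frequent exit sites, and then have the owner executable drop this
// block's optimized code via jettisonImpl().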
2816 void CodeBlock::jettison()
2817 {
2818     ASSERT(JITCode::isOptimizingJIT(jitType()));
2819     ASSERT(this == replacement());
2820     alternative()->optimizeAfterWarmUp();
2821     tallyFrequentExitSites();
2822     if (DFG::shouldShowDisassembly())
2823         dataLog("Jettisoning ", *this, ".\n");
2824     jettisonImpl();
2825 }
2826
2827 void ProgramCodeBlock::jettisonImpl()
2828 {
2829     static_cast<ProgramExecutable*>(ownerExecutable())->jettisonOptimizedCode(*vm());
2830 }
2831
2832 void EvalCodeBlock::jettisonImpl()
2833 {
2834     static_cast<EvalExecutable*>(ownerExecutable())->jettisonOptimizedCode(*vm());
2835 }
2836
2837 void FunctionCodeBlock::jettisonImpl()
2838 {
2839     static_cast<FunctionExecutable*>(ownerExecutable())->jettisonOptimizedCodeFor(*vm(), m_isConstructor ? CodeForConstruct : CodeForCall);
2840 }
2841 #endif
2842
2843 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
2844 {
2845     if (!codeOrigin.inlineCallFrame)
2846         return globalObject();
2847     return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->generatedBytecode().globalObject();
2848 }
2849
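// Inlining heuristic: each incoming call gives this block a chance to clear its
// m_shouldAlwaysBeInlined ("SABI") bit when the caller is unlikely to ever inline us:
// interpreted callers, non-function callers, recursive callers, or callers that the
// DFG cannot compile.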
2850 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
2851 {
2852     CodeBlock* callerCodeBlock = callerFrame->codeBlock();
2853     
2854     if (Options::verboseCallLink())
2855         dataLog("Noticing call link from ", *callerCodeBlock, " to ", *this, "\n");
2856     
2857     if (!m_shouldAlwaysBeInlined)
2858         return;
2859
2860 #if ENABLE(DFG_JIT)
2861     if (!hasBaselineJITProfiling())
2862         return;
2863
2864     if (!DFG::mightInlineFunction(this))
2865         return;
2866
2867     if (!canInline(m_capabilityLevelState))
2868         return;
2869
2870     if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
2871         // If the caller is still in the interpreter, then we can't expect inlining to
2872         // happen anytime soon. Assume it's profitable to optimize it separately. This
2873         // ensures that a function is SABI only if it is called no more frequently than
2874         // any of its callers.
2875         m_shouldAlwaysBeInlined = false;
2876         if (Options::verboseCallLink())
2877             dataLog("    Marking SABI because caller is in LLInt.\n");
2878         return;
2879     }
2880     
2881     if (callerCodeBlock->codeType() != FunctionCode) {
2882         // If the caller is either eval or global code, assume that it won't be
2883         // optimized anytime soon. For eval code this is particularly true since we
2884         // delay eval optimization by a *lot*.
2885         m_shouldAlwaysBeInlined = false;
2886         if (Options::verboseCallLink())
2887             dataLog("    Marking SABI because caller is not a function.\n");
2888         return;
2889     }
2890     
2891     ExecState* frame = callerFrame;
2892     for (unsigned i = Options::maximumInliningDepth(); i--; frame = frame->callerFrame()) {
2893         if (frame->hasHostCallFrameFlag())
2894             break;
2895         if (frame->codeBlock() == this) {
2896             // Recursive calls won't be inlined.
2897             if (Options::verboseCallLink())
2898                 dataLog("    Marking SABI because recursion was detected.\n");
2899             m_shouldAlwaysBeInlined = false;
2900             return;
2901         }
2902     }
2903     
2904     RELEASE_ASSERT(callerCodeBlock->m_capabilityLevelState != DFG::CapabilityLevelNotSet);
2905     
2906     if (canCompile(callerCodeBlock->m_capabilityLevelState))
2907         return;
2908     
2909     if (Options::verboseCallLink())
2910         dataLog("    Marking SABI because the caller is not a DFG candidate.\n");
2911     
2912     m_shouldAlwaysBeInlined = false;
2913 #endif
2914 }
2915
2916 #if ENABLE(JIT)
2917 unsigned CodeBlock::reoptimizationRetryCounter() const
2918 {
2919     ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
2920     return m_reoptimizationRetryCounter;
2921 }
2922
2923 void CodeBlock::countReoptimization()
2924 {
2925     m_reoptimizationRetryCounter++;
2926     if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
2927         m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
2928 }
2929
2930 unsigned CodeBlock::numberOfDFGCompiles()
2931 {
2932     ASSERT(JITCode::isBaselineCode(jitType()));
2933     return (JITCode::isOptimizingJIT(replacement()->jitType()) ? 1 : 0) + m_reoptimizationRetryCounter;
2934 }
2935
2936 int32_t CodeBlock::codeTypeThresholdMultiplier() const
2937 {
2938     if (codeType() == EvalCode)
2939         return Options::evalThresholdMultiplier();
2940     
2941     return 1;
2942 }
2943
2944 double CodeBlock::optimizationThresholdScalingFactor()
2945 {
2946     // This expression arises from doing a least-squares fit of
2947     //
2948     // F[x_] := a * Sqrt[x + b] + Abs[c * x] + d
2949     //
2950     // against the data points:
2951     //
2952     //    x       F[x_]
2953     //    10       0.9          (smallest reasonable code block)
2954     //   200       1.0          (typical small-ish code block)
2955     //   320       1.2          (something I saw in 3d-cube that I wanted to optimize)
2956     //  1268       5.0          (something I saw in 3d-cube that I didn't want to optimize)
2957     //  4000       5.5          (random large size, used to cause the function to converge to a shallow curve of some sort)
2958     // 10000       6.0          (similar to above)
2959     //
2960     // I achieve the minimization using the following Mathematica code:
2961     //
2962     // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
2963     //
2964     // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
2965     //
2966     // solution = 
2967     //     Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
2968     //         {a, b, c, d}][[2]]
2969     //
2970     // And the code below (to initialize a, b, c, d) is generated by:
2971     //
2972     // Print["const double " <> ToString[#[[1]]] <> " = " <>
2973     //     If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
2974     //
2975     // We've long known the following to be true:
2976     // - Small code blocks are cheap to optimize and so we should do it sooner rather
2977     //   than later.
2978     // - Large code blocks are expensive to optimize and so we should postpone doing so,
2979     //   and sometimes have a large enough threshold that we never optimize them.
2980     // - The difference in cost is not totally linear because (a) just invoking the
2981     //   DFG incurs some base cost and (b) for large code blocks there is enough slop
2982     //   in the correlation between instruction count and the actual compilation cost
2983     //   that for those large blocks, the instruction count should not have a strong
2984     //   influence on our threshold.
2985     //
2986     // I knew the goals but I didn't know how to achieve them; so I picked an interesting
2987     // example where the heuristics were right (code block in 3d-cube with instruction
2988     // count 320, which got compiled early as it should have been) and one where they were
2989     // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
2990     // to compile and didn't run often enough to warrant compilation in my opinion), and
2991     // then threw in additional data points that represented my own guess of what our
2992     // heuristics should do for some round-numbered examples.
2993     //
2994     // The expression to which I decided to fit the data arose because I started with an
2995     // affine function, and then did two things: put the linear part in an Abs to ensure
2996     // that the fit didn't end up choosing a negative value of c (which would result in
2997     // the function turning over and going negative for large x) and I threw in a Sqrt
2998     // term because Sqrt represents my intuition that the function should be more sensitive
2999     // to small changes in small values of x, but less sensitive when x gets large.
3000     
3001     // Note that the current fit essentially eliminates the linear portion of the
3002     // expression (c == 0.0).
3003     const double a = 0.061504;
3004     const double b = 1.02406;
3005     const double c = 0.0;
3006     const double d = 0.825914;
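    // For a rough sense of scale, plugging numbers into the expression below: a code
    // block of about 1000 instructions gives 0.825914 + 0.061504 * sqrt(1001.02), which
    // is roughly 2.77, before the code-type multiplier is applied.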
3007     
3008     double instructionCount = this->instructionCount();
3009     
3010     ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
3011     
3012     double result = d + a * sqrt(instructionCount + b) + c * instructionCount;
3013     if (Options::verboseOSR()) {
3014         dataLog(
3015             *this, ": instruction count is ", instructionCount,
3016             ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
3017             "\n");
3018     }
3019     return result * codeTypeThresholdMultiplier();
3020 }
3021
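// The counterValueFor* helpers below combine a base threshold from Options with the
// per-block scaling factor above, double the result for every reoptimization retry
// (1 << reoptimizationRetryCounter()), and clip it into a sane int32 range.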
3022 static int32_t clipThreshold(double threshold)
3023 {
3024     if (threshold < 1.0)
3025         return 1;
3026     
3027     if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
3028         return std::numeric_limits<int32_t>::max();
3029     
3030     return static_cast<int32_t>(threshold);
3031 }
3032
3033 int32_t CodeBlock::counterValueForOptimizeAfterWarmUp()
3034 {
3035     return clipThreshold(
3036         Options::thresholdForOptimizeAfterWarmUp() *
3037         optimizationThresholdScalingFactor() *
3038         (1 << reoptimizationRetryCounter()));
3039 }
3040
3041 int32_t CodeBlock::counterValueForOptimizeAfterLongWarmUp()
3042 {
3043     return clipThreshold(
3044         Options::thresholdForOptimizeAfterLongWarmUp() *
3045         optimizationThresholdScalingFactor() *
3046         (1 << reoptimizationRetryCounter()));
3047 }
3048
3049 int32_t CodeBlock::counterValueForOptimizeSoon()
3050 {
3051     return clipThreshold(
3052         Options::thresholdForOptimizeSoon() *
3053         optimizationThresholdScalingFactor() *
3054         (1 << reoptimizationRetryCounter()));
3055 }
3056
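// If a concurrent DFG compile of this block has already finished on the worklist,
// arrange to optimize on the very next invocation; otherwise defer to the execute
// counter's threshold check.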
3057 bool CodeBlock::checkIfOptimizationThresholdReached()
3058 {
3059 #if ENABLE(DFG_JIT)
3060     if (m_vm->worklist
3061         && m_vm->worklist->compilationState(this) == DFG::Worklist::Compiled) {
3062         optimizeNextInvocation();
3063         return true;
3064     }
3065 #endif
3066     
3067     return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
3068 }
3069
3070 void CodeBlock::optimizeNextInvocation()
3071 {
3072     if (Options::verboseOSR())
3073         dataLog(*this, ": Optimizing next invocation.\n");
3074     m_jitExecuteCounter.setNewThreshold(0, this);
3075 }
3076
3077 void CodeBlock::dontOptimizeAnytimeSoon()
3078 {
3079     if (Options::verboseOSR())
3080         dataLog(*this, ": Not optimizing anytime soon.\n");
3081     m_jitExecuteCounter.deferIndefinitely();
3082 }
3083
3084 void CodeBlock::optimizeAfterWarmUp()
3085 {
3086     if (Options::verboseOSR())
3087         dataLog(*this, ": Optimizing after warm-up.\n");
3088 #if ENABLE(DFG_JIT)
3089     m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeAfterWarmUp(), this);
3090 #endif
3091 }
3092
3093 void CodeBlock::optimizeAfterLongWarmUp()
3094 {
3095     if (Options::verboseOSR())
3096         dataLog(*this, ": Optimizing after long warm-up.\n");
3097 #if ENABLE(DFG_JIT)
3098     m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeAfterLongWarmUp(), this);
3099 #endif
3100 }
3101
3102 void CodeBlock::optimizeSoon()
3103 {
3104     if (Options::verboseOSR())
3105         dataLog(*this, ": Optimizing soon.\n");
3106 #if ENABLE(DFG_JIT)
3107     m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeSoon(), this);
3108 #endif
3109 }
3110
3111 void CodeBlock::forceOptimizationSlowPathConcurrently()
3112 {
3113     if (Options::verboseOSR())
3114         dataLog(*this, ": Forcing slow path concurrently.\n");
3115     m_jitExecuteCounter.forceSlowPathConcurrently();
3116 }
3117
3118 #if ENABLE(DFG_JIT)
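// Re-arm the baseline block's execute counter according to how a DFG compile went:
// run the optimized code on the next invocation on success, give up for a long time
// on outright failure, and otherwise retry after a warm-up (with extra backoff when
// the compile was invalidated).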
3119 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
3120 {
3121     RELEASE_ASSERT(jitType() == JITCode::BaselineJIT);
3122     RELEASE_ASSERT((result == CompilationSuccessful) == (replacement() != this));
3123     switch (result) {
3124     case CompilationSuccessful:
3125         RELEASE_ASSERT(JITCode::isOptimizingJIT(replacement()->jitType()));
3126         optimizeNextInvocation();
3127         break;
3128     case CompilationFailed:
3129         dontOptimizeAnytimeSoon();
3130         break;
3131     case CompilationDeferred:
3132         // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
3133         // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
3134         // necessarily guarantee anything. So, we make sure that even if that
3135         // function ends up being a no-op, we still eventually retry and realize
3136         // that we have optimized code ready.
3137         optimizeAfterWarmUp();
3138         break;
3139     case CompilationInvalidated:
3140         // Retry with exponential backoff.
3141         countReoptimization();
3142         optimizeAfterWarmUp();
3143         break;
3144     default:
3145         RELEASE_ASSERT_NOT_REACHED();
3146         break;
3147     }
3148 }
3149
3150 #endif
3151     
3152 static bool structureStubInfoLessThan(const StructureStubInfo& a, const StructureStubInfo& b)
3153 {
3154     return a.callReturnLocation.executableAddress() < b.callReturnLocation.executableAddress();
3155 }
3156
3157 void CodeBlock::sortStructureStubInfos()
3158 {
3159     std::sort(m_structureStubInfos.begin(), m_structureStubInfos.end(), structureStubInfoLessThan);
3160 }
3161
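// Exit-count thresholds back off with reoptimization retries as well: the desired
// threshold is doubled once per retry of the baseline version, saturating at
// UINT32_MAX on overflow. For example, a desired threshold of 100 after three
// retries becomes 100 << 3 = 800.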
3162 uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
3163 {
3164     ASSERT(JITCode::isOptimizingJIT(jitType()));
3165     // Compute this the lame way so we don't saturate. This is called infrequently
3166     // enough that this loop won't hurt us.
3167     unsigned result = desiredThreshold;
3168     for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
3169         unsigned newResult = result << 1;
3170         if (newResult < result)
3171             return std::numeric_limits<uint32_t>::max();
3172         result = newResult;
3173     }
3174     return result;
3175 }
3176
3177 uint32_t CodeBlock::exitCountThresholdForReoptimization()
3178 {
3179     return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
3180 }
3181
3182 uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
3183 {
3184     return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
3185 }
3186
3187 bool CodeBlock::shouldReoptimizeNow()
3188 {
3189     return osrExitCounter() >= exitCountThresholdForReoptimization();
3190 }
3191
3192 bool CodeBlock::shouldReoptimizeFromLoopNow()
3193 {
3194     return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
3195 }
3196 #endif
3197
3198 #if ENABLE(VALUE_PROFILER)
3199 ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
3200 {
3201     for (unsigned i = 0; i < m_arrayProfiles.size(); ++i) {
3202         if (m_arrayProfiles[i].bytecodeOffset() == bytecodeOffset)
3203             return &m_arrayProfiles[i];
3204     }
3205     return 0;
3206 }
3207
3208 ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
3209 {
3210     ArrayProfile* result = getArrayProfile(bytecodeOffset);
3211     if (result)
3212         return result;
3213     return addArrayProfile(bytecodeOffset);
3214 }
3215
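// Recompute every value profile's prediction under the concurrent JIT lock, while also
// reporting how many non-argument profiles are live (have samples or a prediction) and
// how many samples were seen in total, with each profile's contribution capped at
// ValueProfile::numberOfBuckets so extremely hot profiles don't dominate.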
3216 void CodeBlock::updateAllPredictionsAndCountLiveness(
3217     OperationInProgress operation, unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
3218 {
3219     ConcurrentJITLocker locker(m_lock);
3220     
3221     numberOfLiveNonArgumentValueProfiles = 0;
3222     numberOfSamplesInProfiles = 0; // If this, divided by ValueProfile::numberOfBuckets, equals numberOfValueProfiles(), then the value profiles are full.
3223     for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
3224         ValueProfile* profile = getFromAllValueProfiles(i);
3225         unsigned numSamples = profile->totalNumberOfSamples();
3226         if (numSamples > ValueProfile::numberOfBuckets)
3227             numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
3228         numberOfSamplesInProfiles += numSamples;
3229         if (profile->m_bytecodeOffset < 0) {
3230             profile->computeUpdatedPrediction(locker, operation);
3231             continue;
3232         }
3233         if (profile->numberOfSamples() || profile->m_prediction != SpecNone)
3234             numberOfLiveNonArgumentValueProfiles++;
3235         profile->computeUpdatedPrediction(locker, operation);
3236     }
3237     
3238 #if ENABLE(DFG_JIT)
3239     m_lazyOperandValueProfiles.computeUpdatedPredictions(locker, operation);
3240 #endif
3241 }
3242
3243 void CodeBlock::updateAllValueProfilePredictions(OperationInProgress operation)
3244 {
3245     unsigned ignoredValue1, ignoredValue2;
3246     updateAllPredictionsAndCountLiveness(operation, ignoredValue1, ignoredValue2);
3247 }
3248
3249 void CodeBlock::updateAllArrayPredictions()
3250 {
3251     ConcurrentJITLocker locker(m_lock);
3252     
3253     for (unsigned i = m_arrayProfiles.size(); i--;)
3254         m_arrayProfiles[i].computeUpdatedPrediction(locker, this);
3255     
3256     // Don't count these either, for similar reasons.
3257     for (unsigned i = m_arrayAllocationProfiles.size(); i--;)
3258         m_arrayAllocationProfiles[i].updateIndexingType();
3259 }
3260