1 /*
2  * Copyright (C) 2008, 2009, 2010, 2012, 2013 Apple Inc. All rights reserved.
3  * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1.  Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  * 2.  Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
15  *     its contributors may be used to endorse or promote products derived
16  *     from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29
30 #include "config.h"
31 #include "CodeBlock.h"
32
33 #include "BytecodeGenerator.h"
34 #include "CallLinkStatus.h"
35 #include "DFGCapabilities.h"
36 #include "DFGCommon.h"
37 #include "DFGNode.h"
38 #include "DFGRepatch.h"
39 #include "DFGWorklist.h"
40 #include "Debugger.h"
41 #include "Interpreter.h"
42 #include "JIT.h"
43 #include "JITStubs.h"
44 #include "JSActivation.h"
45 #include "JSCJSValue.h"
46 #include "JSFunction.h"
47 #include "JSNameScope.h"
48 #include "LowLevelInterpreter.h"
49 #include "Operations.h"
50 #include "PolymorphicPutByIdList.h"
51 #include "ReduceWhitespace.h"
52 #include "RepatchBuffer.h"
53 #include "SlotVisitorInlines.h"
54 #include <stdio.h>
55 #include <wtf/CommaPrinter.h>
56 #include <wtf/StringExtras.h>
57 #include <wtf/StringPrintStream.h>
58
59 #if ENABLE(DFG_JIT)
60 #include "DFGOperations.h"
61 #endif
62
63 #if ENABLE(FTL_JIT)
64 #include "FTLJITCode.h"
65 #endif
66
67 #define DUMP_CODE_BLOCK_STATISTICS 0
68
69 namespace JSC {
70
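// Returns a short human-readable name for this code block: the function's
// inferred name for function code, or a "<global>"/"<eval>" placeholder otherwise.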
71 CString CodeBlock::inferredName() const
72 {
73     switch (codeType()) {
74     case GlobalCode:
75         return "<global>";
76     case EvalCode:
77         return "<eval>";
78     case FunctionCode:
79         return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
80     default:
81         CRASH();
82         return CString("", 0);
83     }
84 }
85
86 bool CodeBlock::hasHash() const
87 {
88     return !!m_hash;
89 }
90
91 bool CodeBlock::isSafeToComputeHash() const
92 {
93     return !isCompilationThread();
94 }
95
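// Lazily computes and caches this code block's hash. The RELEASE_ASSERT below
// guards against computing it on a compilation thread (see isSafeToComputeHash()).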
96 CodeBlockHash CodeBlock::hash() const
97 {
98     if (!m_hash) {
99         RELEASE_ASSERT(isSafeToComputeHash());
100         m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind());
101     }
102     return m_hash;
103 }
104
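// Reconstructs the source text of this code block for tooling output. For
// function code, the unlinked offsets are mapped back into the linked source
// range and the result is prefixed with "function ".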
105 CString CodeBlock::sourceCodeForTools() const
106 {
107     if (codeType() != FunctionCode)
108         return ownerExecutable()->source().toUTF8();
109     
110     SourceProvider* provider = source();
111     FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
112     UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
113     unsigned unlinkedStartOffset = unlinked->startOffset();
114     unsigned linkedStartOffset = executable->source().startOffset();
115     int delta = linkedStartOffset - unlinkedStartOffset;
116     unsigned rangeStart = delta + unlinked->functionStartOffset();
117     unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
118     return toCString(
119         "function ",
120         provider->source().impl()->utf8ForRange(rangeStart, rangeEnd - rangeStart));
121 }
122
123 CString CodeBlock::sourceCodeOnOneLine() const
124 {
125     return reduceWhitespace(sourceCodeForTools());
126 }
127
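// Prints a one-line summary of this code block (inferred name, hash, pointers,
// JIT type and code type), annotated with hints such as (SABI) and (NeverInline).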
128 void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
129 {
130     if (hasHash() || isSafeToComputeHash())
131         out.print(inferredName(), "#", hash(), ":[", RawPointer(this), "->", RawPointer(ownerExecutable()), ", ", jitType, codeType());
132     else
133         out.print(inferredName(), "#<no-hash>:[", RawPointer(this), "->", RawPointer(ownerExecutable()), ", ", jitType, codeType());
134
135     if (codeType() == FunctionCode)
136         out.print(specializationKind());
137     if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
138         out.print(" (SABI)");
139     if (ownerExecutable()->neverInline())
140         out.print(" (NeverInline)");
141     out.print("]");
142 }
143
144 void CodeBlock::dump(PrintStream& out) const
145 {
146     dumpAssumingJITType(out, jitType());
147 }
148
149 static CString constantName(int k, JSValue value)
150 {
151     return toCString(value, "(@k", k - FirstConstantRegisterIndex, ")");
152 }
153
154 static CString idName(int id0, const Identifier& ident)
155 {
156     return toCString(ident.impl(), "(@id", id0, ")");
157 }
158
159 CString CodeBlock::registerName(int r) const
160 {
161     if (r == missingThisObjectMarker())
162         return "<null>";
163
164     if (isConstantRegisterIndex(r))
165         return constantName(r, getConstant(r));
166
167     return toCString("r", r);
168 }
169
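// Rebuilds a "/pattern/flags" style source string from a RegExp object.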
170 static CString regexpToSourceString(RegExp* regExp)
171 {
172     char postfix[5] = { '/', 0, 0, 0, 0 };
173     int index = 1;
174     if (regExp->global())
175         postfix[index++] = 'g';
176     if (regExp->ignoreCase())
177         postfix[index++] = 'i';
178     if (regExp->multiline())
179         postfix[index] = 'm';
180
181     return toCString("/", regExp->pattern().impl(), postfix);
182 }
183
184 static CString regexpName(int re, RegExp* regexp)
185 {
186     return toCString(regexpToSourceString(regexp), "(@re", re, ")");
187 }
188
189 NEVER_INLINE static const char* debugHookName(int debugHookID)
190 {
191     switch (static_cast<DebugHookID>(debugHookID)) {
192         case DidEnterCallFrame:
193             return "didEnterCallFrame";
194         case WillLeaveCallFrame:
195             return "willLeaveCallFrame";
196         case WillExecuteStatement:
197             return "willExecuteStatement";
198         case WillExecuteProgram:
199             return "willExecuteProgram";
200         case DidExecuteProgram:
201             return "didExecuteProgram";
202         case DidReachBreakpoint:
203             return "didReachBreakpoint";
204     }
205
206     RELEASE_ASSERT_NOT_REACHED();
207     return "";
208 }
209
210 void CodeBlock::printUnaryOp(PrintStream& out, ExecState*, int location, const Instruction*& it, const char* op)
211 {
212     int r0 = (++it)->u.operand;
213     int r1 = (++it)->u.operand;
214
215     out.printf("[%4d] %s\t\t %s, %s", location, op, registerName(r0).data(), registerName(r1).data());
216 }
217
218 void CodeBlock::printBinaryOp(PrintStream& out, ExecState*, int location, const Instruction*& it, const char* op)
219 {
220     int r0 = (++it)->u.operand;
221     int r1 = (++it)->u.operand;
222     int r2 = (++it)->u.operand;
223     out.printf("[%4d] %s\t\t %s, %s, %s", location, op, registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
224 }
225
226 void CodeBlock::printConditionalJump(PrintStream& out, ExecState*, const Instruction*, const Instruction*& it, int location, const char* op)
227 {
228     int r0 = (++it)->u.operand;
229     int offset = (++it)->u.operand;
230     out.printf("[%4d] %s\t\t %s, %d(->%d)", location, op, registerName(r0).data(), offset, location + offset);
231 }
232
233 void CodeBlock::printGetByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it)
234 {
235     const char* op;
236     switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
237     case op_get_by_id:
238         op = "get_by_id";
239         break;
240     case op_get_by_id_out_of_line:
241         op = "get_by_id_out_of_line";
242         break;
243     case op_get_by_id_self:
244         op = "get_by_id_self";
245         break;
246     case op_get_by_id_proto:
247         op = "get_by_id_proto";
248         break;
249     case op_get_by_id_chain:
250         op = "get_by_id_chain";
251         break;
252     case op_get_by_id_getter_self:
253         op = "get_by_id_getter_self";
254         break;
255     case op_get_by_id_getter_proto:
256         op = "get_by_id_getter_proto";
257         break;
258     case op_get_by_id_getter_chain:
259         op = "get_by_id_getter_chain";
260         break;
261     case op_get_by_id_custom_self:
262         op = "get_by_id_custom_self";
263         break;
264     case op_get_by_id_custom_proto:
265         op = "get_by_id_custom_proto";
266         break;
267     case op_get_by_id_custom_chain:
268         op = "get_by_id_custom_chain";
269         break;
270     case op_get_by_id_generic:
271         op = "get_by_id_generic";
272         break;
273     case op_get_array_length:
274         op = "array_length";
275         break;
276     case op_get_string_length:
277         op = "string_length";
278         break;
279     default:
280         RELEASE_ASSERT_NOT_REACHED();
281         op = 0;
282     }
283     int r0 = (++it)->u.operand;
284     int r1 = (++it)->u.operand;
285     int id0 = (++it)->u.operand;
286     out.printf("[%4d] %s\t %s, %s, %s", location, op, registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
287     it += 4; // Increment up to the value profiler.
288 }
289
290 #if ENABLE(JIT) || ENABLE(LLINT) // unused in some configurations
291 static void dumpStructure(PrintStream& out, const char* name, ExecState* exec, Structure* structure, const Identifier& ident)
292 {
293     if (!structure)
294         return;
295     
296     out.printf("%s = %p", name, structure);
297     
298     PropertyOffset offset = structure->getConcurrently(exec->vm(), ident.impl());
299     if (offset != invalidOffset)
300         out.printf(" (offset = %d)", offset);
301 }
302 #endif
303
304 #if ENABLE(JIT) // dumpChain() is unused when the JIT is disabled, which would otherwise trigger unused-function warnings
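// Prints each structure along a prototype chain, from head to tail.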
305 static void dumpChain(PrintStream& out, ExecState* exec, StructureChain* chain, const Identifier& ident)
306 {
307     out.printf("chain = %p: [", chain);
308     bool first = true;
309     for (WriteBarrier<Structure>* currentStructure = chain->head();
310          *currentStructure;
311          ++currentStructure) {
312         if (first)
313             first = false;
314         else
315             out.printf(", ");
316         dumpStructure(out, "struct", exec, currentStructure->get(), ident);
317     }
318     out.printf("]");
319 }
320 #endif
321
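// Dumps the inline cache state recorded for a get_by_id at the given bytecode
// offset: the structure seen by the LLInt (if any) and, when the JIT is enabled,
// the stub info's access type together with the structures or chains it caches.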
322 void CodeBlock::printGetByIdCacheStatus(PrintStream& out, ExecState* exec, int location)
323 {
324     Instruction* instruction = instructions().begin() + location;
325
326     const Identifier& ident = identifier(instruction[3].u.operand);
327     
328     UNUSED_PARAM(ident); // Suppress unused-variable warnings in configurations where ident is not otherwise used.
329     
330 #if ENABLE(LLINT)
331     if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_array_length)
332         out.printf(" llint(array_length)");
333     else if (Structure* structure = instruction[4].u.structure.get()) {
334         out.printf(" llint(");
335         dumpStructure(out, "struct", exec, structure, ident);
336         out.printf(")");
337     }
338 #endif
339
340 #if ENABLE(JIT)
341     if (numberOfStructureStubInfos()) {
342         StructureStubInfo& stubInfo = getStubInfo(location);
343         if (stubInfo.seen) {
344             out.printf(" jit(");
345             
346             Structure* baseStructure = 0;
347             Structure* prototypeStructure = 0;
348             StructureChain* chain = 0;
349             PolymorphicAccessStructureList* structureList = 0;
350             int listSize = 0;
351             
352             switch (stubInfo.accessType) {
353             case access_get_by_id_self:
354                 out.printf("self");
355                 baseStructure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
356                 break;
357             case access_get_by_id_proto:
358                 out.printf("proto");
359                 baseStructure = stubInfo.u.getByIdProto.baseObjectStructure.get();
360                 prototypeStructure = stubInfo.u.getByIdProto.prototypeStructure.get();
361                 break;
362             case access_get_by_id_chain:
363                 out.printf("chain");
364                 baseStructure = stubInfo.u.getByIdChain.baseObjectStructure.get();
365                 chain = stubInfo.u.getByIdChain.chain.get();
366                 break;
367             case access_get_by_id_self_list:
368                 out.printf("self_list");
369                 structureList = stubInfo.u.getByIdSelfList.structureList;
370                 listSize = stubInfo.u.getByIdSelfList.listSize;
371                 break;
372             case access_get_by_id_proto_list:
373                 out.printf("proto_list");
374                 structureList = stubInfo.u.getByIdProtoList.structureList;
375                 listSize = stubInfo.u.getByIdProtoList.listSize;
376                 break;
377             case access_unset:
378                 out.printf("unset");
379                 break;
380             case access_get_by_id_generic:
381                 out.printf("generic");
382                 break;
383             case access_get_array_length:
384                 out.printf("array_length");
385                 break;
386             case access_get_string_length:
387                 out.printf("string_length");
388                 break;
389             default:
390                 RELEASE_ASSERT_NOT_REACHED();
391                 break;
392             }
393             
394             if (baseStructure) {
395                 out.printf(", ");
396                 dumpStructure(out, "struct", exec, baseStructure, ident);
397             }
398             
399             if (prototypeStructure) {
400                 out.printf(", ");
401                 dumpStructure(out, "prototypeStruct", exec, baseStructure, ident);
402             }
403             
404             if (chain) {
405                 out.printf(", ");
406                 dumpChain(out, exec, chain, ident);
407             }
408             
409             if (structureList) {
410                 out.printf(", list = %p: [", structureList);
411                 for (int i = 0; i < listSize; ++i) {
412                     if (i)
413                         out.printf(", ");
414                     out.printf("(");
415                     dumpStructure(out, "base", exec, structureList->list[i].base.get(), ident);
416                     if (structureList->list[i].isChain) {
417                         if (structureList->list[i].u.chain.get()) {
418                             out.printf(", ");
419                             dumpChain(out, exec, structureList->list[i].u.chain.get(), ident);
420                         }
421                     } else {
422                         if (structureList->list[i].u.proto.get()) {
423                             out.printf(", ");
424                             dumpStructure(out, "proto", exec, structureList->list[i].u.proto.get(), ident);
425                         }
426                     }
427                     out.printf(")");
428                 }
429                 out.printf("]");
430             }
431             out.printf(")");
432         }
433     }
434 #endif
435 }
436
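// Dumps a call-family opcode: destination, callee, argument count and register
// offset, optionally followed by the LLInt/JIT call link caches and the computed
// CallLinkStatus, then any array and value profiling data.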
437 void CodeBlock::printCallOp(PrintStream& out, ExecState*, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling)
438 {
439     int dst = (++it)->u.operand;
440     int func = (++it)->u.operand;
441     int argCount = (++it)->u.operand;
442     int registerOffset = (++it)->u.operand;
443     out.printf("[%4d] %s %s, %s, %d, %d", location, op, registerName(dst).data(), registerName(func).data(), argCount, registerOffset);
444     if (cacheDumpMode == DumpCaches) {
445 #if ENABLE(LLINT)
446         LLIntCallLinkInfo* callLinkInfo = it[1].u.callLinkInfo;
447         if (callLinkInfo->lastSeenCallee) {
448             out.printf(
449                 " llint(%p, exec %p)",
450                 callLinkInfo->lastSeenCallee.get(),
451                 callLinkInfo->lastSeenCallee->executable());
452         }
453 #endif
454 #if ENABLE(JIT)
455         if (numberOfCallLinkInfos()) {
456             JSFunction* target = getCallLinkInfo(location).lastSeenCallee.get();
457             if (target)
458                 out.printf(" jit(%p, exec %p)", target, target->executable());
459         }
460 #endif
461         out.print(" status(", CallLinkStatus::computeFor(this, location), ")");
462     }
463     ++it;
464     dumpArrayProfiling(out, it, hasPrintedProfiling);
465     dumpValueProfiling(out, it, hasPrintedProfiling);
466 }
467
468 void CodeBlock::printPutByIdOp(PrintStream& out, ExecState*, int location, const Instruction*& it, const char* op)
469 {
470     int r0 = (++it)->u.operand;
471     int id0 = (++it)->u.operand;
472     int r1 = (++it)->u.operand;
473     out.printf("[%4d] %s\t %s, %s, %s", location, op, registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data());
474     it += 5;
475 }
476
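// Dumps the entire bytecode stream of this code block, followed by its
// identifier, constant, regexp, exception handler and switch jump tables.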
477 void CodeBlock::dumpBytecode(PrintStream& out)
478 {
479     // We only use the ExecState* for things that don't actually lead to JS execution,
480     // like converting a JSString to a String. Hence the globalExec is appropriate.
481     ExecState* exec = m_globalObject->globalExec();
482     
483     size_t instructionCount = 0;
484
485     for (size_t i = 0; i < instructions().size(); i += opcodeLengths[exec->interpreter()->getOpcodeID(instructions()[i].u.opcode)])
486         ++instructionCount;
487
488     out.print(*this);
489     out.printf(
490         ": %lu m_instructions; %lu bytes; %d parameter(s); %d callee register(s); %d variable(s)",
491         static_cast<unsigned long>(instructions().size()),
492         static_cast<unsigned long>(instructions().size() * sizeof(Instruction)),
493         m_numParameters, m_numCalleeRegisters, m_numVars);
494     if (symbolTable() && symbolTable()->captureCount()) {
495         out.printf(
496             "; %d captured var(s) (from r%d to r%d, inclusive)",
497             symbolTable()->captureCount(), symbolTable()->captureStart(), symbolTable()->captureEnd() - 1);
498     }
499     if (usesArguments()) {
500         out.printf(
501             "; uses arguments, in r%d, r%d",
502             argumentsRegister(),
503             unmodifiedArgumentsRegister(argumentsRegister()));
504     }
505     if (needsFullScopeChain() && codeType() == FunctionCode)
506         out.printf("; activation in r%d", activationRegister());
507
508     const Instruction* begin = instructions().begin();
509     const Instruction* end = instructions().end();
510     for (const Instruction* it = begin; it != end; ++it)
511         dumpBytecode(out, exec, begin, it);
512
513     if (numberOfIdentifiers()) {
514         out.printf("\nIdentifiers:\n");
515         size_t i = 0;
516         do {
517             out.printf("  id%u = %s\n", static_cast<unsigned>(i), identifier(i).string().utf8().data());
518             ++i;
519         } while (i != numberOfIdentifiers());
520     }
521
522     if (!m_constantRegisters.isEmpty()) {
523         out.printf("\nConstants:\n");
524         size_t i = 0;
525         do {
526             out.printf("   k%u = %s\n", static_cast<unsigned>(i), toCString(m_constantRegisters[i].get()).data());
527             ++i;
528         } while (i < m_constantRegisters.size());
529     }
530
531     if (size_t count = m_unlinkedCode->numberOfRegExps()) {
532         out.printf("\nm_regexps:\n");
533         size_t i = 0;
534         do {
535             out.printf("  re%u = %s\n", static_cast<unsigned>(i), regexpToSourceString(m_unlinkedCode->regexp(i)).data());
536             ++i;
537         } while (i < count);
538     }
539
540 #if ENABLE(JIT)
541     if (!m_structureStubInfos.isEmpty())
542         out.printf("\nStructures:\n");
543 #endif
544
545     if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
546         out.printf("\nException Handlers:\n");
547         unsigned i = 0;
548         do {
549             out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] depth: [%4d] }\n", i + 1, m_rareData->m_exceptionHandlers[i].start, m_rareData->m_exceptionHandlers[i].end, m_rareData->m_exceptionHandlers[i].target, m_rareData->m_exceptionHandlers[i].scopeDepth);
550             ++i;
551         } while (i < m_rareData->m_exceptionHandlers.size());
552     }
553     
554     if (m_rareData && !m_rareData->m_switchJumpTables.isEmpty()) {
555         out.printf("Switch Jump Tables:\n");
556         unsigned i = 0;
557         do {
558             out.printf("  %1d = {\n", i);
559             int entry = 0;
560             Vector<int32_t>::const_iterator end = m_rareData->m_switchJumpTables[i].branchOffsets.end();
561             for (Vector<int32_t>::const_iterator iter = m_rareData->m_switchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) {
562                 if (!*iter)
563                     continue;
564                 out.printf("\t\t%4d => %04d\n", entry + m_rareData->m_switchJumpTables[i].min, *iter);
565             }
566             out.printf("      }\n");
567             ++i;
568         } while (i < m_rareData->m_switchJumpTables.size());
569     }
570     
571     if (m_rareData && !m_rareData->m_stringSwitchJumpTables.isEmpty()) {
572         out.printf("\nString Switch Jump Tables:\n");
573         unsigned i = 0;
574         do {
575             out.printf("  %1d = {\n", i);
576             StringJumpTable::StringOffsetTable::const_iterator end = m_rareData->m_stringSwitchJumpTables[i].offsetTable.end();
577             for (StringJumpTable::StringOffsetTable::const_iterator iter = m_rareData->m_stringSwitchJumpTables[i].offsetTable.begin(); iter != end; ++iter)
578                 out.printf("\t\t\"%s\" => %04d\n", String(iter->key).utf8().data(), iter->value.branchOffset);
579             out.printf("      }\n");
580             ++i;
581         } while (i < m_rareData->m_stringSwitchJumpTables.size());
582     }
583
584     out.printf("\n");
585 }
586
587 void CodeBlock::beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling)
588 {
589     if (hasPrintedProfiling) {
590         out.print("; ");
591         return;
592     }
593     
594     out.print("    ");
595     hasPrintedProfiling = true;
596 }
597
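// Appends the value profile's brief description for the current instruction,
// when value profiling is enabled and the profile has something to report.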
598 void CodeBlock::dumpValueProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
599 {
600     ConcurrentJITLocker locker(m_lock);
601     
602     ++it;
603 #if ENABLE(VALUE_PROFILER)
604     CString description = it->u.profile->briefDescription(locker);
605     if (!description.length())
606         return;
607     beginDumpProfiling(out, hasPrintedProfiling);
608     out.print(description);
609 #else
610     UNUSED_PARAM(out);
611     UNUSED_PARAM(hasPrintedProfiling);
612 #endif
613 }
614
615 void CodeBlock::dumpArrayProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
616 {
617     ConcurrentJITLocker locker(m_lock);
618     
619     ++it;
620 #if ENABLE(VALUE_PROFILER)
621     if (!it->u.arrayProfile)
622         return;
623     CString description = it->u.arrayProfile->briefDescription(locker, this);
624     if (!description.length())
625         return;
626     beginDumpProfiling(out, hasPrintedProfiling);
627     out.print(description);
628 #else
629     UNUSED_PARAM(out);
630     UNUSED_PARAM(hasPrintedProfiling);
631 #endif
632 }
633
634 #if ENABLE(VALUE_PROFILER)
635 void CodeBlock::dumpRareCaseProfile(PrintStream& out, const char* name, RareCaseProfile* profile, bool& hasPrintedProfiling)
636 {
637     if (!profile || !profile->m_counter)
638         return;
639
640     beginDumpProfiling(out, hasPrintedProfiling);
641     out.print(name, profile->m_counter);
642 }
643 #endif
644
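// Dumps a single instruction at the given iterator position, advancing the
// iterator past the instruction's operands. Rare-case profiles and any frequent
// OSR exit sites recorded for this bytecode offset are appended at the end.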
645 void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it)
646 {
647     int location = it - begin;
648     bool hasPrintedProfiling = false;
649     switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
650         case op_enter: {
651             out.printf("[%4d] enter", location);
652             break;
653         }
654         case op_create_activation: {
655             int r0 = (++it)->u.operand;
656             out.printf("[%4d] create_activation %s", location, registerName(r0).data());
657             break;
658         }
659         case op_create_arguments: {
660             int r0 = (++it)->u.operand;
661             out.printf("[%4d] create_arguments\t %s", location, registerName(r0).data());
662             break;
663         }
664         case op_init_lazy_reg: {
665             int r0 = (++it)->u.operand;
666             out.printf("[%4d] init_lazy_reg\t %s", location, registerName(r0).data());
667             break;
668         }
669         case op_get_callee: {
670             int r0 = (++it)->u.operand;
671             out.printf("[%4d] get_callee %s\n", location, registerName(r0).data());
672             ++it;
673             break;
674         }
675         case op_create_this: {
676             int r0 = (++it)->u.operand;
677             int r1 = (++it)->u.operand;
678             unsigned inferredInlineCapacity = (++it)->u.operand;
679             out.printf("[%4d] create_this %s, %s, %u", location, registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity);
680             break;
681         }
682         case op_to_this: {
683             int r0 = (++it)->u.operand;
684             out.printf("[%4d] to_this\t %s", location, registerName(r0).data());
685             ++it; // Skip value profile.
686             break;
687         }
688         case op_new_object: {
689             int r0 = (++it)->u.operand;
690             unsigned inferredInlineCapacity = (++it)->u.operand;
691             out.printf("[%4d] new_object\t %s, %u", location, registerName(r0).data(), inferredInlineCapacity);
692             ++it; // Skip object allocation profile.
693             break;
694         }
695         case op_new_array: {
696             int dst = (++it)->u.operand;
697             int argv = (++it)->u.operand;
698             int argc = (++it)->u.operand;
699             out.printf("[%4d] new_array\t %s, %s, %d", location, registerName(dst).data(), registerName(argv).data(), argc);
700             ++it; // Skip array allocation profile.
701             break;
702         }
703         case op_new_array_with_size: {
704             int dst = (++it)->u.operand;
705             int length = (++it)->u.operand;
706             out.printf("[%4d] new_array_with_size\t %s, %s", location, registerName(dst).data(), registerName(length).data());
707             ++it; // Skip array allocation profile.
708             break;
709         }
710         case op_new_array_buffer: {
711             int dst = (++it)->u.operand;
712             int argv = (++it)->u.operand;
713             int argc = (++it)->u.operand;
714             out.printf("[%4d] new_array_buffer\t %s, %d, %d", location, registerName(dst).data(), argv, argc);
715             ++it; // Skip array allocation profile.
716             break;
717         }
718         case op_new_regexp: {
719             int r0 = (++it)->u.operand;
720             int re0 = (++it)->u.operand;
721             out.printf("[%4d] new_regexp\t %s, ", location, registerName(r0).data());
722             if (r0 >=0 && r0 < (int)m_unlinkedCode->numberOfRegExps())
723                 out.printf("%s", regexpName(re0, regexp(re0)).data());
724             else
725                 out.printf("bad_regexp(%d)", re0);
726             break;
727         }
728         case op_mov: {
729             int r0 = (++it)->u.operand;
730             int r1 = (++it)->u.operand;
731             out.printf("[%4d] mov\t\t %s, %s", location, registerName(r0).data(), registerName(r1).data());
732             break;
733         }
734         case op_not: {
735             printUnaryOp(out, exec, location, it, "not");
736             break;
737         }
738         case op_eq: {
739             printBinaryOp(out, exec, location, it, "eq");
740             break;
741         }
742         case op_eq_null: {
743             printUnaryOp(out, exec, location, it, "eq_null");
744             break;
745         }
746         case op_neq: {
747             printBinaryOp(out, exec, location, it, "neq");
748             break;
749         }
750         case op_neq_null: {
751             printUnaryOp(out, exec, location, it, "neq_null");
752             break;
753         }
754         case op_stricteq: {
755             printBinaryOp(out, exec, location, it, "stricteq");
756             break;
757         }
758         case op_nstricteq: {
759             printBinaryOp(out, exec, location, it, "nstricteq");
760             break;
761         }
762         case op_less: {
763             printBinaryOp(out, exec, location, it, "less");
764             break;
765         }
766         case op_lesseq: {
767             printBinaryOp(out, exec, location, it, "lesseq");
768             break;
769         }
770         case op_greater: {
771             printBinaryOp(out, exec, location, it, "greater");
772             break;
773         }
774         case op_greatereq: {
775             printBinaryOp(out, exec, location, it, "greatereq");
776             break;
777         }
778         case op_inc: {
779             int r0 = (++it)->u.operand;
780             out.printf("[%4d] pre_inc\t\t %s", location, registerName(r0).data());
781             break;
782         }
783         case op_dec: {
784             int r0 = (++it)->u.operand;
785             out.printf("[%4d] pre_dec\t\t %s", location, registerName(r0).data());
786             break;
787         }
788         case op_to_number: {
789             printUnaryOp(out, exec, location, it, "to_number");
790             break;
791         }
792         case op_negate: {
793             printUnaryOp(out, exec, location, it, "negate");
794             break;
795         }
796         case op_add: {
797             printBinaryOp(out, exec, location, it, "add");
798             ++it;
799             break;
800         }
801         case op_mul: {
802             printBinaryOp(out, exec, location, it, "mul");
803             ++it;
804             break;
805         }
806         case op_div: {
807             printBinaryOp(out, exec, location, it, "div");
808             ++it;
809             break;
810         }
811         case op_mod: {
812             printBinaryOp(out, exec, location, it, "mod");
813             break;
814         }
815         case op_sub: {
816             printBinaryOp(out, exec, location, it, "sub");
817             ++it;
818             break;
819         }
820         case op_lshift: {
821             printBinaryOp(out, exec, location, it, "lshift");
822             break;            
823         }
824         case op_rshift: {
825             printBinaryOp(out, exec, location, it, "rshift");
826             break;
827         }
828         case op_urshift: {
829             printBinaryOp(out, exec, location, it, "urshift");
830             break;
831         }
832         case op_bitand: {
833             printBinaryOp(out, exec, location, it, "bitand");
834             ++it;
835             break;
836         }
837         case op_bitxor: {
838             printBinaryOp(out, exec, location, it, "bitxor");
839             ++it;
840             break;
841         }
842         case op_bitor: {
843             printBinaryOp(out, exec, location, it, "bitor");
844             ++it;
845             break;
846         }
847         case op_check_has_instance: {
848             int r0 = (++it)->u.operand;
849             int r1 = (++it)->u.operand;
850             int r2 = (++it)->u.operand;
851             int offset = (++it)->u.operand;
852             out.printf("[%4d] check_has_instance\t\t %s, %s, %s, %d(->%d)", location, registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), offset, location + offset);
853             break;
854         }
855         case op_instanceof: {
856             int r0 = (++it)->u.operand;
857             int r1 = (++it)->u.operand;
858             int r2 = (++it)->u.operand;
859             out.printf("[%4d] instanceof\t\t %s, %s, %s", location, registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
860             break;
861         }
862         case op_typeof: {
863             printUnaryOp(out, exec, location, it, "typeof");
864             break;
865         }
866         case op_is_undefined: {
867             printUnaryOp(out, exec, location, it, "is_undefined");
868             break;
869         }
870         case op_is_boolean: {
871             printUnaryOp(out, exec, location, it, "is_boolean");
872             break;
873         }
874         case op_is_number: {
875             printUnaryOp(out, exec, location, it, "is_number");
876             break;
877         }
878         case op_is_string: {
879             printUnaryOp(out, exec, location, it, "is_string");
880             break;
881         }
882         case op_is_object: {
883             printUnaryOp(out, exec, location, it, "is_object");
884             break;
885         }
886         case op_is_function: {
887             printUnaryOp(out, exec, location, it, "is_function");
888             break;
889         }
890         case op_in: {
891             printBinaryOp(out, exec, location, it, "in");
892             break;
893         }
894         case op_init_global_const_nop: {
895             out.printf("[%4d] init_global_const_nop\t", location);
896             it++;
897             it++;
898             it++;
899             it++;
900             break;
901         }
902         case op_init_global_const: {
903             WriteBarrier<Unknown>* registerPointer = (++it)->u.registerPointer;
904             int r0 = (++it)->u.operand;
905             out.printf("[%4d] init_global_const\t g%d(%p), %s", location, m_globalObject->findRegisterIndex(registerPointer), registerPointer, registerName(r0).data());
906             it++;
907             it++;
908             break;
909         }
910         case op_get_by_id:
911         case op_get_by_id_out_of_line:
912         case op_get_by_id_self:
913         case op_get_by_id_proto:
914         case op_get_by_id_chain:
915         case op_get_by_id_getter_self:
916         case op_get_by_id_getter_proto:
917         case op_get_by_id_getter_chain:
918         case op_get_by_id_custom_self:
919         case op_get_by_id_custom_proto:
920         case op_get_by_id_custom_chain:
921         case op_get_by_id_generic:
922         case op_get_array_length:
923         case op_get_string_length: {
924             printGetByIdOp(out, exec, location, it);
925             printGetByIdCacheStatus(out, exec, location);
926             dumpValueProfiling(out, it, hasPrintedProfiling);
927             break;
928         }
929         case op_get_arguments_length: {
930             printUnaryOp(out, exec, location, it, "get_arguments_length");
931             it++;
932             break;
933         }
934         case op_put_by_id: {
935             printPutByIdOp(out, exec, location, it, "put_by_id");
936             break;
937         }
938         case op_put_by_id_out_of_line: {
939             printPutByIdOp(out, exec, location, it, "put_by_id_out_of_line");
940             break;
941         }
942         case op_put_by_id_replace: {
943             printPutByIdOp(out, exec, location, it, "put_by_id_replace");
944             break;
945         }
946         case op_put_by_id_transition: {
947             printPutByIdOp(out, exec, location, it, "put_by_id_transition");
948             break;
949         }
950         case op_put_by_id_transition_direct: {
951             printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct");
952             break;
953         }
954         case op_put_by_id_transition_direct_out_of_line: {
955             printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct_out_of_line");
956             break;
957         }
958         case op_put_by_id_transition_normal: {
959             printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal");
960             break;
961         }
962         case op_put_by_id_transition_normal_out_of_line: {
963             printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal_out_of_line");
964             break;
965         }
966         case op_put_by_id_generic: {
967             printPutByIdOp(out, exec, location, it, "put_by_id_generic");
968             break;
969         }
970         case op_put_getter_setter: {
971             int r0 = (++it)->u.operand;
972             int id0 = (++it)->u.operand;
973             int r1 = (++it)->u.operand;
974             int r2 = (++it)->u.operand;
975             out.printf("[%4d] put_getter_setter\t %s, %s, %s, %s", location, registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data(), registerName(r2).data());
976             break;
977         }
978         case op_del_by_id: {
979             int r0 = (++it)->u.operand;
980             int r1 = (++it)->u.operand;
981             int id0 = (++it)->u.operand;
982             out.printf("[%4d] del_by_id\t %s, %s, %s", location, registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
983             break;
984         }
985         case op_get_by_val: {
986             int r0 = (++it)->u.operand;
987             int r1 = (++it)->u.operand;
988             int r2 = (++it)->u.operand;
989             out.printf("[%4d] get_by_val\t %s, %s, %s", location, registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
990             dumpArrayProfiling(out, it, hasPrintedProfiling);
991             dumpValueProfiling(out, it, hasPrintedProfiling);
992             break;
993         }
994         case op_get_argument_by_val: {
995             int r0 = (++it)->u.operand;
996             int r1 = (++it)->u.operand;
997             int r2 = (++it)->u.operand;
998             out.printf("[%4d] get_argument_by_val\t %s, %s, %s", location, registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
999             ++it;
1000             dumpValueProfiling(out, it, hasPrintedProfiling);
1001             break;
1002         }
1003         case op_get_by_pname: {
1004             int r0 = (++it)->u.operand;
1005             int r1 = (++it)->u.operand;
1006             int r2 = (++it)->u.operand;
1007             int r3 = (++it)->u.operand;
1008             int r4 = (++it)->u.operand;
1009             int r5 = (++it)->u.operand;
1010             out.printf("[%4d] get_by_pname\t %s, %s, %s, %s, %s, %s", location, registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data(), registerName(r4).data(), registerName(r5).data());
1011             break;
1012         }
1013         case op_put_by_val: {
1014             int r0 = (++it)->u.operand;
1015             int r1 = (++it)->u.operand;
1016             int r2 = (++it)->u.operand;
1017             out.printf("[%4d] put_by_val\t %s, %s, %s", location, registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1018             dumpArrayProfiling(out, it, hasPrintedProfiling);
1019             break;
1020         }
1021         case op_del_by_val: {
1022             int r0 = (++it)->u.operand;
1023             int r1 = (++it)->u.operand;
1024             int r2 = (++it)->u.operand;
1025             out.printf("[%4d] del_by_val\t %s, %s, %s", location, registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1026             break;
1027         }
1028         case op_put_by_index: {
1029             int r0 = (++it)->u.operand;
1030             unsigned n0 = (++it)->u.operand;
1031             int r1 = (++it)->u.operand;
1032             out.printf("[%4d] put_by_index\t %s, %u, %s", location, registerName(r0).data(), n0, registerName(r1).data());
1033             break;
1034         }
1035         case op_jmp: {
1036             int offset = (++it)->u.operand;
1037             out.printf("[%4d] jmp\t\t %d(->%d)", location, offset, location + offset);
1038             break;
1039         }
1040         case op_jtrue: {
1041             printConditionalJump(out, exec, begin, it, location, "jtrue");
1042             break;
1043         }
1044         case op_jfalse: {
1045             printConditionalJump(out, exec, begin, it, location, "jfalse");
1046             break;
1047         }
1048         case op_jeq_null: {
1049             printConditionalJump(out, exec, begin, it, location, "jeq_null");
1050             break;
1051         }
1052         case op_jneq_null: {
1053             printConditionalJump(out, exec, begin, it, location, "jneq_null");
1054             break;
1055         }
1056         case op_jneq_ptr: {
1057             int r0 = (++it)->u.operand;
1058             Special::Pointer pointer = (++it)->u.specialPointer;
1059             int offset = (++it)->u.operand;
1060             out.printf("[%4d] jneq_ptr\t\t %s, %d (%p), %d(->%d)", location, registerName(r0).data(), pointer, m_globalObject->actualPointerFor(pointer), offset, location + offset);
1061             break;
1062         }
1063         case op_jless: {
1064             int r0 = (++it)->u.operand;
1065             int r1 = (++it)->u.operand;
1066             int offset = (++it)->u.operand;
1067             out.printf("[%4d] jless\t\t %s, %s, %d(->%d)", location, registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1068             break;
1069         }
1070         case op_jlesseq: {
1071             int r0 = (++it)->u.operand;
1072             int r1 = (++it)->u.operand;
1073             int offset = (++it)->u.operand;
1074             out.printf("[%4d] jlesseq\t\t %s, %s, %d(->%d)", location, registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1075             break;
1076         }
1077         case op_jgreater: {
1078             int r0 = (++it)->u.operand;
1079             int r1 = (++it)->u.operand;
1080             int offset = (++it)->u.operand;
1081             out.printf("[%4d] jgreater\t\t %s, %s, %d(->%d)", location, registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1082             break;
1083         }
1084         case op_jgreatereq: {
1085             int r0 = (++it)->u.operand;
1086             int r1 = (++it)->u.operand;
1087             int offset = (++it)->u.operand;
1088             out.printf("[%4d] jgreatereq\t\t %s, %s, %d(->%d)", location, registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1089             break;
1090         }
1091         case op_jnless: {
1092             int r0 = (++it)->u.operand;
1093             int r1 = (++it)->u.operand;
1094             int offset = (++it)->u.operand;
1095             out.printf("[%4d] jnless\t\t %s, %s, %d(->%d)", location, registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1096             break;
1097         }
1098         case op_jnlesseq: {
1099             int r0 = (++it)->u.operand;
1100             int r1 = (++it)->u.operand;
1101             int offset = (++it)->u.operand;
1102             out.printf("[%4d] jnlesseq\t\t %s, %s, %d(->%d)", location, registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1103             break;
1104         }
1105         case op_jngreater: {
1106             int r0 = (++it)->u.operand;
1107             int r1 = (++it)->u.operand;
1108             int offset = (++it)->u.operand;
1109             out.printf("[%4d] jngreater\t\t %s, %s, %d(->%d)", location, registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1110             break;
1111         }
1112         case op_jngreatereq: {
1113             int r0 = (++it)->u.operand;
1114             int r1 = (++it)->u.operand;
1115             int offset = (++it)->u.operand;
1116             out.printf("[%4d] jngreatereq\t\t %s, %s, %d(->%d)", location, registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1117             break;
1118         }
1119         case op_loop_hint: {
1120             out.printf("[%4d] loop_hint", location);
1121             break;
1122         }
1123         case op_switch_imm: {
1124             int tableIndex = (++it)->u.operand;
1125             int defaultTarget = (++it)->u.operand;
1126             int scrutineeRegister = (++it)->u.operand;
1127             out.printf("[%4d] switch_imm\t %d, %d(->%d), %s", location, tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1128             break;
1129         }
1130         case op_switch_char: {
1131             int tableIndex = (++it)->u.operand;
1132             int defaultTarget = (++it)->u.operand;
1133             int scrutineeRegister = (++it)->u.operand;
1134             out.printf("[%4d] switch_char\t %d, %d(->%d), %s", location, tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1135             break;
1136         }
1137         case op_switch_string: {
1138             int tableIndex = (++it)->u.operand;
1139             int defaultTarget = (++it)->u.operand;
1140             int scrutineeRegister = (++it)->u.operand;
1141             out.printf("[%4d] switch_string\t %d, %d(->%d), %s", location, tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1142             break;
1143         }
1144         case op_new_func: {
1145             int r0 = (++it)->u.operand;
1146             int f0 = (++it)->u.operand;
1147             int shouldCheck = (++it)->u.operand;
1148             out.printf("[%4d] new_func\t\t %s, f%d, %s", location, registerName(r0).data(), f0, shouldCheck ? "<Checked>" : "<Unchecked>");
1149             break;
1150         }
1151         case op_new_func_exp: {
1152             int r0 = (++it)->u.operand;
1153             int f0 = (++it)->u.operand;
1154             out.printf("[%4d] new_func_exp\t %s, f%d", location, registerName(r0).data(), f0);
1155             break;
1156         }
1157         case op_call: {
1158             printCallOp(out, exec, location, it, "call", DumpCaches, hasPrintedProfiling);
1159             break;
1160         }
1161         case op_call_eval: {
1162             printCallOp(out, exec, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling);
1163             break;
1164         }
1165         case op_call_varargs: {
1166             int result = (++it)->u.operand;
1167             int callee = (++it)->u.operand;
1168             int thisValue = (++it)->u.operand;
1169             int arguments = (++it)->u.operand;
1170             int firstFreeRegister = (++it)->u.operand;
1171             ++it;
1172             out.printf("[%4d] call_varargs\t %s, %s, %s, %s, %d", location, registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister);
1173             dumpValueProfiling(out, it, hasPrintedProfiling);
1174             break;
1175         }
1176         case op_tear_off_activation: {
1177             int r0 = (++it)->u.operand;
1178             out.printf("[%4d] tear_off_activation\t %s", location, registerName(r0).data());
1179             break;
1180         }
1181         case op_tear_off_arguments: {
1182             int r0 = (++it)->u.operand;
1183             int r1 = (++it)->u.operand;
1184             out.printf("[%4d] tear_off_arguments %s, %s", location, registerName(r0).data(), registerName(r1).data());
1185             break;
1186         }
1187         case op_ret: {
1188             int r0 = (++it)->u.operand;
1189             out.printf("[%4d] ret\t\t %s", location, registerName(r0).data());
1190             break;
1191         }
1192         case op_ret_object_or_this: {
1193             int r0 = (++it)->u.operand;
1194             int r1 = (++it)->u.operand;
1195             out.printf("[%4d] constructor_ret\t\t %s %s", location, registerName(r0).data(), registerName(r1).data());
1196             break;
1197         }
1198         case op_construct: {
1199             printCallOp(out, exec, location, it, "construct", DumpCaches, hasPrintedProfiling);
1200             break;
1201         }
1202         case op_strcat: {
1203             int r0 = (++it)->u.operand;
1204             int r1 = (++it)->u.operand;
1205             int count = (++it)->u.operand;
1206             out.printf("[%4d] strcat\t\t %s, %s, %d", location, registerName(r0).data(), registerName(r1).data(), count);
1207             break;
1208         }
1209         case op_to_primitive: {
1210             int r0 = (++it)->u.operand;
1211             int r1 = (++it)->u.operand;
1212             out.printf("[%4d] to_primitive\t %s, %s", location, registerName(r0).data(), registerName(r1).data());
1213             break;
1214         }
1215         case op_get_pnames: {
1216             int r0 = it[1].u.operand;
1217             int r1 = it[2].u.operand;
1218             int r2 = it[3].u.operand;
1219             int r3 = it[4].u.operand;
1220             int offset = it[5].u.operand;
1221             out.printf("[%4d] get_pnames\t %s, %s, %s, %s, %d(->%d)", location, registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data(), offset, location + offset);
1222             it += OPCODE_LENGTH(op_get_pnames) - 1;
1223             break;
1224         }
1225         case op_next_pname: {
1226             int dest = it[1].u.operand;
1227             int base = it[2].u.operand;
1228             int i = it[3].u.operand;
1229             int size = it[4].u.operand;
1230             int iter = it[5].u.operand;
1231             int offset = it[6].u.operand;
1232             out.printf("[%4d] next_pname\t %s, %s, %s, %s, %s, %d(->%d)", location, registerName(dest).data(), registerName(base).data(), registerName(i).data(), registerName(size).data(), registerName(iter).data(), offset, location + offset);
1233             it += OPCODE_LENGTH(op_next_pname) - 1;
1234             break;
1235         }
1236         case op_push_with_scope: {
1237             int r0 = (++it)->u.operand;
1238             out.printf("[%4d] push_with_scope\t %s", location, registerName(r0).data());
1239             break;
1240         }
1241         case op_pop_scope: {
1242             out.printf("[%4d] pop_scope", location);
1243             break;
1244         }
1245         case op_push_name_scope: {
1246             int id0 = (++it)->u.operand;
1247             int r1 = (++it)->u.operand;
1248             unsigned attributes = (++it)->u.operand;
1249             out.printf("[%4d] push_name_scope \t%s, %s, %u", location, idName(id0, identifier(id0)).data(), registerName(r1).data(), attributes);
1250             break;
1251         }
1252         case op_catch: {
1253             int r0 = (++it)->u.operand;
1254             out.printf("[%4d] catch\t\t %s", location, registerName(r0).data());
1255             break;
1256         }
1257         case op_throw: {
1258             int r0 = (++it)->u.operand;
1259             out.printf("[%4d] throw\t\t %s", location, registerName(r0).data());
1260             break;
1261         }
1262         case op_throw_static_error: {
1263             int k0 = (++it)->u.operand;
1264             int k1 = (++it)->u.operand;
1265             out.printf("[%4d] throw_static_error\t %s, %s", location, constantName(k0, getConstant(k0)).data(), k1 ? "true" : "false");
1266             break;
1267         }
1268         case op_debug: {
1269             int debugHookID = (++it)->u.operand;
1270             int firstLine = (++it)->u.operand;
1271             int lastLine = (++it)->u.operand;
1272             int column = (++it)->u.operand;
1273             out.printf("[%4d] debug\t\t %s, %d, %d, %d", location, debugHookName(debugHookID), firstLine, lastLine, column);
1274             break;
1275         }
1276         case op_profile_will_call: {
1277             int function = (++it)->u.operand;
1278             out.printf("[%4d] profile_will_call %s", location, registerName(function).data());
1279             break;
1280         }
1281         case op_profile_did_call: {
1282             int function = (++it)->u.operand;
1283             out.printf("[%4d] profile_did_call\t %s", location, registerName(function).data());
1284             break;
1285         }
1286         case op_end: {
1287             int r0 = (++it)->u.operand;
1288             out.printf("[%4d] end\t\t %s", location, registerName(r0).data());
1289             break;
1290         }
1291         case op_resolve_scope: {
1292             int r0 = (++it)->u.operand;
1293             int id0 = (++it)->u.operand;
1294             int resolveModeAndType = (++it)->u.operand;
1295             ++it; // depth
1296             out.printf("[%4d] resolve_scope\t %s, %s, %d", location, registerName(r0).data(), idName(id0, identifier(id0)).data(), resolveModeAndType);
1297             break;
1298         }
1299         case op_get_from_scope: {
1300             int r0 = (++it)->u.operand;
1301             int r1 = (++it)->u.operand;
1302             int id0 = (++it)->u.operand;
1303             int resolveModeAndType = (++it)->u.operand;
1304             ++it; // Structure
1305             ++it; // Operand
1306             ++it; // Skip value profile.
1307             out.printf("[%4d] get_from_scope\t %s, %s, %s, %d", location, registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data(), resolveModeAndType);
1308             break;
1309         }
1310         case op_put_to_scope: {
1311             int r0 = (++it)->u.operand;
1312             int id0 = (++it)->u.operand;
1313             int r1 = (++it)->u.operand;
1314             int resolveModeAndType = (++it)->u.operand;
1315             ++it; // Structure
1316             ++it; // Operand
1317             out.printf("[%4d] put_to_scope\t %s, %s, %s, %d", location, registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data(), resolveModeAndType);
1318             break;
1319         }
1320 #if ENABLE(LLINT_C_LOOP)
1321         default:
1322             RELEASE_ASSERT_NOT_REACHED();
1323 #endif
1324     }
1325
1326 #if ENABLE(VALUE_PROFILER)
1327     dumpRareCaseProfile(out, "rare case: ", rareCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
1328     dumpRareCaseProfile(out, "special fast case: ", specialFastCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
1329 #endif
1330     
1331 #if ENABLE(DFG_JIT)
1332     Vector<DFG::FrequentExitSite> exitSites = exitProfile().exitSitesFor(location);
1333     if (!exitSites.isEmpty()) {
1334         out.print(" !! frequent exits: ");
1335         CommaPrinter comma;
1336         for (unsigned i = 0; i < exitSites.size(); ++i)
1337             out.print(comma, exitSites[i].kind());
1338     }
1339 #else // ENABLE(DFG_JIT)
1340     UNUSED_PARAM(location);
1341 #endif // ENABLE(DFG_JIT)
1342     out.print("\n");
1343 }
1344
1345 void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset)
1346 {
1347     ExecState* exec = m_globalObject->globalExec();
1348     const Instruction* it = instructions().begin() + bytecodeOffset;
1349     dumpBytecode(out, exec, instructions().begin(), it);
1350 }
1351
1352 #if DUMP_CODE_BLOCK_STATISTICS
1353 static HashSet<CodeBlock*> liveCodeBlockSet;
1354 #endif
1355
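// X macros enumerating the member vectors whose sizes dumpStatistics() reports.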
1356 #define FOR_EACH_MEMBER_VECTOR(macro) \
1357     macro(instructions) \
1358     macro(structureStubInfos) \
1359     macro(callLinkInfos) \
1360     macro(linkedCallerList) \
1361     macro(identifiers) \
1362     macro(functionExpressions) \
1363     macro(constantRegisters)
1364
1365 #define FOR_EACH_MEMBER_VECTOR_RARE_DATA(macro) \
1366     macro(regexps) \
1367     macro(functions) \
1368     macro(exceptionHandlers) \
1369     macro(switchJumpTables) \
1370     macro(stringSwitchJumpTables) \
1371     macro(evalCodeCache) \
1372     macro(expressionInfo) \
1373     macro(lineInfo) \
1374     macro(callReturnIndexVector)
1375
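// Estimates a vector's heap footprint from its capacity() rather than its size(),
// so reserved-but-unused slots are included.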
1376 template<typename T>
1377 static size_t sizeInBytes(const Vector<T>& vector)
1378 {
1379     return vector.capacity() * sizeof(T);
1380 }
1381
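// Note: this only prints real statistics when DUMP_CODE_BLOCK_STATISTICS is
// enabled at the top of this file; otherwise it just reports that the feature
// is off.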
1382 void CodeBlock::dumpStatistics()
1383 {
1384 #if DUMP_CODE_BLOCK_STATISTICS
1385     #define DEFINE_VARS(name) size_t name##IsNotEmpty = 0; size_t name##TotalSize = 0;
1386         FOR_EACH_MEMBER_VECTOR(DEFINE_VARS)
1387         FOR_EACH_MEMBER_VECTOR_RARE_DATA(DEFINE_VARS)
1388     #undef DEFINE_VARS
1389
1390     // Non-vector data members
1391     size_t evalCodeCacheIsNotEmpty = 0;
1392
1393     size_t symbolTableIsNotEmpty = 0;
1394     size_t symbolTableTotalSize = 0;
1395
1396     size_t hasRareData = 0;
1397
1398     size_t isFunctionCode = 0;
1399     size_t isGlobalCode = 0;
1400     size_t isEvalCode = 0;
1401
1402     HashSet<CodeBlock*>::const_iterator end = liveCodeBlockSet.end();
1403     for (HashSet<CodeBlock*>::const_iterator it = liveCodeBlockSet.begin(); it != end; ++it) {
1404         CodeBlock* codeBlock = *it;
1405
1406         #define GET_STATS(name) if (!codeBlock->m_##name.isEmpty()) { name##IsNotEmpty++; name##TotalSize += sizeInBytes(codeBlock->m_##name); }
1407             FOR_EACH_MEMBER_VECTOR(GET_STATS)
1408         #undef GET_STATS
1409
1410         if (codeBlock->symbolTable() && !codeBlock->symbolTable()->isEmpty()) {
1411             symbolTableIsNotEmpty++;
1412             symbolTableTotalSize += (codeBlock->symbolTable()->capacity() * (sizeof(SymbolTable::KeyType) + sizeof(SymbolTable::MappedType)));
1413         }
1414
1415         if (codeBlock->m_rareData) {
1416             hasRareData++;
1417             #define GET_STATS(name) if (!codeBlock->m_rareData->m_##name.isEmpty()) { name##IsNotEmpty++; name##TotalSize += sizeInBytes(codeBlock->m_rareData->m_##name); }
1418                 FOR_EACH_MEMBER_VECTOR_RARE_DATA(GET_STATS)
1419             #undef GET_STATS
1420
1421             if (!codeBlock->m_rareData->m_evalCodeCache.isEmpty())
1422                 evalCodeCacheIsNotEmpty++;
1423         }
1424
1425         switch (codeBlock->codeType()) {
1426             case FunctionCode:
1427                 ++isFunctionCode;
1428                 break;
1429             case GlobalCode:
1430                 ++isGlobalCode;
1431                 break;
1432             case EvalCode:
1433                 ++isEvalCode;
1434                 break;
1435         }
1436     }
1437
1438     size_t totalSize = 0;
1439
1440     #define GET_TOTAL_SIZE(name) totalSize += name##TotalSize;
1441         FOR_EACH_MEMBER_VECTOR(GET_TOTAL_SIZE)
1442         FOR_EACH_MEMBER_VECTOR_RARE_DATA(GET_TOTAL_SIZE)
1443     #undef GET_TOTAL_SIZE
1444
1445     totalSize += symbolTableTotalSize;
1446     totalSize += (liveCodeBlockSet.size() * sizeof(CodeBlock));
1447
1448     dataLogF("Number of live CodeBlocks: %u\n", liveCodeBlockSet.size());
1449     dataLogF("Size of a single CodeBlock [sizeof(CodeBlock)]: %zu\n", sizeof(CodeBlock));
1450     dataLogF("Size of all CodeBlocks: %zu\n", totalSize);
1451     dataLogF("Average size of a CodeBlock: %zu\n", totalSize / liveCodeBlockSet.size());
1452
1453     dataLogF("Number of FunctionCode CodeBlocks: %zu (%.3f%%)\n", isFunctionCode, static_cast<double>(isFunctionCode) * 100.0 / liveCodeBlockSet.size());
1454     dataLogF("Number of GlobalCode CodeBlocks: %zu (%.3f%%)\n", isGlobalCode, static_cast<double>(isGlobalCode) * 100.0 / liveCodeBlockSet.size());
1455     dataLogF("Number of EvalCode CodeBlocks: %zu (%.3f%%)\n", isEvalCode, static_cast<double>(isEvalCode) * 100.0 / liveCodeBlockSet.size());
1456
1457     dataLogF("Number of CodeBlocks with rare data: %zu (%.3f%%)\n", hasRareData, static_cast<double>(hasRareData) * 100.0 / liveCodeBlockSet.size());
1458
1459     #define PRINT_STATS(name) dataLogF("Number of CodeBlocks with " #name ": %zu\n", name##IsNotEmpty); dataLogF("Size of all " #name ": %zu\n", name##TotalSize); 
1460         FOR_EACH_MEMBER_VECTOR(PRINT_STATS)
1461         FOR_EACH_MEMBER_VECTOR_RARE_DATA(PRINT_STATS)
1462     #undef PRINT_STATS
1463
1464     dataLogF("Number of CodeBlocks with evalCodeCache: %zu\n", evalCodeCacheIsNotEmpty);
1465     dataLogF("Number of CodeBlocks with symbolTable: %zu\n", symbolTableIsNotEmpty);
1466
1467     dataLogF("Size of all symbolTables: %zu\n", symbolTableTotalSize);
1468
1469 #else
1470     dataLogF("Dumping CodeBlock statistics is not enabled.\n");
1471 #endif
1472 }
1473
1474 CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
1475     : m_globalObject(other.m_globalObject)
1476     , m_heap(other.m_heap)
1477     , m_numCalleeRegisters(other.m_numCalleeRegisters)
1478     , m_numVars(other.m_numVars)
1479     , m_isConstructor(other.m_isConstructor)
1480     , m_shouldAlwaysBeInlined(true)
1481     , m_unlinkedCode(*other.m_vm, other.m_ownerExecutable.get(), other.m_unlinkedCode.get())
1482     , m_ownerExecutable(*other.m_vm, other.m_ownerExecutable.get(), other.m_ownerExecutable.get())
1483     , m_vm(other.m_vm)
1484     , m_instructions(other.m_instructions)
1485     , m_thisRegister(other.m_thisRegister)
1486     , m_argumentsRegister(other.m_argumentsRegister)
1487     , m_activationRegister(other.m_activationRegister)
1488     , m_isStrictMode(other.m_isStrictMode)
1489     , m_needsActivation(other.m_needsActivation)
1490     , m_source(other.m_source)
1491     , m_sourceOffset(other.m_sourceOffset)
1492     , m_firstLineColumnOffset(other.m_firstLineColumnOffset)
1493     , m_codeType(other.m_codeType)
1494     , m_additionalIdentifiers(other.m_additionalIdentifiers)
1495     , m_constantRegisters(other.m_constantRegisters)
1496     , m_functionDecls(other.m_functionDecls)
1497     , m_functionExprs(other.m_functionExprs)
1498     , m_osrExitCounter(0)
1499     , m_optimizationDelayCounter(0)
1500     , m_reoptimizationRetryCounter(0)
1501     , m_hash(other.m_hash)
1502 #if ENABLE(JIT)
1503     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
1504 #endif
1505 {
1506     setNumParameters(other.numParameters());
1507     optimizeAfterWarmUp();
1508     jitAfterWarmUp();
1509
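    // Only the parse-derived parts of the rare data are copied; everything else
    // (eval code cache, JIT-related tables, etc.) starts out empty in the copy.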
1510     if (other.m_rareData) {
1511         createRareDataIfNecessary();
1512         
1513         m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
1514         m_rareData->m_constantBuffers = other.m_rareData->m_constantBuffers;
1515         m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
1516         m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
1517     }
1518 }
1519
1520 CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
1521     : m_globalObject(scope->globalObject()->vm(), ownerExecutable, scope->globalObject())
1522     , m_heap(&m_globalObject->vm().heap)
1523     , m_numCalleeRegisters(unlinkedCodeBlock->m_numCalleeRegisters)
1524     , m_numVars(unlinkedCodeBlock->m_numVars)
1525     , m_isConstructor(unlinkedCodeBlock->isConstructor())
1526     , m_shouldAlwaysBeInlined(true)
1527     , m_unlinkedCode(m_globalObject->vm(), ownerExecutable, unlinkedCodeBlock)
1528     , m_ownerExecutable(m_globalObject->vm(), ownerExecutable, ownerExecutable)
1529     , m_vm(unlinkedCodeBlock->vm())
1530     , m_thisRegister(unlinkedCodeBlock->thisRegister())
1531     , m_argumentsRegister(unlinkedCodeBlock->argumentsRegister())
1532     , m_activationRegister(unlinkedCodeBlock->activationRegister())
1533     , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
1534     , m_needsActivation(unlinkedCodeBlock->needsFullScopeChain() && unlinkedCodeBlock->codeType() == FunctionCode)
1535     , m_source(sourceProvider)
1536     , m_sourceOffset(sourceOffset)
1537     , m_firstLineColumnOffset(firstLineColumnOffset)
1538     , m_codeType(unlinkedCodeBlock->codeType())
1539     , m_osrExitCounter(0)
1540     , m_optimizationDelayCounter(0)
1541     , m_reoptimizationRetryCounter(0)
1542 #if ENABLE(JIT)
1543     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
1544 #endif
1545 {
1546     m_vm->startedCompiling(this);
1547
1548     ASSERT(m_source);
1549     setNumParameters(unlinkedCodeBlock->numParameters());
1550
1551 #if DUMP_CODE_BLOCK_STATISTICS
1552     liveCodeBlockSet.add(this);
1553 #endif
1554
1555     setConstantRegisters(unlinkedCodeBlock->constantRegisters());
1556     if (unlinkedCodeBlock->usesGlobalObject())
1557         m_constantRegisters[unlinkedCodeBlock->globalObjectRegister()].set(*m_vm, ownerExecutable, m_globalObject.get());
1558     m_functionDecls.grow(unlinkedCodeBlock->numberOfFunctionDecls());
1559     for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
1560         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
1561         unsigned lineCount = unlinkedExecutable->lineCount();
1562         unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset();
1563         unsigned startColumn = unlinkedExecutable->functionStartColumn();
1564         startColumn += (unlinkedExecutable->firstLineOffset() ? 1 : ownerExecutable->startColumn());
1565         unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset();
1566         unsigned sourceLength = unlinkedExecutable->sourceLength();
1567         SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine, startColumn);
1568         FunctionExecutable* executable = FunctionExecutable::create(*m_vm, code, unlinkedExecutable, firstLine, firstLine + lineCount, startColumn);
1569         m_functionDecls[i].set(*m_vm, ownerExecutable, executable);
1570     }
1571
1572     m_functionExprs.grow(unlinkedCodeBlock->numberOfFunctionExprs());
1573     for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
1574         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
1575         unsigned lineCount = unlinkedExecutable->lineCount();
1576         unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset();
1577         unsigned startColumn = unlinkedExecutable->functionStartColumn();
1578         startColumn += (unlinkedExecutable->firstLineOffset() ? 1 : ownerExecutable->startColumn());
1579         unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset();
1580         unsigned sourceLength = unlinkedExecutable->sourceLength();
1581         SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine, startColumn);
1582         FunctionExecutable* executable = FunctionExecutable::create(*m_vm, code, unlinkedExecutable, firstLine, firstLine + lineCount, startColumn);
1583         m_functionExprs[i].set(*m_vm, ownerExecutable, executable);
1584     }
1585
1586     if (unlinkedCodeBlock->hasRareData()) {
1587         createRareDataIfNecessary();
1588         if (size_t count = unlinkedCodeBlock->constantBufferCount()) {
1589             m_rareData->m_constantBuffers.grow(count);
1590             for (size_t i = 0; i < count; i++) {
1591                 const UnlinkedCodeBlock::ConstantBuffer& buffer = unlinkedCodeBlock->constantBuffer(i);
1592                 m_rareData->m_constantBuffers[i] = buffer;
1593             }
1594         }
1595         if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
1596             m_rareData->m_exceptionHandlers.grow(count);
1597             size_t nonLocalScopeDepth = scope->depth();
1598             for (size_t i = 0; i < count; i++) {
1599                 const UnlinkedHandlerInfo& handler = unlinkedCodeBlock->exceptionHandler(i);
1600                 m_rareData->m_exceptionHandlers[i].start = handler.start;
1601                 m_rareData->m_exceptionHandlers[i].end = handler.end;
1602                 m_rareData->m_exceptionHandlers[i].target = handler.target;
1603                 m_rareData->m_exceptionHandlers[i].scopeDepth = nonLocalScopeDepth + handler.scopeDepth;
1604 #if ENABLE(JIT) && ENABLE(LLINT)
1605                 m_rareData->m_exceptionHandlers[i].nativeCode = CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(llint_op_catch)));
1606 #endif
1607             }
1608         }
1609
1610         if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
1611             m_rareData->m_stringSwitchJumpTables.grow(count);
1612             for (size_t i = 0; i < count; i++) {
1613                 UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
1614                 UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
1615                 for (; ptr != end; ++ptr) {
1616                     OffsetLocation offset;
1617                     offset.branchOffset = ptr->value;
1618                     m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
1619                 }
1620             }
1621         }
1622
1623         if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
1624             m_rareData->m_switchJumpTables.grow(count);
1625             for (size_t i = 0; i < count; i++) {
1626                 UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
1627                 SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
1628                 destTable.branchOffsets = sourceTable.branchOffsets;
1629                 destTable.min = sourceTable.min;
1630             }
1631         }
1632     }
1633
1634     // Allocate metadata buffers for the bytecode
1635 #if ENABLE(LLINT)
1636     if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
1637         m_llintCallLinkInfos.grow(size);
1638 #endif
1639 #if ENABLE(DFG_JIT)
1640     if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
1641         m_arrayProfiles.grow(size);
1642     if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
1643         m_arrayAllocationProfiles.grow(size);
1644     if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
1645         m_valueProfiles.grow(size);
1646 #endif
1647     if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
1648         m_objectAllocationProfiles.grow(size);
1649
1650     // Copy and translate the UnlinkedInstructions
1651     size_t instructionCount = unlinkedCodeBlock->instructions().size();
1652     UnlinkedInstruction* pc = unlinkedCodeBlock->instructions().data();
1653     Vector<Instruction, 0, UnsafeVectorOverflow> instructions(instructionCount);
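    // Linking rewrites the unlinked stream in place: the opcode index becomes a
    // real opcode pointer, and per-opcode metadata indices (value/array profiles,
    // LLInt call link infos, allocation profiles) become direct pointers into the
    // arrays grown above.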
1654     for (size_t i = 0; i < unlinkedCodeBlock->instructions().size(); ) {
1655         unsigned opLength = opcodeLength(pc[i].u.opcode);
1656         instructions[i] = vm()->interpreter->getOpcode(pc[i].u.opcode);
1657         for (size_t j = 1; j < opLength; ++j) {
1658             if (sizeof(int32_t) != sizeof(intptr_t))
1659                 instructions[i + j].u.pointer = 0;
1660             instructions[i + j].u.operand = pc[i + j].u.operand;
1661         }
1662         switch (pc[i].u.opcode) {
1663 #if ENABLE(DFG_JIT)
1664         case op_get_by_val:
1665         case op_get_argument_by_val: {
1666             int arrayProfileIndex = pc[i + opLength - 2].u.operand;
1667             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1668
1669             instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
1670             // fallthrough
1671         }
1672         case op_to_this:
1673         case op_get_by_id:
1674         case op_call_varargs:
1675         case op_get_callee: {
1676             ValueProfile* profile = &m_valueProfiles[pc[i + opLength - 1].u.operand];
1677             ASSERT(profile->m_bytecodeOffset == -1);
1678             profile->m_bytecodeOffset = i;
1679             instructions[i + opLength - 1] = profile;
1680             break;
1681         }
1682         case op_put_by_val: {
1683             int arrayProfileIndex = pc[i + opLength - 1].u.operand;
1684             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1685             instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
1686             break;
1687         }
1688
1689         case op_new_array:
1690         case op_new_array_buffer:
1691         case op_new_array_with_size: {
1692             int arrayAllocationProfileIndex = pc[i + opLength - 1].u.operand;
1693             instructions[i + opLength - 1] = &m_arrayAllocationProfiles[arrayAllocationProfileIndex];
1694             break;
1695         }
1696 #endif
1697         case op_new_object: {
1698             int objectAllocationProfileIndex = pc[i + opLength - 1].u.operand;
1699             ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex];
1700             int inferredInlineCapacity = pc[i + opLength - 2].u.operand;
1701
1702             instructions[i + opLength - 1] = objectAllocationProfile;
1703             objectAllocationProfile->initialize(*vm(),
1704                 m_ownerExecutable.get(), m_globalObject->objectPrototype(), inferredInlineCapacity);
1705             break;
1706         }
1707
1708         case op_call:
1709         case op_call_eval: {
1710 #if ENABLE(DFG_JIT)
1711             ValueProfile* profile = &m_valueProfiles[pc[i + opLength - 1].u.operand];
1712             ASSERT(profile->m_bytecodeOffset == -1);
1713             profile->m_bytecodeOffset = i;
1714             instructions[i + opLength - 1] = profile;
1715             int arrayProfileIndex = pc[i + opLength - 2].u.operand;
1716             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1717             instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
1718 #endif
1719 #if ENABLE(LLINT)
1720             instructions[i + 5] = &m_llintCallLinkInfos[pc[i + 5].u.operand];
1721 #endif
1722             break;
1723         }
1724         case op_construct: {
1725 #if ENABLE(LLINT)
1726             instructions[i + 5] = &m_llintCallLinkInfos[pc[i + 5].u.operand];
1727 #endif
1728 #if ENABLE(DFG_JIT)
1729             ValueProfile* profile = &m_valueProfiles[pc[i + opLength - 1].u.operand];
1730             ASSERT(profile->m_bytecodeOffset == -1);
1731             profile->m_bytecodeOffset = i;
1732             instructions[i + opLength - 1] = profile;
1733 #endif
1734             break;
1735         }
1736         case op_get_by_id_out_of_line:
1737         case op_get_by_id_self:
1738         case op_get_by_id_proto:
1739         case op_get_by_id_chain:
1740         case op_get_by_id_getter_self:
1741         case op_get_by_id_getter_proto:
1742         case op_get_by_id_getter_chain:
1743         case op_get_by_id_custom_self:
1744         case op_get_by_id_custom_proto:
1745         case op_get_by_id_custom_chain:
1746         case op_get_by_id_generic:
1747         case op_get_array_length:
1748         case op_get_string_length:
1749             CRASH();
1750
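        // If the identifier resolves to an entry in the global object's symbol
        // table, upgrade the nop in place to a real op_init_global_const that
        // writes the global register directly; otherwise leave it as a no-op.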
1751         case op_init_global_const_nop: {
1752             ASSERT(codeType() == GlobalCode);
1753             Identifier ident = identifier(pc[i + 4].u.operand);
1754             SymbolTableEntry entry = m_globalObject->symbolTable()->get(ident.impl());
1755             if (entry.isNull())
1756                 break;
1757
1758             // It's likely that we'll write to this var, so notify now and avoid the overhead of doing so at runtime.
1759             entry.notifyWrite();
1760
1761             instructions[i + 0] = vm()->interpreter->getOpcode(op_init_global_const);
1762             instructions[i + 1] = &m_globalObject->registerAt(entry.getIndex());
1763             break;
1764         }
1765
1766         case op_resolve_scope: {
1767             const Identifier& ident = identifier(pc[i + 2].u.operand);
1768             ResolveType type = static_cast<ResolveType>(pc[i + 3].u.operand);
1769
1770             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), scope, ident, Get, type);
1771             instructions[i + 3].u.operand = op.type;
1772             instructions[i + 4].u.operand = op.depth;
1773             break;
1774         }
1775
1776         case op_get_from_scope: {
1777 #if ENABLE(VALUE_PROFILER)
1778             ValueProfile* profile = &m_valueProfiles[pc[i + opLength - 1].u.operand];
1779             ASSERT(profile->m_bytecodeOffset == -1);
1780             profile->m_bytecodeOffset = i;
1781             instructions[i + opLength - 1] = profile;
1782 #endif
1783
1784             // get_from_scope dst, scope, id, ResolveModeAndType, Structure, Operand
1785             const Identifier& ident = identifier(pc[i + 3].u.operand);
1786             ResolveModeAndType modeAndType = ResolveModeAndType(pc[i + 4].u.operand);
1787             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), scope, ident, Get, modeAndType.type());
1788
1789             instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
1790             if (op.structure)
1791                 instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
1792             instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
1793             break;
1794         }
1795
1796         case op_put_to_scope: {
1797             // put_to_scope scope, id, value, ResolveModeAndType, Structure, Operand
1798             const Identifier& ident = identifier(pc[i + 2].u.operand);
1799             ResolveModeAndType modeAndType = ResolveModeAndType(pc[i + 4].u.operand);
1800             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), scope, ident, Put, modeAndType.type());
1801
1802             instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
1803             if (op.structure)
1804                 instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
1805             instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
1806             break;
1807         }
1808
1809         case op_debug: {
1810             instructions[i + 4] = columnNumberForBytecodeOffset(i);
1811             break;
1812         }
1813
1814         default:
1815             break;
1816         }
1817         i += opLength;
1818     }
1819     m_instructions = WTF::RefCountedArray<Instruction>(instructions);
1820
1821     // Set optimization thresholds only after m_instructions is initialized, since these
1822     // rely on the instruction count (and are in theory permitted to also inspect the
1823     // instruction stream to more accurately assess the cost of tier-up).
1824     optimizeAfterWarmUp();
1825     jitAfterWarmUp();
1826
1827     // If the concurrent thread will want the code block's hash, then compute it here
1828     // synchronously.
1829     if (Options::showDisassembly()
1830         || Options::showDFGDisassembly()
1831         || Options::dumpBytecodeAtDFGTime()
1832         || Options::verboseCompilation()
1833         || Options::logCompilationChanges()
1834         || Options::validateGraph()
1835         || Options::validateGraphAtEachPhase()
1836         || Options::verboseOSR()
1837         || Options::verboseCompilationQueue()
1838         || Options::reportCompileTimes()
1839         || Options::verboseCFA())
1840         hash();
1841
1842     if (Options::dumpGeneratedBytecodes())
1843         dumpBytecode();
1844     m_vm->finishedCompiling(this);
1845 }
1846
1847 CodeBlock::~CodeBlock()
1848 {
1849     if (m_vm->m_perBytecodeProfiler)
1850         m_vm->m_perBytecodeProfiler->notifyDestruction(this);
1851     
1852 #if ENABLE(DFG_JIT)
1853     // Remove myself from the set of DFG code blocks. Note that I may not be in this set
1854     // (because I'm not a DFG code block), in which case this is a no-op anyway.
1855     m_vm->heap.m_dfgCodeBlocks.m_set.remove(this);
1856 #endif
1857     
1858 #if ENABLE(VERBOSE_VALUE_PROFILE)
1859     dumpValueProfiles();
1860 #endif
1861
1862 #if ENABLE(LLINT)    
1863     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
1864         m_incomingLLIntCalls.begin()->remove();
1865 #endif // ENABLE(LLINT)
1866 #if ENABLE(JIT)
1867     // We may be destroyed before any CodeBlocks that refer to us are destroyed.
1868     // Consider that two CodeBlocks become unreachable at the same time. There
1869     // is no guarantee about the order in which the CodeBlocks are destroyed.
1870     // So, if we don't remove incoming calls, and get destroyed before the
1871     // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
1872     // destructor will try to remove nodes from our (no longer valid) linked list.
1873     while (m_incomingCalls.begin() != m_incomingCalls.end())
1874         m_incomingCalls.begin()->remove();
1875     
1876     // Note that our outgoing calls will be removed from other CodeBlocks'
1877     // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
1878     // destructors.
1879
1880     for (size_t size = m_structureStubInfos.size(), i = 0; i < size; ++i)
1881         m_structureStubInfos[i].deref();
1882 #endif // ENABLE(JIT)
1883
1884 #if DUMP_CODE_BLOCK_STATISTICS
1885     liveCodeBlockSet.remove(this);
1886 #endif
1887 }
1888
1889 void CodeBlock::setNumParameters(int newValue)
1890 {
1891     m_numParameters = newValue;
1892
1893 #if ENABLE(VALUE_PROFILER)
1894     m_argumentValueProfiles.resizeToFit(newValue);
1895 #endif
1896 }
1897
1898 void EvalCodeCache::visitAggregate(SlotVisitor& visitor)
1899 {
1900     EvalCacheMap::iterator end = m_cacheMap.end();
1901     for (EvalCacheMap::iterator ptr = m_cacheMap.begin(); ptr != end; ++ptr)
1902         visitor.append(&ptr->value);
1903 }
1904
1905 void CodeBlock::visitAggregate(SlotVisitor& visitor)
1906 {
1907 #if ENABLE(PARALLEL_GC) && ENABLE(DFG_JIT)
1908     if (JITCode::isOptimizingJIT(jitType())) {
1909         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
1910         
1911         // I may be asked to scan myself more than once, and it may even happen concurrently.
1912         // To this end, use a CAS loop to check if I've been called already. Only one thread
1913         // may proceed past this point - whichever one wins the CAS race.
1914         unsigned oldValue;
1915         do {
1916             oldValue = dfgCommon->visitAggregateHasBeenCalled;
1917             if (oldValue) {
1918                 // Looks like someone else won! Return immediately to ensure that we don't
1919                 // trace the same CodeBlock concurrently. Doing so is hazardous since we will
1920                 // be mutating the state of ValueProfiles, which contain JSValues, which can
1921                 // have word-tearing on 32-bit, leading to awesome timing-dependent crashes
1922                 // that are nearly impossible to track down.
1923                 
1924                 // Also note that it must be safe to return early as soon as we see the
1925                 // value true (well, (unsigned)1), since once a GC thread is in this method
1926                 // and has won the CAS race (i.e. was responsible for setting the value true)
1927                 // it will definitely complete the rest of this method before declaring
1928                 // termination.
1929                 return;
1930             }
1931         } while (!WTF::weakCompareAndSwap(&dfgCommon->visitAggregateHasBeenCalled, 0, 1));
1932     }
1933 #endif // ENABLE(PARALLEL_GC) && ENABLE(DFG_JIT)
1934     
1935     if (!!m_alternative)
1936         m_alternative->visitAggregate(visitor);
1937
1938     visitor.append(&m_unlinkedCode);
1939
1940     // There are three things that may use unconditional finalizers: lazy bytecode freeing,
1941     // inline cache clearing, and jettisoning. The probability of us wanting to do at
1942     // least one of those things is probably quite close to 1. So we add one no matter what,
1943     // and when it runs, it figures out whether it has any work to do.
1944     visitor.addUnconditionalFinalizer(this);
1945     
1946     // There are two things that we use weak reference harvesters for: DFG fixpoint for
1947     // jettisoning, and trying to find structures that would be live based on some
1948     // inline cache. So it makes sense to register them regardless.
1949     visitor.addWeakReferenceHarvester(this);
1950     m_allTransitionsHaveBeenMarked = false;
1951     
1952     if (shouldImmediatelyAssumeLivenessDuringScan()) {
1953         // This code block is live, so scan all references strongly and return.
1954         stronglyVisitStrongReferences(visitor);
1955         stronglyVisitWeakReferences(visitor);
1956         propagateTransitions(visitor);
1957         return;
1958     }
1959     
1960 #if ENABLE(DFG_JIT)
1961     // We get here if we're live in the sense that our owner executable is live,
1962     // but we're not yet live for sure in another sense: we may yet decide that this
1963     // code block should be jettisoned based on its outgoing weak references being
1964     // stale. Set a flag to indicate that we're still assuming that we're dead, and
1965     // perform one round of determining if we're live. The GC may determine, based on
1966     // either us marking additional objects, or by other objects being marked for
1967     // other reasons, that this iteration should run again; it will notify us of this
1968     // decision by calling harvestWeakReferences().
1969     
1970     m_jitCode->dfgCommon()->livenessHasBeenProved = false;
1971     
1972     propagateTransitions(visitor);
1973     determineLiveness(visitor);
1974 #else // ENABLE(DFG_JIT)
1975     RELEASE_ASSERT_NOT_REACHED();
1976 #endif // ENABLE(DFG_JIT)
1977 }
1978
1979 void CodeBlock::propagateTransitions(SlotVisitor& visitor)
1980 {
1981     UNUSED_PARAM(visitor);
1982
1983     if (m_allTransitionsHaveBeenMarked)
1984         return;
1985
1986     bool allAreMarkedSoFar = true;
1987         
1988 #if ENABLE(LLINT)
1989     Interpreter* interpreter = m_vm->interpreter;
1990     if (jitType() == JITCode::InterpreterThunk) {
1991         const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
1992         for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
1993             Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
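            // For a put_by_id transition, operand 4 holds the source structure and
            // operand 6 the target structure; the target is only kept alive once
            // the source is known to be marked.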
1994             switch (interpreter->getOpcodeID(instruction[0].u.opcode)) {
1995             case op_put_by_id_transition_direct:
1996             case op_put_by_id_transition_normal:
1997             case op_put_by_id_transition_direct_out_of_line:
1998             case op_put_by_id_transition_normal_out_of_line: {
1999                 if (Heap::isMarked(instruction[4].u.structure.get()))
2000                     visitor.append(&instruction[6].u.structure);
2001                 else
2002                     allAreMarkedSoFar = false;
2003                 break;
2004             }
2005             default:
2006                 break;
2007             }
2008         }
2009     }
2010 #endif // ENABLE(LLINT)
2011
2012 #if ENABLE(JIT)
2013     if (JITCode::isJIT(jitType())) {
2014         for (unsigned i = 0; i < m_structureStubInfos.size(); ++i) {
2015             StructureStubInfo& stubInfo = m_structureStubInfos[i];
2016             switch (stubInfo.accessType) {
2017             case access_put_by_id_transition_normal:
2018             case access_put_by_id_transition_direct: {
2019                 JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
2020                 if ((!origin || Heap::isMarked(origin))
2021                     && Heap::isMarked(stubInfo.u.putByIdTransition.previousStructure.get()))
2022                     visitor.append(&stubInfo.u.putByIdTransition.structure);
2023                 else
2024                     allAreMarkedSoFar = false;
2025                 break;
2026             }
2027
2028             case access_put_by_id_list: {
2029                 PolymorphicPutByIdList* list = stubInfo.u.putByIdList.list;
2030                 JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
2031                 if (origin && !Heap::isMarked(origin)) {
2032                     allAreMarkedSoFar = false;
2033                     break;
2034                 }
2035                 for (unsigned j = list->size(); j--;) {
2036                     PutByIdAccess& access = list->m_list[j];
2037                     if (!access.isTransition())
2038                         continue;
2039                     if (Heap::isMarked(access.oldStructure()))
2040                         visitor.append(&access.m_newStructure);
2041                     else
2042                         allAreMarkedSoFar = false;
2043                 }
2044                 break;
2045             }
2046             
2047             default:
2048                 break;
2049             }
2050         }
2051     }
2052 #endif // ENABLE(JIT)
2053     
2054 #if ENABLE(DFG_JIT)
2055     if (JITCode::isOptimizingJIT(jitType())) {
2056         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2057         for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
2058             if ((!dfgCommon->transitions[i].m_codeOrigin
2059                  || Heap::isMarked(dfgCommon->transitions[i].m_codeOrigin.get()))
2060                 && Heap::isMarked(dfgCommon->transitions[i].m_from.get())) {
2061                 // If the following three things are live, then the target of the
2062                 // transition is also live:
2063                 // - This code block. We know it's live already because otherwise
2064                 //   we wouldn't be scanning ourselves.
2065                 // - The code origin of the transition. Transitions may arise from
2066                 //   code that was inlined. They are not relevant if the user's
2067                 //   object that is required for the inlinee to run is no longer
2068                 //   live.
2069                 // - The source of the transition. The transition checks if some
2070                 //   heap location holds the source, and if so, stores the target.
2071                 //   Hence the source must be live for the transition to be live.
2072                 visitor.append(&dfgCommon->transitions[i].m_to);
2073             } else
2074                 allAreMarkedSoFar = false;
2075         }
2076     }
2077 #endif // ENABLE(DFG_JIT)
2078     
2079     if (allAreMarkedSoFar)
2080         m_allTransitionsHaveBeenMarked = true;
2081 }
2082
2083 void CodeBlock::determineLiveness(SlotVisitor& visitor)
2084 {
2085     UNUSED_PARAM(visitor);
2086     
2087     if (shouldImmediatelyAssumeLivenessDuringScan())
2088         return;
2089     
2090 #if ENABLE(DFG_JIT)
2091     // Check if we have any remaining work to do.
2092     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2093     if (dfgCommon->livenessHasBeenProved)
2094         return;
2095     
2096     // Now check all of our weak references. If all of them are live, then we
2097     // have proved liveness and so we scan our strong references. If at end of
2098     // GC we still have not proved liveness, then this code block is toast.
2099     bool allAreLiveSoFar = true;
2100     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
2101         if (!Heap::isMarked(dfgCommon->weakReferences[i].get())) {
2102             allAreLiveSoFar = false;
2103             break;
2104         }
2105     }
2106     
2107     // If some weak references are dead, then this fixpoint iteration was
2108     // unsuccessful.
2109     if (!allAreLiveSoFar)
2110         return;
2111     
2112     // All weak references are live. Record this information so we don't
2113     // come back here again, and scan the strong references.
2114     dfgCommon->livenessHasBeenProved = true;
2115     stronglyVisitStrongReferences(visitor);
2116 #endif // ENABLE(DFG_JIT)
2117 }
2118
2119 void CodeBlock::visitWeakReferences(SlotVisitor& visitor)
2120 {
2121     propagateTransitions(visitor);
2122     determineLiveness(visitor);
2123 }
2124
2125 void CodeBlock::finalizeUnconditionally()
2126 {
2127 #if ENABLE(LLINT)
2128     Interpreter* interpreter = m_vm->interpreter;
2129     if (JITCode::couldBeInterpreted(jitType())) {
2130         const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
2131         for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
2132             Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
2133             switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) {
2134             case op_get_by_id:
2135             case op_get_by_id_out_of_line:
2136             case op_put_by_id:
2137             case op_put_by_id_out_of_line:
2138                 if (!curInstruction[4].u.structure || Heap::isMarked(curInstruction[4].u.structure.get()))
2139                     break;
2140                 if (Options::verboseOSR())
2141                     dataLogF("Clearing LLInt property access with structure %p.\n", curInstruction[4].u.structure.get());
2142                 curInstruction[4].u.structure.clear();
2143                 curInstruction[5].u.operand = 0;
2144                 break;
2145             case op_put_by_id_transition_direct:
2146             case op_put_by_id_transition_normal:
2147             case op_put_by_id_transition_direct_out_of_line:
2148             case op_put_by_id_transition_normal_out_of_line:
2149                 if (Heap::isMarked(curInstruction[4].u.structure.get())
2150                     && Heap::isMarked(curInstruction[6].u.structure.get())
2151                     && Heap::isMarked(curInstruction[7].u.structureChain.get()))
2152                     break;
2153                 if (Options::verboseOSR()) {
2154                     dataLogF("Clearing LLInt put transition with structures %p -> %p, chain %p.\n",
2155                             curInstruction[4].u.structure.get(),
2156                             curInstruction[6].u.structure.get(),
2157                             curInstruction[7].u.structureChain.get());
2158                 }
2159                 curInstruction[4].u.structure.clear();
2160                 curInstruction[6].u.structure.clear();
2161                 curInstruction[7].u.structureChain.clear();
2162                 curInstruction[0].u.opcode = interpreter->getOpcode(op_put_by_id);
2163                 break;
2164             case op_get_array_length:
2165                 break;
2166             case op_get_from_scope:
2167             case op_put_to_scope: {
2168                 WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
2169                 if (!structure || Heap::isMarked(structure.get()))
2170                     break;
2171                 if (Options::verboseOSR())
2172                     dataLogF("Clearing LLInt scope access with structure %p.\n", structure.get());
2173                 structure.clear();
2174                 break;
2175             }
2176             default:
2177                 RELEASE_ASSERT_NOT_REACHED();
2178             }
2179         }
2180
2181         for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
2182             if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
2183                 if (Options::verboseOSR())
2184                     dataLog("Clearing LLInt call from ", *this, "\n");
2185                 m_llintCallLinkInfos[i].unlink();
2186             }
2187             if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
2188                 m_llintCallLinkInfos[i].lastSeenCallee.clear();
2189         }
2190     }
2191 #endif // ENABLE(LLINT)
2192
2193 #if ENABLE(DFG_JIT)
2194     // Check whether we were proved live. If not, then jettison.
2195     if (!(shouldImmediatelyAssumeLivenessDuringScan() || m_jitCode->dfgCommon()->livenessHasBeenProved)) {
2196         if (Options::verboseOSR())
2197             dataLog(*this, " has dead weak references, jettisoning during GC.\n");
2198
2199         if (DFG::shouldShowDisassembly()) {
2200             dataLog(*this, " will be jettisoned because of the following dead references:\n");
2201             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2202             for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
2203                 DFG::WeakReferenceTransition& transition = dfgCommon->transitions[i];
2204                 JSCell* origin = transition.m_codeOrigin.get();
2205                 JSCell* from = transition.m_from.get();
2206                 JSCell* to = transition.m_to.get();
2207                 if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
2208                     continue;
2209                 dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
2210             }
2211             for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
2212                 JSCell* weak = dfgCommon->weakReferences[i].get();
2213                 if (Heap::isMarked(weak))
2214                     continue;
2215                 dataLog("    Weak reference ", RawPointer(weak), ".\n");
2216             }
2217         }
2218         
2219         jettison();
2220         return;
2221     }
2222 #endif // ENABLE(DFG_JIT)
2223
2224 #if ENABLE(JIT)
2225     // Handle inline caches.
2226     if (!!jitCode()) {
2227         RepatchBuffer repatchBuffer(this);
2228         for (unsigned i = 0; i < numberOfCallLinkInfos(); ++i) {
2229             if (callLinkInfo(i).isLinked()) {
2230                 if (ClosureCallStubRoutine* stub = callLinkInfo(i).stub.get()) {
2231                     if (!Heap::isMarked(stub->structure())
2232                         || !Heap::isMarked(stub->executable())) {
2233                         if (Options::verboseOSR()) {
2234                             dataLog(
2235                                 "Clearing closure call from ", *this, " to ",
2236                                 stub->executable()->hashFor(callLinkInfo(i).specializationKind()),
2237                                 ", stub routine ", RawPointer(stub), ".\n");
2238                         }
2239                         callLinkInfo(i).unlink(*m_vm, repatchBuffer);
2240                     }
2241                 } else if (!Heap::isMarked(callLinkInfo(i).callee.get())) {
2242                     if (Options::verboseOSR()) {
2243                         dataLog(
2244                             "Clearing call from ", *this, " to ",
2245                             RawPointer(callLinkInfo(i).callee.get()), " (",
2246                             callLinkInfo(i).callee.get()->executable()->hashFor(
2247                                 callLinkInfo(i).specializationKind()),
2248                             ").\n");
2249                     }
2250                     callLinkInfo(i).unlink(*m_vm, repatchBuffer);
2251                 }
2252             }
2253             if (!!callLinkInfo(i).lastSeenCallee
2254                 && !Heap::isMarked(callLinkInfo(i).lastSeenCallee.get()))
2255                 callLinkInfo(i).lastSeenCallee.clear();
2256         }
2257         for (size_t size = m_structureStubInfos.size(), i = 0; i < size; ++i) {
2258             StructureStubInfo& stubInfo = m_structureStubInfos[i];
2259             
2260             if (stubInfo.visitWeakReferences())
2261                 continue;
2262             
2263             resetStubDuringGCInternal(repatchBuffer, stubInfo);
2264         }
2265     }
2266 #endif
2267 }
2268
2269 #if ENABLE(JIT)
2270 void CodeBlock::resetStub(StructureStubInfo& stubInfo)
2271 {
2272     if (stubInfo.accessType == access_unset)
2273         return;
2274     
2275     RepatchBuffer repatchBuffer(this);
2276     resetStubInternal(repatchBuffer, stubInfo);
2277 }
2278
2279 void CodeBlock::resetStubInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
2280 {
2281     AccessType accessType = static_cast<AccessType>(stubInfo.accessType);
2282     
2283     if (Options::verboseOSR()) {
2284         // This can be called from destructors during GC, so we don't try to do a full
2285         // dump of the CodeBlock.
2286         dataLog("Clearing structure cache (kind ", static_cast<int>(stubInfo.accessType), ") in ", RawPointer(this), ".\n");
2287     }
2288
2289     switch (jitType()) {
2290     case JITCode::BaselineJIT:
2291         if (isGetByIdAccess(accessType))
2292             JIT::resetPatchGetById(repatchBuffer, &stubInfo);
2293         else {
2294             RELEASE_ASSERT(isPutByIdAccess(accessType));
2295             JIT::resetPatchPutById(repatchBuffer, &stubInfo);
2296         }
2297         break;
2298     case JITCode::DFGJIT:
2299         if (isGetByIdAccess(accessType))
2300             DFG::dfgResetGetByID(repatchBuffer, stubInfo);
2301         else if (isPutByIdAccess(accessType))
2302             DFG::dfgResetPutByID(repatchBuffer, stubInfo);
2303         else {
2304             RELEASE_ASSERT(isInAccess(accessType));
2305             DFG::dfgResetIn(repatchBuffer, stubInfo);
2306         }
2307         break;
2308     default:
2309         RELEASE_ASSERT_NOT_REACHED();
2310         break;
2311     }
2312     
2313     stubInfo.reset();
2314 }
2315
2316 void CodeBlock::resetStubDuringGCInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
2317 {
2318     resetStubInternal(repatchBuffer, stubInfo);
2319     stubInfo.resetByGC = true;
2320 }
2321 #endif
2322
2323 void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
2324 {
2325     visitor.append(&m_globalObject);
2326     visitor.append(&m_ownerExecutable);
2327     visitor.append(&m_unlinkedCode);
2328     if (m_rareData)
2329         m_rareData->m_evalCodeCache.visitAggregate(visitor);
2330     visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
2331     for (size_t i = 0; i < m_functionExprs.size(); ++i)
2332         visitor.append(&m_functionExprs[i]);
2333     for (size_t i = 0; i < m_functionDecls.size(); ++i)
2334         visitor.append(&m_functionDecls[i]);
2335     for (unsigned i = 0; i < m_objectAllocationProfiles.size(); ++i)
2336         m_objectAllocationProfiles[i].visitAggregate(visitor);
2337
2338     updateAllPredictions(Collection);
2339 }
2340
2341 void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor)
2342 {
2343     UNUSED_PARAM(visitor);
2344
2345 #if ENABLE(DFG_JIT)
2346     if (!JITCode::isOptimizingJIT(jitType()))
2347         return;
2348     
2349     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2350
2351     for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
2352         if (!!dfgCommon->transitions[i].m_codeOrigin)
2353             visitor.append(&dfgCommon->transitions[i].m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
2354         visitor.append(&dfgCommon->transitions[i].m_from);
2355         visitor.append(&dfgCommon->transitions[i].m_to);
2356     }
2357     
2358     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i)
2359         visitor.append(&dfgCommon->weakReferences[i]);
2360 #endif    
2361 }
2362
2363 CodeBlock* CodeBlock::baselineVersion()
2364 {
2365 #if ENABLE(JIT)
2366     // When we're initializing the original baseline code block, we won't be able
2367     // to get its replacement. But we'll know that it's the original baseline code
2368     // block because it won't have JIT code yet and it won't have an alternative.
2369     if (jitType() == JITCode::None && !alternative())
2370         return this;
2371     
2372     CodeBlock* result = replacement();
2373     ASSERT(result);
2374     while (result->alternative())
2375         result = result->alternative();
2376     ASSERT(result);
2377     ASSERT(JITCode::isBaselineCode(result->jitType()));
2378     return result;
2379 #else
2380     return this;
2381 #endif
2382 }
2383
2384 #if ENABLE(JIT)
2385 bool CodeBlock::hasOptimizedReplacement()
2386 {
2387     ASSERT(JITCode::isBaselineCode(jitType()));
2388     bool result = JITCode::isHigherTier(replacement()->jitType(), jitType());
2389     if (result)
2390         ASSERT(JITCode::isOptimizingJIT(replacement()->jitType()));
2391     else {
2392         ASSERT(JITCode::isBaselineCode(replacement()->jitType()));
2393         ASSERT(replacement() == this);
2394     }
2395     return result;
2396 }
2397 #endif
2398
2399 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset)
2400 {
2401     RELEASE_ASSERT(bytecodeOffset < instructions().size());
2402
2403     if (!m_rareData)
2404         return 0;
2405     
2406     Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
2407     for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
2408         // Handlers are ordered innermost first, so the first handler we encounter
2409         // that contains the source address is the correct handler to use.
2410         if (exceptionHandlers[i].start <= bytecodeOffset && exceptionHandlers[i].end > bytecodeOffset)
2411             return &exceptionHandlers[i];
2412     }
2413
2414     return 0;
2415 }
2416
2417 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
2418 {
2419     RELEASE_ASSERT(bytecodeOffset < instructions().size());
2420     return m_ownerExecutable->lineNo() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
2421 }
2422
2423 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
2424 {
2425     int divot;
2426     int startOffset;
2427     int endOffset;
2428     unsigned line;
2429     unsigned column;
2430     expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
2431     return column;
2432 }
2433
2434 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column)
2435 {
2436     m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
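    // The unlinked values are relative to this code block's source. Rebase the
    // divot by the source offset and the line by the owner executable's starting
    // line; the first-line column offset applies only when the expression sits on
    // the block's first line (line 0 before rebasing).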
2437     divot += m_sourceOffset;
2438     column += line ? 1 : firstLineColumnOffset();
2439     line += m_ownerExecutable->lineNo();
2440 }
2441
2442 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
2443 {
2444 #if ENABLE(LLINT)
2445     m_llintCallLinkInfos.shrinkToFit();
2446 #endif
2447 #if ENABLE(JIT)
2448     m_structureStubInfos.shrinkToFit();
2449     m_callLinkInfos.shrinkToFit();
2450 #endif
2451 #if ENABLE(VALUE_PROFILER)
2452     m_rareCaseProfiles.shrinkToFit();
2453     m_specialFastCaseProfiles.shrinkToFit();
2454 #endif
2455     
2456     if (shrinkMode == EarlyShrink) {
2457         m_additionalIdentifiers.shrinkToFit();
2458         m_functionDecls.shrinkToFit();
2459         m_functionExprs.shrinkToFit();
2460         m_constantRegisters.shrinkToFit();
2461         
2462         if (m_rareData) {
2463             m_rareData->m_switchJumpTables.shrinkToFit();
2464             m_rareData->m_stringSwitchJumpTables.shrinkToFit();
2465         }
2466     } // else don't shrink these, because pointers into these tables may already have been handed out.
2467
2468     if (m_rareData) {
2469         m_rareData->m_exceptionHandlers.shrinkToFit();
2470 #if ENABLE(JIT)
2471         m_rareData->m_callReturnIndexVector.shrinkToFit();
2472 #endif
2473 #if ENABLE(DFG_JIT)
2474         m_rareData->m_inlineCallFrames.shrinkToFit();
2475         m_rareData->m_codeOrigins.shrinkToFit();
2476 #endif
2477     }
2478 }
2479
2480 void CodeBlock::createActivation(CallFrame* callFrame)
2481 {
2482     ASSERT(codeType() == FunctionCode);
2483     ASSERT(needsFullScopeChain());
2484     ASSERT(!callFrame->uncheckedR(activationRegister()).jsValue());
2485     JSActivation* activation = JSActivation::create(callFrame->vm(), callFrame, this);
2486     callFrame->uncheckedR(activationRegister()) = JSValue(activation);
2487     callFrame->setScope(activation);
2488 }
2489
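// Returns the index of an existing constant register holding v, or adds a new
// one. Illustrative use (hypothetical call site): addOrFindConstant(jsNumber(42))
// returns the same index on repeated calls rather than growing the constant pool.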
2490 unsigned CodeBlock::addOrFindConstant(JSValue v)
2491 {
2492     unsigned result;
2493     if (findConstant(v, result))
2494         return result;
2495     return addConstant(v);
2496 }
2497
2498 bool CodeBlock::findConstant(JSValue v, unsigned& index)
2499 {
2500     unsigned numberOfConstants = numberOfConstantRegisters();
2501     for (unsigned i = 0; i < numberOfConstants; ++i) {
2502         if (getConstant(FirstConstantRegisterIndex + i) == v) {
2503             index = i;
2504             return true;
2505         }
2506     }
2507     index = numberOfConstants;
2508     return false;
2509 }
2510
2511 #if ENABLE(JIT)
2512 void CodeBlock::unlinkCalls()
2513 {
2514     if (!!m_alternative)
2515         m_alternative->unlinkCalls();
2516 #if ENABLE(LLINT)
2517     for (size_t i = 0; i < m_llintCallLinkInfos.size(); ++i) {
2518         if (m_llintCallLinkInfos[i].isLinked())
2519             m_llintCallLinkInfos[i].unlink();
2520     }
2521 #endif
2522     if (!m_callLinkInfos.size())
2523         return;
2524     if (!m_vm->canUseJIT())
2525         return;
2526     RepatchBuffer repatchBuffer(this);
2527     for (size_t i = 0; i < m_callLinkInfos.size(); i++) {
2528         if (!m_callLinkInfos[i].isLinked())
2529             continue;
2530         m_callLinkInfos[i].unlink(*m_vm, repatchBuffer);
2531     }
2532 }
2533
2534 void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
2535 {
2536     noticeIncomingCall(callerFrame);
2537     m_incomingCalls.push(incoming);
2538 }
2539
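// Unlike unlinkCalls() above, which severs this block's outgoing call sites,
// this severs the calls that other code blocks (and LLInt callers) have linked
// into us.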
2540 void CodeBlock::unlinkIncomingCalls()
2541 {
2542 #if ENABLE(LLINT)
2543     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
2544         m_incomingLLIntCalls.begin()->unlink();
2545 #endif
2546     if (m_incomingCalls.isEmpty())
2547         return;
2548     RepatchBuffer repatchBuffer(this);
2549     while (m_incomingCalls.begin() != m_incomingCalls.end())
2550         m_incomingCalls.begin()->unlink(*m_vm, repatchBuffer);
2551 }
2552 #endif // ENABLE(JIT)
2553
2554 #if ENABLE(LLINT)
2555 void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
2556 {
2557     noticeIncomingCall(callerFrame);
2558     m_incomingLLIntCalls.push(incoming);
2559 }
2560 #endif // ENABLE(LLINT)
2561
2562 #if ENABLE(JIT)
2563 ClosureCallStubRoutine* CodeBlock::findClosureCallForReturnPC(ReturnAddressPtr returnAddress)
2564 {
2565     for (unsigned i = m_callLinkInfos.size(); i--;) {
2566         CallLinkInfo& info = m_callLinkInfos[i];
2567         if (!info.stub)
2568             continue;
2569         if (!info.stub->code().executableMemory()->contains(returnAddress.value()))
2570             continue;
2571
2572         RELEASE_ASSERT(info.stub->codeOrigin().bytecodeIndex != CodeOrigin::invalidBytecodeIndex);
2573         return info.stub.get();
2574     }
2575     
2576     // The stub routine may have been jettisoned. This is rare, but we have to handle it.
2577     const JITStubRoutineSet& set = m_vm->heap.jitStubRoutines();
2578     for (unsigned i = set.size(); i--;) {
2579         GCAwareJITStubRoutine* genericStub = set.at(i);
2580         if (!genericStub->isClosureCall())
2581             continue;
2582         ClosureCallStubRoutine* stub = static_cast<ClosureCallStubRoutine*>(genericStub);
2583         if (!stub->code().executableMemory()->contains(returnAddress.value()))
2584             continue;
2585         RELEASE_ASSERT(stub->codeOrigin().bytecodeIndex != CodeOrigin::invalidBytecodeIndex);
2586         return stub;
2587     }
2588     
2589     return 0;
2590 }
2591 #endif
2592
2593 unsigned CodeBlock::bytecodeOffset(ExecState* exec, ReturnAddressPtr returnAddress)
2594 {
2595     UNUSED_PARAM(exec);
2596     UNUSED_PARAM(returnAddress);
2597 #if ENABLE(LLINT)
2598 #if !ENABLE(LLINT_C_LOOP)
2599     // When using the JIT, we could have addresses that are not bytecode
2600     // addresses. We check whether the return address is in the LLInt glue and
2601     // opcode handler range to ensure that we are looking at bytecode before
2602     // attempting to convert the return address into a bytecode offset.
2603     //
2604     // In the case of the C Loop LLInt, the JIT is disabled and the only
2605     // valid return addresses should be bytecode PCs. So we can, and must,
2606     // forgo this check: when COMPUTED_GOTO_OPCODES is not enabled, the
2607     // bytecode "PC"s are actually opcodeIDs and are not bounded by
2608     // llint_begin and llint_end.
2609     if (returnAddress.value() >= LLInt::getCodePtr(llint_begin)
2610         && returnAddress.value() <= LLInt::getCodePtr(llint_end))
2611 #endif
2612     {
2613         RELEASE_ASSERT(exec->codeBlock());
2614         RELEASE_ASSERT(exec->codeBlock() == this);
2615         RELEASE_ASSERT(JITCode::isBaselineCode(jitType()));
2616         Instruction* instruction = exec->currentVPC();
2617         RELEASE_ASSERT(instruction);
2618
2619         return bytecodeOffset(instruction);
2620     }
2621 #endif // ENABLE(LLINT)
2622
2623 #if ENABLE(JIT)
2624     if (!m_rareData)
2625         return 1;
2626     Vector<CallReturnOffsetToBytecodeOffset, 0, UnsafeVectorOverflow>& callIndices = m_rareData->m_callReturnIndexVector;
2627     if (!callIndices.size())
2628         return 1;
2629     
2630     if (jitCode()->contains(returnAddress.value())) {
2631         unsigned callReturnOffset = jitCode()->offsetOf(returnAddress.value());
2632         CallReturnOffsetToBytecodeOffset* result =
2633             binarySearch<CallReturnOffsetToBytecodeOffset, unsigned>(
2634                 callIndices, callIndices.size(), callReturnOffset, getCallReturnOffset);
2635         RELEASE_ASSERT(result->callReturnOffset == callReturnOffset);
2636         RELEASE_ASSERT(result->bytecodeOffset < instructionCount());
2637         return result->bytecodeOffset;
2638     }
2639     ClosureCallStubRoutine* closureInfo = findClosureCallForReturnPC(returnAddress);
2640     CodeOrigin origin = closureInfo->codeOrigin();
2641     while (InlineCallFrame* inlineCallFrame = origin.inlineCallFrame) {
2642         if (inlineCallFrame->baselineCodeBlock() == this)
2643             break;
2644         origin = inlineCallFrame->caller;
2645         RELEASE_ASSERT(origin.bytecodeIndex != CodeOrigin::invalidBytecodeIndex);
2646     }
2647     RELEASE_ASSERT(origin.bytecodeIndex != CodeOrigin::invalidBytecodeIndex);
2648     unsigned bytecodeIndex = origin.bytecodeIndex;
2649     RELEASE_ASSERT(bytecodeIndex < instructionCount());
2650     return bytecodeIndex;
2651 #endif // ENABLE(JIT)
2652
2653 #if !ENABLE(LLINT) && !ENABLE(JIT)
2654     return 1;
2655 #endif
2656 }
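
// Roughly, the recovery above has three cases: an LLInt return address (in which
// case exec->currentVPC() already points at bytecode), a return address inside our
// own baseline JIT code (mapped back through m_rareData->m_callReturnIndexVector),
// and a return address inside a closure call stub (mapped back through the stub's
// CodeOrigin and its inline stack).
//
// As an illustration of the second case, with hypothetical (made-up) entries
//
//     { callReturnOffset: 0x24, bytecodeOffset: 10 }
//     { callReturnOffset: 0x58, bytecodeOffset: 23 }
//
// a return address at offset 0x58 within jitCode() would be mapped to bytecode
// offset 23 by the binarySearch call.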
2657
2658 void CodeBlock::clearEvalCache()
2659 {
2660     if (!!m_alternative)
2661         m_alternative->clearEvalCache();
2662     if (!m_rareData)
2663         return;
2664     m_rareData->m_evalCodeCache.clear();
2665 }
2666
2667 template<typename T, size_t inlineCapacity, typename U, typename V>
2668 inline void replaceExistingEntries(Vector<T, inlineCapacity, U>& target, Vector<T, inlineCapacity, V>& source)
2669 {
2670     ASSERT(target.size() <= source.size());
2671     for (size_t i = 0; i < target.size(); ++i)
2672         target[i] = source[i];
2673 }
2674
2675 void CodeBlock::copyPostParseDataFrom(CodeBlock* alternative)
2676 {
2677     if (!alternative)
2678         return;
2679     
2680     replaceExistingEntries(m_constantRegisters, alternative->m_constantRegisters);
2681     replaceExistingEntries(m_functionDecls, alternative->m_functionDecls);
2682     replaceExistingEntries(m_functionExprs, alternative->m_functionExprs);
2683     if (!!m_rareData && !!alternative->m_rareData)
2684         replaceExistingEntries(m_rareData->m_constantBuffers, alternative->m_rareData->m_constantBuffers);
2685 }
2686
2687 void CodeBlock::copyPostParseDataFromAlternative()
2688 {
2689     copyPostParseDataFrom(m_alternative.get());
2690 }
2691
2692 #if ENABLE(JIT)
2693 void CodeBlock::reoptimize()
2694 {
2695     ASSERT(replacement() != this);
2696     ASSERT(replacement()->alternative() == this);
2697     if (DFG::shouldShowDisassembly())
2698         dataLog(*replacement(), " will be jettisoned due to reoptimization of ", *this, ".\n");
2699     replacement()->jettison();
2700     countReoptimization();
2701 }
2702
2703 CodeBlock* ProgramCodeBlock::replacement()
2704 {
2705     return &static_cast<ProgramExecutable*>(ownerExecutable())->generatedBytecode();
2706 }
2707
2708 CodeBlock* EvalCodeBlock::replacement()
2709 {
2710     return &static_cast<EvalExecutable*>(ownerExecutable())->generatedBytecode();
2711 }
2712
2713 CodeBlock* FunctionCodeBlock::replacement()
2714 {
2715     return &static_cast<FunctionExecutable*>(ownerExecutable())->generatedBytecodeFor(m_isConstructor ? CodeForConstruct : CodeForCall);
2716 }
2717
2718 #if ENABLE(DFG_JIT)
2719 JSObject* ProgramCodeBlock::compileOptimized(ExecState* exec, JSScope* scope, CompilationResult& result, unsigned bytecodeIndex)
2720 {
2721     if (JITCode::isHigherTier(replacement()->jitType(), jitType())) {
2722         result = CompilationNotNeeded;
2723         return 0;
2724     }
2725     JSObject* error = static_cast<ProgramExecutable*>(ownerExecutable())->compileOptimized(exec, scope, result, bytecodeIndex);
2726     return error;
2727 }
2728
2729 CompilationResult ProgramCodeBlock::replaceWithDeferredOptimizedCode(PassRefPtr<DFG::Plan> plan)
2730 {
2731     return static_cast<ProgramExecutable*>(ownerExecutable())->replaceWithDeferredOptimizedCode(plan);
2732 }
2733
2734 JSObject* EvalCodeBlock::compileOptimized(ExecState* exec, JSScope* scope, CompilationResult& result, unsigned bytecodeIndex)
2735 {
2736     if (JITCode::isHigherTier(replacement()->jitType(), jitType())) {
2737         result = CompilationNotNeeded;
2738         return 0;
2739     }
2740     JSObject* error = static_cast<EvalExecutable*>(ownerExecutable())->compileOptimized(exec, scope, result, bytecodeIndex);
2741     return error;
2742 }
2743
2744 CompilationResult EvalCodeBlock::replaceWithDeferredOptimizedCode(PassRefPtr<DFG::Plan> plan)
2745 {
2746     return static_cast<EvalExecutable*>(ownerExecutable())->replaceWithDeferredOptimizedCode(plan);
2747 }
2748
2749 JSObject* FunctionCodeBlock::compileOptimized(ExecState* exec, JSScope* scope, CompilationResult& result, unsigned bytecodeIndex)
2750 {
2751     if (JITCode::isHigherTier(replacement()->jitType(), jitType())) {
2752         result = CompilationNotNeeded;
2753         return 0;
2754     }
2755     JSObject* error = static_cast<FunctionExecutable*>(ownerExecutable())->compileOptimizedFor(exec, scope, result, bytecodeIndex, m_isConstructor ? CodeForConstruct : CodeForCall);
2756     return error;
2757 }
2758
2759 CompilationResult FunctionCodeBlock::replaceWithDeferredOptimizedCode(PassRefPtr<DFG::Plan> plan)
2760 {
2761     return static_cast<FunctionExecutable*>(ownerExecutable())->replaceWithDeferredOptimizedCodeFor(plan, m_isConstructor ? CodeForConstruct : CodeForCall);
2762 }
2763 #endif // ENABLE(DFG_JIT)
2764
2765 DFG::CapabilityLevel ProgramCodeBlock::capabilityLevelInternal()
2766 {
2767     return DFG::programCapabilityLevel(this);
2768 }
2769
2770 DFG::CapabilityLevel EvalCodeBlock::capabilityLevelInternal()
2771 {
2772     return DFG::evalCapabilityLevel(this);
2773 }
2774
2775 DFG::CapabilityLevel FunctionCodeBlock::capabilityLevelInternal()
2776 {
2777     if (m_isConstructor)
2778         return DFG::functionForConstructCapabilityLevel(this);
2779     return DFG::functionForCallCapabilityLevel(this);
2780 }
2781
2782 void CodeBlock::jettison()
2783 {
2784     ASSERT(JITCode::isOptimizingJIT(jitType()));
2785     ASSERT(this == replacement());
2786     alternative()->optimizeAfterWarmUp();
2787     tallyFrequentExitSites();
2788     if (DFG::shouldShowDisassembly())
2789         dataLog("Jettisoning ", *this, ".\n");
2790     jettisonImpl();
2791 }
2792
2793 void ProgramCodeBlock::jettisonImpl()
2794 {
2795     static_cast<ProgramExecutable*>(ownerExecutable())->jettisonOptimizedCode(*vm());
2796 }
2797
2798 void EvalCodeBlock::jettisonImpl()
2799 {
2800     static_cast<EvalExecutable*>(ownerExecutable())->jettisonOptimizedCode(*vm());
2801 }
2802
2803 void FunctionCodeBlock::jettisonImpl()
2804 {
2805     static_cast<FunctionExecutable*>(ownerExecutable())->jettisonOptimizedCodeFor(*vm(), m_isConstructor ? CodeForConstruct : CodeForCall);
2806 }
2807
2808 CompilationResult ProgramCodeBlock::jitCompileImpl(ExecState* exec)
2809 {
2810     ASSERT(jitType() == JITCode::InterpreterThunk);
2811     ASSERT(this == replacement());
2812     return static_cast<ProgramExecutable*>(ownerExecutable())->jitCompile(exec);
2813 }
2814
2815 CompilationResult EvalCodeBlock::jitCompileImpl(ExecState* exec)
2816 {
2817     ASSERT(jitType() == JITCode::InterpreterThunk);
2818     ASSERT(this == replacement());
2819     return static_cast<EvalExecutable*>(ownerExecutable())->jitCompile(exec);
2820 }
2821
2822 CompilationResult FunctionCodeBlock::jitCompileImpl(ExecState* exec)
2823 {
2824     ASSERT(jitType() == JITCode::InterpreterThunk);
2825     ASSERT(this == replacement());
2826     return static_cast<FunctionExecutable*>(ownerExecutable())->jitCompileFor(exec, m_isConstructor ? CodeForConstruct : CodeForCall);
2827 }
2828 #endif // ENABLE(JIT)
2829
2830 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
2831 {
2832     if (!codeOrigin.inlineCallFrame)
2833         return globalObject();
2834     return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->generatedBytecode().globalObject();
2835 }
2836
2837 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
2838 {
2839     CodeBlock* callerCodeBlock = callerFrame->codeBlock();
2840     
2841     if (Options::verboseCallLink())
2842         dataLog("Noticing call link from ", *callerCodeBlock, " to ", *this, "\n");
2843     
2844     if (!m_shouldAlwaysBeInlined)
2845         return;
2846
2847 #if ENABLE(DFG_JIT)
2848     if (!hasBaselineJITProfiling())
2849         return;
2850
2851     if (!DFG::mightInlineFunction(this))
2852         return;
2853
2854     if (!canInline(m_capabilityLevelState))
2855         return;
2856
2857     if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
2858         // If the caller is still in the interpreter, then we can't expect inlining to
2859         // happen anytime soon. Assume it's profitable to optimize this callee separately.
2860         // This ensures that a function is SABI (should-always-be-inlined) only if it is
2861         // called no more frequently than any of its callers.
2862         m_shouldAlwaysBeInlined = false;
2863         if (Options::verboseCallLink())
2864             dataLog("    Marking SABI because caller is in LLInt.\n");
2865         return;
2866     }
2867     
2868     if (callerCodeBlock->codeType() != FunctionCode) {
2869         // If the caller is either eval or global code, assume that it won't be
2870         // optimized anytime soon. For eval code this is particularly true since we
2871         // delay eval optimization by a *lot*.
2872         m_shouldAlwaysBeInlined = false;
2873         if (Options::verboseCallLink())
2874             dataLog("    Marking SABI because caller is not a function.\n");
2875         return;
2876     }
2877     
2878     ExecState* frame = callerFrame;
2879     for (unsigned i = Options::maximumInliningDepth(); i--; frame = frame->callerFrame()) {
2880         if (frame->hasHostCallFrameFlag())
2881             break;
2882         if (frame->codeBlock() == this) {
2883             // Recursive calls won't be inlined.
2884             if (Options::verboseCallLink())
2885                 dataLog("    Marking SABI because recursion was detected.\n");
2886             m_shouldAlwaysBeInlined = false;
2887             return;
2888         }
2889     }
2890     
2891     RELEASE_ASSERT(callerCodeBlock->m_capabilityLevelState != DFG::CapabilityLevelNotSet);
2892     
2893     if (canCompile(callerCodeBlock->m_capabilityLevelState))
2894         return;
2895     
2896     if (Options::verboseCallLink())
2897         dataLog("    Marking SABI because the caller is not a DFG candidate.\n");
2898     
2899     m_shouldAlwaysBeInlined = false;
2900 #endif
2901 }
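
// Taken together, the code above only ever clears m_shouldAlwaysBeInlined; it never
// sets it. In sketch form (a paraphrase of the logic above, not a separate code
// path), the flag is cleared when this block has baseline profiling, might be
// inlined, and is itself inlineable, and yet any of the following hold:
//
//     callerCodeBlock->jitType() == JITCode::InterpreterThunk  // caller still in LLInt
//     || callerCodeBlock->codeType() != FunctionCode           // caller is eval/global code
//     || recursion within Options::maximumInliningDepth() frames
//     || !canCompile(callerCodeBlock->m_capabilityLevelState)  // caller isn't a DFG candidate
//
// Otherwise the incoming call leaves the flag alone.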
2902
2903 #if ENABLE(JIT)
2904 unsigned CodeBlock::reoptimizationRetryCounter() const
2905 {
2906     ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
2907     return m_reoptimizationRetryCounter;
2908 }
2909
2910 void CodeBlock::countReoptimization()
2911 {
2912     m_reoptimizationRetryCounter++;
2913     if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
2914         m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
2915 }
2916
2917 unsigned CodeBlock::numberOfDFGCompiles()
2918 {
2919     ASSERT(JITCode::isBaselineCode(jitType()));
2920     return (JITCode::isOptimizingJIT(replacement()->jitType()) ? 1 : 0) + m_reoptimizationRetryCounter;
2921 }
2922
2923 int32_t CodeBlock::codeTypeThresholdMultiplier() const
2924 {
2925     if (codeType() == EvalCode)
2926         return Options::evalThresholdMultiplier();
2927     
2928     return 1;
2929 }
2930
2931 double CodeBlock::optimizationThresholdScalingFactor()
2932 {
2933     // This expression arises from doing a least-squares fit of
2934     //
2935     // F[x_] := a * Sqrt[x + b] + Abs[c * x] + d
2936     //
2937     // against the data points:
2938     //
2939     //    x       F[x_]
2940     //    10       0.9          (smallest reasonable code block)
2941     //   200       1.0          (typical small-ish code block)
2942     //   320       1.2          (something I saw in 3d-cube that I wanted to optimize)
2943     //  1268       5.0          (something I saw in 3d-cube that I didn't want to optimize)
2944     //  4000       5.5          (random large size, used to cause the function to converge to a shallow curve of some sort)
2945     // 10000       6.0          (similar to above)
2946     //
2947     // I achieve the minimization using the following Mathematica code:
2948     //
2949     // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
2950     //
2951     // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
2952     //
2953     // solution = 
2954     //     Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
2955     //         {a, b, c, d}][[2]]
2956     //
2957     // And the code below (to initialize a, b, c, d) is generated by:
2958     //
2959     // Print["const double " <> ToString[#[[1]]] <> " = " <>
2960     //     If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
2961     //
2962     // We've long known the following to be true:
2963     // - Small code blocks are cheap to optimize and so we should do it sooner rather
2964     //   than later.
2965     // - Large code blocks are expensive to optimize and so we should postpone doing so,
2966     //   and sometimes have a large enough threshold that we never optimize them.
2967     // - The difference in cost is not totally linear because (a) just invoking the
2968     //   DFG incurs some base cost and (b) for large code blocks there is enough slop
2969     //   in the correlation between instruction count and the actual compilation cost
2970     //   that for those large blocks, the instruction count should not have a strong
2971     //   influence on our threshold.
2972     //
2973     // I knew the goals but I didn't know how to achieve them; so I picked an interesting
2974     // example where the heuristics were right (code block in 3d-cube with instruction
2975     // count 320, which got compiled early as it should have been) and one where they were
2976     // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
2977     // to compile and didn't run often enough to warrant compilation in my opinion), and
2978     // then threw in additional data points that represented my own guess of what our
2979     // heuristics should do for some round-numbered examples.
2980     //
2981     // The expression to which I decided to fit the data arose because I started with an
2982     // affine function and then did two things: I put the linear part in an Abs to ensure
2983     // that the fit didn't end up choosing a negative value of c (which would result in
2984     // the function turning over and going negative for large x), and I threw in a Sqrt
2985     // term because Sqrt captures my intuition that the function should be more sensitive
2986     // to small changes in small values of x, but less sensitive when x gets large.
2987     
2988     // Note that the current fit essentially eliminates the linear portion of the
2989     // expression (c == 0.0).
2990     const double a = 0.061504;
2991     const double b = 1.02406;
2992     const double c = 0.0;
2993     const double d = 0.825914;
2994     
2995     double instructionCount = this->instructionCount();
2996     
2997     ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
2998     
2999     double result = d + a * sqrt(instructionCount + b) + c * instructionCount;
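    // For illustration only: with the fitted constants above, a 10000-instruction
    // code block gets a scaling factor of roughly
    //
    //     0.825914 + 0.061504 * Sqrt[10000 + 1.02406] ~= 0.83 + 6.15 ~= 6.98
    //
    // before the code-type multiplier, while a tiny 10-instruction block gets
    // roughly 0.83 + 0.20 ~= 1.03.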
3000     if (Options::verboseOSR()) {
3001         dataLog(
3002             *this, ": instruction count is ", instructionCount,
3003             ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
3004             "\n");
3005     }
3006     return result * codeTypeThresholdMultiplier();
3007 }
3008
3009 static int32_t clipThreshold(double threshold)
3010 {
3011     if (threshold < 1.0)
3012         return 1;
3013     
3014     if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
3015         return std::numeric_limits<int32_t>::max();
3016     
3017     return static_cast<int32_t>(threshold);
3018 }
3019
3020 int32_t CodeBlock::counterValueForOptimizeAfterWarmUp()
3021 {
3022     return clipThreshold(
3023         Options::thresholdForOptimizeAfterWarmUp() *
3024         optimizationThresholdScalingFactor() *
3025         (1 << reoptimizationRetryCounter()));
3026 }
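
// A purely hypothetical example of the composition above (the real numbers come from
// Options and from the fit in optimizationThresholdScalingFactor()): if the base
// threshold were 1000, the scaling factor came out to 2.0, and this code block had
// already been reoptimized once, the new counter would be
//
//     clipThreshold(1000 * 2.0 * (1 << 1)) = 4000
//
// so each reoptimization doubles the wait before the next DFG attempt, and
// clipThreshold() keeps the result within [1, INT32_MAX]. The LongWarmUp and Soon
// variants below differ only in the base threshold they start from.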
3027
3028 int32_t CodeBlock::counterValueForOptimizeAfterLongWarmUp()
3029 {
3030     return clipThreshold(
3031         Options::thresholdForOptimizeAfterLongWarmUp() *
3032         optimizationThresholdScalingFactor() *
3033         (1 << reoptimizationRetryCounter()));
3034 }
3035
3036 int32_t CodeBlock::counterValueForOptimizeSoon()
3037 {
3038     return clipThreshold(
3039         Options::thresholdForOptimizeSoon() *
3040         optimizationThresholdScalingFactor() *
3041         (1 << reoptimizationRetryCounter()));
3042 }
3043
3044 bool CodeBlock::checkIfOptimizationThresholdReached()
3045 {
3046 #if ENABLE(DFG_JIT)
3047     if (m_vm->worklist
3048         && m_vm->worklist->compilationState(this) == DFG::Worklist::Compiled) {
3049         optimizeNextInvocation();
3050         return true;
3051     }
3052 #endif
3053     
3054     return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
3055 }
3056
3057 void CodeBlock::optimizeNextInvocation()
3058 {
3059     if (Options::verboseOSR())
3060         dataLog(*this, ": Optimizing next invocation.\n");
3061     m_jitExecuteCounter.setNewThreshold(0, this);
3062 }
3063
3064 void CodeBlock::dontOptimizeAnytimeSoon()
3065 {
3066     if (Options::verboseOSR())
3067         dataLog(*this, ": Not optimizing anytime soon.\n");
3068     m_jitExecuteCounter.deferIndefinitely();
3069 }
3070
3071 void CodeBlock::optimizeAfterWarmUp()
3072 {
3073     if (Options::verboseOSR())
3074         dataLog(*this, ": Optimizing after warm-up.\n");
3075 #if ENABLE(DFG_JIT)
3076     m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeAfterWarmUp(), this);
3077 #endif
3078 }
3079
3080 void CodeBlock::optimizeAfterLongWarmUp()
3081 {
3082     if (Options::verboseOSR())
3083         dataLog(*this, ": Optimizing after long warm-up.\n");
3084 #if ENABLE(DFG_JIT)
3085     m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeAfterLongWarmUp(), this);
3086 #endif
3087 }
3088
3089 void CodeBlock::optimizeSoon()
3090 {
3091     if (Options::verboseOSR())
3092         dataLog(*this, ": Optimizing soon.\n");
3093 #if ENABLE(DFG_JIT)
3094     m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeSoon(), this);
3095 #endif
3096 }
3097
3098 void CodeBlock::forceOptimizationSlowPathConcurrently()
3099 {
3100     if (Options::verboseOSR())
3101         dataLog(*this, ": Forcing slow path concurrently.\n");
3102     m_jitExecuteCounter.forceSlowPathConcurrently();
3103 }
3104
3105 #if ENABLE(DFG_JIT)
3106 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
3107 {
3108     RELEASE_ASSERT(jitType() == JITCode::BaselineJIT);
3109     RELEASE_ASSERT((result == CompilationSuccessful) == (replacement() != this));
3110     switch (result) {
3111     case CompilationSuccessful:
3112         RELEASE_ASSERT(JITCode::isOptimizingJIT(replacement()->jitType()));
3113         optimizeNextInvocation();
3114         break;
3115     case CompilationFailed:
3116         dontOptimizeAnytimeSoon();
3117         break;
3118     case CompilationDeferred:
3119         // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
3120         // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
3121         // necessarily guarantee anything. So, we make sure that even if that
3122         // function ends up being a no-op, we still eventually retry and realize
3123         // that we have optimized code ready.
3124         optimizeAfterWarmUp();
3125         break;
3126     case CompilationInvalidated:
3127         // Retry with exponential backoff.
3128         countReoptimization();
3129         optimizeAfterWarmUp();
3130         break;
3131     default:
3132         RELEASE_ASSERT_NOT_REACHED();
3133         break;
3134     }
3135 }
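
// Restating the switch above: a successful compile arranges to switch to the
// optimized code on the next invocation (optimizeNextInvocation()), a hard failure
// defers optimization indefinitely, a deferred result simply retries after the usual
// warm-up, and an invalidated compile both counts a reoptimization (feeding the
// exponential backoff) and retries after warm-up.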
3136
3137 #endif // ENABLE(DFG_JIT)
3138     
3139 static bool structureStubInfoLessThan(const StructureStubInfo& a, const StructureStubInfo& b)
3140 {
3141     return a.callReturnLocation.executableAddress() < b.callReturnLocation.executableAddress();
3142 }
3143
3144 void CodeBlock::sortStructureStubInfos()
3145 {
3146     std::sort(m_structureStubInfos.begin(), m_structureStubInfos.end(), structureStubInfoLessThan);
3147 }
3148
3149 uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
3150 {
3151     ASSERT(JITCode::isOptimizingJIT(jitType()));
3152     // Compute this the lame way so we don't saturate. This is called infrequently
3153     // enough that this loop won't hurt us.
3154     unsigned result = desiredThreshold;
3155     for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
3156         unsigned newResult = result << 1;
3157         if (newResult < result)
3158             return std::numeric_limits<uint32_t>::max();
3159         result = newResult;
3160     }
3161     return result;
3162 }
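
// For example (hypothetical numbers): with a desired threshold of 100 and a baseline
// reoptimizationRetryCounter() of 3, the loop above yields 100 << 3 = 800. If the
// repeated doubling ever overflows 32 bits we saturate at UINT32_MAX instead of
// wrapping, which is the whole point of not just using a shift.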
3163
3164 uint32_t CodeBlock::exitCountThresholdForReoptimization()
3165 {
3166     return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
3167 }
3168
3169 uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
3170 {
3171     return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
3172 }
3173
3174 bool CodeBlock::shouldReoptimizeNow()
3175 {
3176     return osrExitCounter() >= exitCountThresholdForReoptimization();
3177 }
3178
3179 bool CodeBlock::shouldReoptimizeFromLoopNow()
3180 {
3181     return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
3182 }
3183 #endif // ENABLE(JIT)
3184
3185 #if ENABLE(VALUE_PROFILER)
3186 ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
3187 {
3188     for (unsigned i = 0; i < m_arrayProfiles.size(); ++i) {
3189         if (m_arrayProfiles[i].bytecodeOffset() == bytecodeOffset)
3190             return &m_arrayProfiles[i];
3191     }
3192     return 0;
3193 }
3194
3195 ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
3196 {
3197     ArrayProfile* result = getArrayProfile(bytecodeOffset);
3198     if (result)
3199         return result;
3200     return addArrayProfile(bytecodeOffset);
3201 }
3202
3203 void CodeBlock::updateAllPredictionsAndCountLiveness(
3204     OperationInProgress operation, unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
3205 {
3206     ConcurrentJITLocker locker(m_lock);
3207     
3208     numberOfLiveNonArgumentValueProfiles = 0;
3209     numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
3210     for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
3211         ValueProfile* profile = getFromAllValueProfiles(i);
3212         unsigned numSamples = profile->totalNumberOfSamples();
3213         if (numSamples > ValueProfile::numberOfBuckets)
3214             numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
3215         numberOfSamplesInProfiles += numSamples;
3216         if (profile->m_bytecodeOffset < 0) {
3217             profile->computeUpdatedPrediction(locker, operation);
3218             continue;
3219         }
3220         if (profile->numberOfSamples() || profile->m_prediction != SpecNone)
3221             numberOfLiveNonArgumentValueProfiles++;
3222         profile->computeUpdatedPrediction(locker, operation);
3223     }
3224     
3225 #if ENABLE(DFG_JIT)
3226     m_lazyOperandValueProfiles.computeUpdatedPredictions(locker, operation);
3227 #endif
3228 }
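
// To make the "profiles are full" remark above concrete with made-up numbers: if
// ValueProfile::numberOfBuckets were 8 and this code block had 50 value profiles,
// numberOfSamplesInProfiles == 400 would mean every bucket of every profile has been
// filled; the per-profile cap above guarantees the sum can never exceed that.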
3229
3230 void CodeBlock::updateAllValueProfilePredictions(OperationInProgress operation)
3231 {
3232     unsigned ignoredValue1, ignoredValue2;
3233     updateAllPredictionsAndCountLiveness(operation, ignoredValue1, ignoredValue2);
3234 }
3235
3236 void CodeBlock::updateAllArrayPredictions()
3237 {
3238     ConcurrentJITLocker locker(m_lock);
3239     
3240     for (unsigned i = m_arrayProfiles.size(); i--;)
3241         m_arrayProfiles[i].computeUpdatedPrediction(locker, this);
3242     
3243     // Don't count these either, for similar reasons.
3244     for (unsigned i = m_arrayAllocationProfiles.size(); i--;)
3245         m_arrayAllocationProfiles[i].updateIndexingType();
3246 }
3247
3248 void CodeBlock::updateAllPredictions(OperationInProgress operation)
3249 {
3250     updateAllValueProfilePredictions(operation);
3251     updateAllArrayPredictions();
3252 }
3253
3254 bool CodeBlock::shouldOptimizeNow()
3255 {
3256     if (Options::verboseOSR())
3257         dataLog("Considering optimizing ", *this, "...\n");
3258
3259 #if ENABLE(VERBOSE_VALUE_PROFILE)
3260     dumpValueProfiles();
3261 #endif