Source/JavaScriptCore/bytecode/CodeBlock.cpp
1 /*
2  * Copyright (C) 2008, 2009, 2010, 2012, 2013 Apple Inc. All rights reserved.
3  * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1.  Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  * 2.  Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
15  *     its contributors may be used to endorse or promote products derived
16  *     from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29
30 #include "config.h"
31 #include "CodeBlock.h"
32
33 #include "BytecodeGenerator.h"
34 #include "BytecodeUseDef.h"
35 #include "CallLinkStatus.h"
36 #include "DFGCapabilities.h"
37 #include "DFGCommon.h"
38 #include "DFGDriver.h"
39 #include "DFGNode.h"
40 #include "DFGWorklist.h"
41 #include "Debugger.h"
42 #include "Interpreter.h"
43 #include "JIT.h"
44 #include "JITStubs.h"
45 #include "JSActivation.h"
46 #include "JSCJSValue.h"
47 #include "JSFunction.h"
48 #include "JSNameScope.h"
49 #include "LLIntEntrypoint.h"
50 #include "LowLevelInterpreter.h"
51 #include "Operations.h"
52 #include "PolymorphicPutByIdList.h"
53 #include "ReduceWhitespace.h"
54 #include "Repatch.h"
55 #include "RepatchBuffer.h"
56 #include "SlotVisitorInlines.h"
57 #include <wtf/BagToHashMap.h>
58 #include <wtf/CommaPrinter.h>
59 #include <wtf/StringExtras.h>
60 #include <wtf/StringPrintStream.h>
61
62 #if ENABLE(DFG_JIT)
63 #include "DFGOperations.h"
64 #endif
65
66 #if ENABLE(FTL_JIT)
67 #include "FTLJITCode.h"
68 #endif
69
70 namespace JSC {
71
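// Human-readable name used by the dump routines below: the function's inferred
// name for function code, or a "<global>"/"<eval>" placeholder otherwise.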
72 CString CodeBlock::inferredName() const
73 {
74     switch (codeType()) {
75     case GlobalCode:
76         return "<global>";
77     case EvalCode:
78         return "<eval>";
79     case FunctionCode:
80         return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
81     default:
82         CRASH();
83         return CString("", 0);
84     }
85 }
86
87 bool CodeBlock::hasHash() const
88 {
89     return !!m_hash;
90 }
91
92 bool CodeBlock::isSafeToComputeHash() const
93 {
94     return !isCompilationThread();
95 }
96
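// The CodeBlockHash is computed lazily from the owner executable's source and
// the specialization kind, then cached in m_hash. Computing it touches the
// source string, so the RELEASE_ASSERT below enforces that we only do it when
// we are not on a compilation thread (see isSafeToComputeHash() above).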
97 CodeBlockHash CodeBlock::hash() const
98 {
99     if (!m_hash) {
100         RELEASE_ASSERT(isSafeToComputeHash());
101         m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind());
102     }
103     return m_hash;
104 }
105
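// Recovers the full "function ..." text for tooling output. The unlinked
// executable stores offsets relative to its own source, so we translate them
// into the provider's coordinates via the start-offset delta, then slice from
// the function name through the end of the body and prefix "function ".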
106 CString CodeBlock::sourceCodeForTools() const
107 {
108     if (codeType() != FunctionCode)
109         return ownerExecutable()->source().toUTF8();
110     
111     SourceProvider* provider = source();
112     FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
113     UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
114     unsigned unlinkedStartOffset = unlinked->startOffset();
115     unsigned linkedStartOffset = executable->source().startOffset();
116     int delta = linkedStartOffset - unlinkedStartOffset;
117     unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
118     unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
119     return toCString(
120         "function ",
121         provider->source().impl()->utf8ForRange(rangeStart, rangeEnd - rangeStart));
122 }
123
124 CString CodeBlock::sourceCodeOnOneLine() const
125 {
126     return reduceWhitespace(sourceCodeForTools());
127 }
128
129 void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
130 {
131     if (hasHash() || isSafeToComputeHash())
132         out.print(inferredName(), "#", hash(), ":[", RawPointer(this), "->", RawPointer(ownerExecutable()), ", ", jitType, codeType());
133     else
134         out.print(inferredName(), "#<no-hash>:[", RawPointer(this), "->", RawPointer(ownerExecutable()), ", ", jitType, codeType());
135
136     if (codeType() == FunctionCode)
137         out.print(specializationKind());
138     out.print(", ", instructionCount());
139     if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
140         out.print(" (SABI)");
141     if (ownerExecutable()->neverInline())
142         out.print(" (NeverInline)");
143     if (ownerExecutable()->isStrictMode())
144         out.print(" (StrictMode)");
145     out.print("]");
146 }
147
148 void CodeBlock::dump(PrintStream& out) const
149 {
150     dumpAssumingJITType(out, jitType());
151 }
152
153 static CString constantName(int k, JSValue value)
154 {
155     return toCString(value, "(@k", k - FirstConstantRegisterIndex, ")");
156 }
157
158 static CString idName(int id0, const Identifier& ident)
159 {
160     return toCString(ident.impl(), "(@id", id0, ")");
161 }
162
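// Pretty-prints a bytecode operand: constants as "<value>(@kN)", arguments as
// "this"/"argN", and locals as "locN" -- e.g. "loc3", "arg1", or "false(@k2)".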
163 CString CodeBlock::registerName(int r) const
164 {
165     if (r == missingThisObjectMarker())
166         return "<null>";
167
168     if (isConstantRegisterIndex(r))
169         return constantName(r, getConstant(r));
170
171     if (operandIsArgument(r)) {
172         if (!VirtualRegister(r).toArgument())
173             return "this";
174         return toCString("arg", VirtualRegister(r).toArgument());
175     }
176
177     return toCString("loc", VirtualRegister(r).toLocal());
178 }
179
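// Renders a RegExp as "/pattern/flags" (g, i, m) for the bytecode dump.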
180 static CString regexpToSourceString(RegExp* regExp)
181 {
182     char postfix[5] = { '/', 0, 0, 0, 0 };
183     int index = 1;
184     if (regExp->global())
185         postfix[index++] = 'g';
186     if (regExp->ignoreCase())
187         postfix[index++] = 'i';
188     if (regExp->multiline())
189         postfix[index] = 'm';
190
191     return toCString("/", regExp->pattern().impl(), postfix);
192 }
193
194 static CString regexpName(int re, RegExp* regexp)
195 {
196     return toCString(regexpToSourceString(regexp), "(@re", re, ")");
197 }
198
199 NEVER_INLINE static const char* debugHookName(int debugHookID)
200 {
201     switch (static_cast<DebugHookID>(debugHookID)) {
202         case DidEnterCallFrame:
203             return "didEnterCallFrame";
204         case WillLeaveCallFrame:
205             return "willLeaveCallFrame";
206         case WillExecuteStatement:
207             return "willExecuteStatement";
208         case WillExecuteProgram:
209             return "willExecuteProgram";
210         case DidExecuteProgram:
211             return "didExecuteProgram";
212         case DidReachBreakpoint:
213             return "didReachBreakpoint";
214     }
215
216     RELEASE_ASSERT_NOT_REACHED();
217     return "";
218 }
219
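// The print helpers below consume their operands by advancing |it|, so the
// main dump loop in dumpBytecode() stays aligned with each opcode's length.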
220 void CodeBlock::printUnaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
221 {
222     int r0 = (++it)->u.operand;
223     int r1 = (++it)->u.operand;
224
225     printLocationAndOp(out, exec, location, it, op);
226     out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
227 }
228
229 void CodeBlock::printBinaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
230 {
231     int r0 = (++it)->u.operand;
232     int r1 = (++it)->u.operand;
233     int r2 = (++it)->u.operand;
234     printLocationAndOp(out, exec, location, it, op);
235     out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
236 }
237
238 void CodeBlock::printConditionalJump(PrintStream& out, ExecState* exec, const Instruction*, const Instruction*& it, int location, const char* op)
239 {
240     int r0 = (++it)->u.operand;
241     int offset = (++it)->u.operand;
242     printLocationAndOp(out, exec, location, it, op);
243     out.printf("%s, %d(->%d)", registerName(r0).data(), offset, location + offset);
244 }
245
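// All flavors of get_by_id share the same operand layout (dst, base, property
// id, then four cache/profiling slots), so one printer handles the whole
// family and then skips ahead to the value profile.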
246 void CodeBlock::printGetByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it)
247 {
248     const char* op;
249     switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
250     case op_get_by_id:
251         op = "get_by_id";
252         break;
253     case op_get_by_id_out_of_line:
254         op = "get_by_id_out_of_line";
255         break;
256     case op_get_by_id_self:
257         op = "get_by_id_self";
258         break;
259     case op_get_by_id_proto:
260         op = "get_by_id_proto";
261         break;
262     case op_get_by_id_chain:
263         op = "get_by_id_chain";
264         break;
265     case op_get_by_id_getter_self:
266         op = "get_by_id_getter_self";
267         break;
268     case op_get_by_id_getter_proto:
269         op = "get_by_id_getter_proto";
270         break;
271     case op_get_by_id_getter_chain:
272         op = "get_by_id_getter_chain";
273         break;
274     case op_get_by_id_custom_self:
275         op = "get_by_id_custom_self";
276         break;
277     case op_get_by_id_custom_proto:
278         op = "get_by_id_custom_proto";
279         break;
280     case op_get_by_id_custom_chain:
281         op = "get_by_id_custom_chain";
282         break;
283     case op_get_by_id_generic:
284         op = "get_by_id_generic";
285         break;
286     case op_get_array_length:
287         op = "array_length";
288         break;
289     case op_get_string_length:
290         op = "string_length";
291         break;
292     default:
293         RELEASE_ASSERT_NOT_REACHED();
294         op = 0;
295     }
296     int r0 = (++it)->u.operand;
297     int r1 = (++it)->u.operand;
298     int id0 = (++it)->u.operand;
299     printLocationAndOp(out, exec, location, it, op);
300     out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
301     it += 4; // Increment up to the value profiler.
302 }
303
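// Dumps a Structure pointer and, when the property is present, its offset.
// The concurrency-safe lookup (getConcurrently) is used so this can run while
// compilation threads are active.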
304 #if ENABLE(JIT) || ENABLE(LLINT) // unused in some configurations
305 static void dumpStructure(PrintStream& out, const char* name, ExecState* exec, Structure* structure, const Identifier& ident)
306 {
307     if (!structure)
308         return;
309     
310     out.printf("%s = %p", name, structure);
311     
312     PropertyOffset offset = structure->getConcurrently(exec->vm(), ident.impl());
313     if (offset != invalidOffset)
314         out.printf(" (offset = %d)", offset);
315 }
316 #endif
317
318 #if ENABLE(JIT) // unused when not ENABLE(JIT), leading to silly warnings
319 static void dumpChain(PrintStream& out, ExecState* exec, StructureChain* chain, const Identifier& ident)
320 {
321     out.printf("chain = %p: [", chain);
322     bool first = true;
323     for (WriteBarrier<Structure>* currentStructure = chain->head();
324          *currentStructure;
325          ++currentStructure) {
326         if (first)
327             first = false;
328         else
329             out.printf(", ");
330         dumpStructure(out, "struct", exec, currentStructure->get(), ident);
331     }
332     out.printf("]");
333 }
334 #endif
335
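// Prints the inline-cache state for a get_by_id site: the LLInt cache baked
// into the instruction stream, and (when the JIT is enabled) the
// StructureStubInfo's access type plus any structures, chains, or polymorphic
// lists it holds.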
336 void CodeBlock::printGetByIdCacheStatus(PrintStream& out, ExecState* exec, int location, const StubInfoMap& map)
337 {
338     Instruction* instruction = instructions().begin() + location;
339
340     const Identifier& ident = identifier(instruction[3].u.operand);
341     
342     UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.
343     
344 #if ENABLE(LLINT)
345     if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_array_length)
346         out.printf(" llint(array_length)");
347     else if (Structure* structure = instruction[4].u.structure.get()) {
348         out.printf(" llint(");
349         dumpStructure(out, "struct", exec, structure, ident);
350         out.printf(")");
351     }
352 #endif
353
354 #if ENABLE(JIT)
355     if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
356         StructureStubInfo& stubInfo = *stubPtr;
357         if (stubInfo.seen) {
358             out.printf(" jit(");
359             
360             Structure* baseStructure = 0;
361             Structure* prototypeStructure = 0;
362             StructureChain* chain = 0;
363             PolymorphicAccessStructureList* structureList = 0;
364             int listSize = 0;
365             
366             switch (stubInfo.accessType) {
367             case access_get_by_id_self:
368                 out.printf("self");
369                 baseStructure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
370                 break;
371             case access_get_by_id_proto:
372                 out.printf("proto");
373                 baseStructure = stubInfo.u.getByIdProto.baseObjectStructure.get();
374                 prototypeStructure = stubInfo.u.getByIdProto.prototypeStructure.get();
375                 break;
376             case access_get_by_id_chain:
377                 out.printf("chain");
378                 baseStructure = stubInfo.u.getByIdChain.baseObjectStructure.get();
379                 chain = stubInfo.u.getByIdChain.chain.get();
380                 break;
381             case access_get_by_id_self_list:
382                 out.printf("self_list");
383                 structureList = stubInfo.u.getByIdSelfList.structureList;
384                 listSize = stubInfo.u.getByIdSelfList.listSize;
385                 break;
386             case access_get_by_id_proto_list:
387                 out.printf("proto_list");
388                 structureList = stubInfo.u.getByIdProtoList.structureList;
389                 listSize = stubInfo.u.getByIdProtoList.listSize;
390                 break;
391             case access_unset:
392                 out.printf("unset");
393                 break;
394             case access_get_by_id_generic:
395                 out.printf("generic");
396                 break;
397             case access_get_array_length:
398                 out.printf("array_length");
399                 break;
400             case access_get_string_length:
401                 out.printf("string_length");
402                 break;
403             default:
404                 RELEASE_ASSERT_NOT_REACHED();
405                 break;
406             }
407             
408             if (baseStructure) {
409                 out.printf(", ");
410                 dumpStructure(out, "struct", exec, baseStructure, ident);
411             }
412             
413             if (prototypeStructure) {
414                 out.printf(", ");
415                 dumpStructure(out, "prototypeStruct", exec, prototypeStructure, ident);
416             }
417             
418             if (chain) {
419                 out.printf(", ");
420                 dumpChain(out, exec, chain, ident);
421             }
422             
423             if (structureList) {
424                 out.printf(", list = %p: [", structureList);
425                 for (int i = 0; i < listSize; ++i) {
426                     if (i)
427                         out.printf(", ");
428                     out.printf("(");
429                     dumpStructure(out, "base", exec, structureList->list[i].base.get(), ident);
430                     if (structureList->list[i].isChain) {
431                         if (structureList->list[i].u.chain.get()) {
432                             out.printf(", ");
433                             dumpChain(out, exec, structureList->list[i].u.chain.get(), ident);
434                         }
435                     } else {
436                         if (structureList->list[i].u.proto.get()) {
437                             out.printf(", ");
438                             dumpStructure(out, "proto", exec, structureList->list[i].u.proto.get(), ident);
439                         }
440                     }
441                     out.printf(")");
442                 }
443                 out.printf("]");
444             }
445             out.printf(")");
446         }
447     }
448 #else
449     UNUSED_PARAM(map);
450 #endif
451 }
452
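// Prints a call-family opcode along with, when requested, the LLInt and JIT
// call link caches (last seen callee) and the computed CallLinkStatus, then
// the trailing array and value profiles.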
453 void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling)
454 {
455     int dst = (++it)->u.operand;
456     int func = (++it)->u.operand;
457     int argCount = (++it)->u.operand;
458     int registerOffset = (++it)->u.operand;
459     printLocationAndOp(out, exec, location, it, op);
460     out.printf("%s, %s, %d, %d", registerName(dst).data(), registerName(func).data(), argCount, registerOffset);
461     if (cacheDumpMode == DumpCaches) {
462 #if ENABLE(LLINT)
463         LLIntCallLinkInfo* callLinkInfo = it[1].u.callLinkInfo;
464         if (callLinkInfo->lastSeenCallee) {
465             out.printf(
466                 " llint(%p, exec %p)",
467                 callLinkInfo->lastSeenCallee.get(),
468                 callLinkInfo->lastSeenCallee->executable());
469         }
470 #endif
471 #if ENABLE(JIT)
472         if (numberOfCallLinkInfos()) {
473             JSFunction* target = getCallLinkInfo(location).lastSeenCallee.get();
474             if (target)
475                 out.printf(" jit(%p, exec %p)", target, target->executable());
476         }
477 #endif
478         out.print(" status(", CallLinkStatus::computeFor(this, location), ")");
479     }
480     ++it;
481     dumpArrayProfiling(out, it, hasPrintedProfiling);
482     dumpValueProfiling(out, it, hasPrintedProfiling);
483 }
484
485 void CodeBlock::printPutByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
486 {
487     int r0 = (++it)->u.operand;
488     int id0 = (++it)->u.operand;
489     int r1 = (++it)->u.operand;
490     printLocationAndOp(out, exec, location, it, op);
491     out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data());
492     it += 5;
493 }
494
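// Dumps the whole CodeBlock: a summary line (instruction and byte counts,
// parameters, callee registers, variables, captured vars, arguments and
// activation registers), then every instruction, then the identifier,
// constant, regexp, exception handler, and switch jump table sections.
// Typically driven from debugging code; any PrintStream works, for example
// WTF::dataFile() (the no-argument convenience overload is not shown here).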
495 void CodeBlock::dumpBytecode(PrintStream& out)
496 {
497     // We only use the ExecState* for things that don't actually lead to JS execution,
498     // like converting a JSString to a String. Hence the globalExec is appropriate.
499     ExecState* exec = m_globalObject->globalExec();
500     
501     size_t instructionCount = 0;
502
503     for (size_t i = 0; i < instructions().size(); i += opcodeLengths[exec->interpreter()->getOpcodeID(instructions()[i].u.opcode)])
504         ++instructionCount;
505
506     out.print(*this);
507     out.printf(
508         ": %lu m_instructions; %lu bytes; %d parameter(s); %d callee register(s); %d variable(s)",
509         static_cast<unsigned long>(instructions().size()),
510         static_cast<unsigned long>(instructions().size() * sizeof(Instruction)),
511         m_numParameters, m_numCalleeRegisters, m_numVars);
512     if (symbolTable() && symbolTable()->captureCount()) {
513         out.printf(
514             "; %d captured var(s) (from r%d to r%d, inclusive)",
515             symbolTable()->captureCount(), symbolTable()->captureStart(), symbolTable()->captureEnd() + 1);
516     }
517     if (usesArguments()) {
518         out.printf(
519             "; uses arguments, in r%d, r%d",
520             argumentsRegister().offset(),
521             unmodifiedArgumentsRegister(argumentsRegister()).offset());
522     }
523     if (needsFullScopeChain() && codeType() == FunctionCode)
524         out.printf("; activation in r%d", activationRegister().offset());
525     out.printf("\n");
526     
527     StubInfoMap stubInfos;
528 #if ENABLE(JIT)
529     {
530         ConcurrentJITLocker locker(m_lock);
531         getStubInfoMap(locker, stubInfos);
532     }
533 #endif
534     
535     const Instruction* begin = instructions().begin();
536     const Instruction* end = instructions().end();
537     for (const Instruction* it = begin; it != end; ++it)
538         dumpBytecode(out, exec, begin, it, stubInfos);
539     
540     if (numberOfIdentifiers()) {
541         out.printf("\nIdentifiers:\n");
542         size_t i = 0;
543         do {
544             out.printf("  id%u = %s\n", static_cast<unsigned>(i), identifier(i).string().utf8().data());
545             ++i;
546         } while (i != numberOfIdentifiers());
547     }
548
549     if (!m_constantRegisters.isEmpty()) {
550         out.printf("\nConstants:\n");
551         size_t i = 0;
552         do {
553             out.printf("   k%u = %s\n", static_cast<unsigned>(i), toCString(m_constantRegisters[i].get()).data());
554             ++i;
555         } while (i < m_constantRegisters.size());
556     }
557
558     if (size_t count = m_unlinkedCode->numberOfRegExps()) {
559         out.printf("\nm_regexps:\n");
560         size_t i = 0;
561         do {
562             out.printf("  re%u = %s\n", static_cast<unsigned>(i), regexpToSourceString(m_unlinkedCode->regexp(i)).data());
563             ++i;
564         } while (i < count);
565     }
566
567     if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
568         out.printf("\nException Handlers:\n");
569         unsigned i = 0;
570         do {
571             out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] depth: [%4d] }\n", i + 1, m_rareData->m_exceptionHandlers[i].start, m_rareData->m_exceptionHandlers[i].end, m_rareData->m_exceptionHandlers[i].target, m_rareData->m_exceptionHandlers[i].scopeDepth);
572             ++i;
573         } while (i < m_rareData->m_exceptionHandlers.size());
574     }
575     
576     if (m_rareData && !m_rareData->m_switchJumpTables.isEmpty()) {
577         out.printf("Switch Jump Tables:\n");
578         unsigned i = 0;
579         do {
580             out.printf("  %1d = {\n", i);
581             int entry = 0;
582             Vector<int32_t>::const_iterator end = m_rareData->m_switchJumpTables[i].branchOffsets.end();
583             for (Vector<int32_t>::const_iterator iter = m_rareData->m_switchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) {
584                 if (!*iter)
585                     continue;
586                 out.printf("\t\t%4d => %04d\n", entry + m_rareData->m_switchJumpTables[i].min, *iter);
587             }
588             out.printf("      }\n");
589             ++i;
590         } while (i < m_rareData->m_switchJumpTables.size());
591     }
592     
593     if (m_rareData && !m_rareData->m_stringSwitchJumpTables.isEmpty()) {
594         out.printf("\nString Switch Jump Tables:\n");
595         unsigned i = 0;
596         do {
597             out.printf("  %1d = {\n", i);
598             StringJumpTable::StringOffsetTable::const_iterator end = m_rareData->m_stringSwitchJumpTables[i].offsetTable.end();
599             for (StringJumpTable::StringOffsetTable::const_iterator iter = m_rareData->m_stringSwitchJumpTables[i].offsetTable.begin(); iter != end; ++iter)
600                 out.printf("\t\t\"%s\" => %04d\n", String(iter->key).utf8().data(), iter->value.branchOffset);
601             out.printf("      }\n");
602             ++i;
603         } while (i < m_rareData->m_stringSwitchJumpTables.size());
604     }
605
606     out.printf("\n");
607 }
608
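// Profiling annotations (value/array profiles, rare case counters, exit sites)
// are appended after the instruction text; the first one is set off by extra
// spacing and subsequent ones are separated with "; ".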
609 void CodeBlock::beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling)
610 {
611     if (hasPrintedProfiling) {
612         out.print("; ");
613         return;
614     }
615     
616     out.print("    ");
617     hasPrintedProfiling = true;
618 }
619
620 void CodeBlock::dumpValueProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
621 {
622     ConcurrentJITLocker locker(m_lock);
623     
624     ++it;
625     CString description = it->u.profile->briefDescription(locker);
626     if (!description.length())
627         return;
628     beginDumpProfiling(out, hasPrintedProfiling);
629     out.print(description);
630 }
631
632 void CodeBlock::dumpArrayProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
633 {
634     ConcurrentJITLocker locker(m_lock);
635     
636     ++it;
637     if (!it->u.arrayProfile)
638         return;
639     CString description = it->u.arrayProfile->briefDescription(locker, this);
640     if (!description.length())
641         return;
642     beginDumpProfiling(out, hasPrintedProfiling);
643     out.print(description);
644 }
645
646 void CodeBlock::dumpRareCaseProfile(PrintStream& out, const char* name, RareCaseProfile* profile, bool& hasPrintedProfiling)
647 {
648     if (!profile || !profile->m_counter)
649         return;
650
651     beginDumpProfiling(out, hasPrintedProfiling);
652     out.print(name, profile->m_counter);
653 }
654
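// Dumps a single instruction. Each case must advance |it| past every operand
// of its opcode (including cache and profile slots) so that the caller's ++it
// lands on the next instruction.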
655 void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it, const StubInfoMap& map)
656 {
657     int location = it - begin;
658     bool hasPrintedProfiling = false;
659     switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
660         case op_enter: {
661             printLocationAndOp(out, exec, location, it, "enter");
662             break;
663         }
664         case op_touch_entry: {
665             printLocationAndOp(out, exec, location, it, "touch_entry");
666             break;
667         }
668         case op_create_activation: {
669             int r0 = (++it)->u.operand;
670             printLocationOpAndRegisterOperand(out, exec, location, it, "create_activation", r0);
671             break;
672         }
673         case op_create_arguments: {
674             int r0 = (++it)->u.operand;
675             printLocationOpAndRegisterOperand(out, exec, location, it, "create_arguments", r0);
676             break;
677         }
678         case op_init_lazy_reg: {
679             int r0 = (++it)->u.operand;
680             printLocationOpAndRegisterOperand(out, exec, location, it, "init_lazy_reg", r0);
681             break;
682         }
683         case op_get_callee: {
684             int r0 = (++it)->u.operand;
685             printLocationOpAndRegisterOperand(out, exec, location, it, "get_callee", r0);
686             ++it;
687             break;
688         }
689         case op_create_this: {
690             int r0 = (++it)->u.operand;
691             int r1 = (++it)->u.operand;
692             unsigned inferredInlineCapacity = (++it)->u.operand;
693             printLocationAndOp(out, exec, location, it, "create_this");
694             out.printf("%s, %s, %u", registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity);
695             break;
696         }
697         case op_to_this: {
698             int r0 = (++it)->u.operand;
699             printLocationOpAndRegisterOperand(out, exec, location, it, "to_this", r0);
700             ++it; // Skip value profile.
701             break;
702         }
703         case op_new_object: {
704             int r0 = (++it)->u.operand;
705             unsigned inferredInlineCapacity = (++it)->u.operand;
706             printLocationAndOp(out, exec, location, it, "new_object");
707             out.printf("%s, %u", registerName(r0).data(), inferredInlineCapacity);
708             ++it; // Skip object allocation profile.
709             break;
710         }
711         case op_new_array: {
712             int dst = (++it)->u.operand;
713             int argv = (++it)->u.operand;
714             int argc = (++it)->u.operand;
715             printLocationAndOp(out, exec, location, it, "new_array");
716             out.printf("%s, %s, %d", registerName(dst).data(), registerName(argv).data(), argc);
717             ++it; // Skip array allocation profile.
718             break;
719         }
720         case op_new_array_with_size: {
721             int dst = (++it)->u.operand;
722             int length = (++it)->u.operand;
723             printLocationAndOp(out, exec, location, it, "new_array_with_size");
724             out.printf("%s, %s", registerName(dst).data(), registerName(length).data());
725             ++it; // Skip array allocation profile.
726             break;
727         }
728         case op_new_array_buffer: {
729             int dst = (++it)->u.operand;
730             int argv = (++it)->u.operand;
731             int argc = (++it)->u.operand;
732             printLocationAndOp(out, exec, location, it, "new_array_buffer");
733             out.printf("%s, %d, %d", registerName(dst).data(), argv, argc);
734             ++it; // Skip array allocation profile.
735             break;
736         }
737         case op_new_regexp: {
738             int r0 = (++it)->u.operand;
739             int re0 = (++it)->u.operand;
740             printLocationAndOp(out, exec, location, it, "new_regexp");
741             out.printf("%s, ", registerName(r0).data());
742             if (re0 >= 0 && re0 < (int)m_unlinkedCode->numberOfRegExps())
743                 out.printf("%s", regexpName(re0, regexp(re0)).data());
744             else
745                 out.printf("bad_regexp(%d)", re0);
746             break;
747         }
748         case op_mov: {
749             int r0 = (++it)->u.operand;
750             int r1 = (++it)->u.operand;
751             printLocationAndOp(out, exec, location, it, "mov");
752             out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
753             break;
754         }
755         case op_captured_mov: {
756             int r0 = (++it)->u.operand;
757             int r1 = (++it)->u.operand;
758             printLocationAndOp(out, exec, location, it, "captured_mov");
759             out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
760             ++it;
761             break;
762         }
763         case op_not: {
764             printUnaryOp(out, exec, location, it, "not");
765             break;
766         }
767         case op_eq: {
768             printBinaryOp(out, exec, location, it, "eq");
769             break;
770         }
771         case op_eq_null: {
772             printUnaryOp(out, exec, location, it, "eq_null");
773             break;
774         }
775         case op_neq: {
776             printBinaryOp(out, exec, location, it, "neq");
777             break;
778         }
779         case op_neq_null: {
780             printUnaryOp(out, exec, location, it, "neq_null");
781             break;
782         }
783         case op_stricteq: {
784             printBinaryOp(out, exec, location, it, "stricteq");
785             break;
786         }
787         case op_nstricteq: {
788             printBinaryOp(out, exec, location, it, "nstricteq");
789             break;
790         }
791         case op_less: {
792             printBinaryOp(out, exec, location, it, "less");
793             break;
794         }
795         case op_lesseq: {
796             printBinaryOp(out, exec, location, it, "lesseq");
797             break;
798         }
799         case op_greater: {
800             printBinaryOp(out, exec, location, it, "greater");
801             break;
802         }
803         case op_greatereq: {
804             printBinaryOp(out, exec, location, it, "greatereq");
805             break;
806         }
807         case op_inc: {
808             int r0 = (++it)->u.operand;
809             printLocationOpAndRegisterOperand(out, exec, location, it, "inc", r0);
810             break;
811         }
812         case op_dec: {
813             int r0 = (++it)->u.operand;
814             printLocationOpAndRegisterOperand(out, exec, location, it, "dec", r0);
815             break;
816         }
817         case op_to_number: {
818             printUnaryOp(out, exec, location, it, "to_number");
819             break;
820         }
821         case op_negate: {
822             printUnaryOp(out, exec, location, it, "negate");
823             break;
824         }
825         case op_add: {
826             printBinaryOp(out, exec, location, it, "add");
827             ++it;
828             break;
829         }
830         case op_mul: {
831             printBinaryOp(out, exec, location, it, "mul");
832             ++it;
833             break;
834         }
835         case op_div: {
836             printBinaryOp(out, exec, location, it, "div");
837             ++it;
838             break;
839         }
840         case op_mod: {
841             printBinaryOp(out, exec, location, it, "mod");
842             break;
843         }
844         case op_sub: {
845             printBinaryOp(out, exec, location, it, "sub");
846             ++it;
847             break;
848         }
849         case op_lshift: {
850             printBinaryOp(out, exec, location, it, "lshift");
851             break;
852         }
853         case op_rshift: {
854             printBinaryOp(out, exec, location, it, "rshift");
855             break;
856         }
857         case op_urshift: {
858             printBinaryOp(out, exec, location, it, "urshift");
859             break;
860         }
861         case op_bitand: {
862             printBinaryOp(out, exec, location, it, "bitand");
863             ++it;
864             break;
865         }
866         case op_bitxor: {
867             printBinaryOp(out, exec, location, it, "bitxor");
868             ++it;
869             break;
870         }
871         case op_bitor: {
872             printBinaryOp(out, exec, location, it, "bitor");
873             ++it;
874             break;
875         }
876         case op_check_has_instance: {
877             int r0 = (++it)->u.operand;
878             int r1 = (++it)->u.operand;
879             int r2 = (++it)->u.operand;
880             int offset = (++it)->u.operand;
881             printLocationAndOp(out, exec, location, it, "check_has_instance");
882             out.printf("%s, %s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), offset, location + offset);
883             break;
884         }
885         case op_instanceof: {
886             int r0 = (++it)->u.operand;
887             int r1 = (++it)->u.operand;
888             int r2 = (++it)->u.operand;
889             printLocationAndOp(out, exec, location, it, "instanceof");
890             out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
891             break;
892         }
893         case op_unsigned: {
894             printUnaryOp(out, exec, location, it, "unsigned");
895             break;
896         }
897         case op_typeof: {
898             printUnaryOp(out, exec, location, it, "typeof");
899             break;
900         }
901         case op_is_undefined: {
902             printUnaryOp(out, exec, location, it, "is_undefined");
903             break;
904         }
905         case op_is_boolean: {
906             printUnaryOp(out, exec, location, it, "is_boolean");
907             break;
908         }
909         case op_is_number: {
910             printUnaryOp(out, exec, location, it, "is_number");
911             break;
912         }
913         case op_is_string: {
914             printUnaryOp(out, exec, location, it, "is_string");
915             break;
916         }
917         case op_is_object: {
918             printUnaryOp(out, exec, location, it, "is_object");
919             break;
920         }
921         case op_is_function: {
922             printUnaryOp(out, exec, location, it, "is_function");
923             break;
924         }
925         case op_in: {
926             printBinaryOp(out, exec, location, it, "in");
927             break;
928         }
929         case op_init_global_const_nop: {
930             printLocationAndOp(out, exec, location, it, "init_global_const_nop");
931             it++;
932             it++;
933             it++;
934             it++;
935             break;
936         }
937         case op_init_global_const: {
938             WriteBarrier<Unknown>* registerPointer = (++it)->u.registerPointer;
939             int r0 = (++it)->u.operand;
940             printLocationAndOp(out, exec, location, it, "init_global_const");
941             out.printf("g%d(%p), %s", m_globalObject->findRegisterIndex(registerPointer), registerPointer, registerName(r0).data());
942             it++;
943             it++;
944             break;
945         }
946         case op_get_by_id:
947         case op_get_by_id_out_of_line:
948         case op_get_by_id_self:
949         case op_get_by_id_proto:
950         case op_get_by_id_chain:
951         case op_get_by_id_getter_self:
952         case op_get_by_id_getter_proto:
953         case op_get_by_id_getter_chain:
954         case op_get_by_id_custom_self:
955         case op_get_by_id_custom_proto:
956         case op_get_by_id_custom_chain:
957         case op_get_by_id_generic:
958         case op_get_array_length:
959         case op_get_string_length: {
960             printGetByIdOp(out, exec, location, it);
961             printGetByIdCacheStatus(out, exec, location, map);
962             dumpValueProfiling(out, it, hasPrintedProfiling);
963             break;
964         }
965         case op_get_arguments_length: {
966             printUnaryOp(out, exec, location, it, "get_arguments_length");
967             it++;
968             break;
969         }
970         case op_put_by_id: {
971             printPutByIdOp(out, exec, location, it, "put_by_id");
972             break;
973         }
974         case op_put_by_id_out_of_line: {
975             printPutByIdOp(out, exec, location, it, "put_by_id_out_of_line");
976             break;
977         }
978         case op_put_by_id_replace: {
979             printPutByIdOp(out, exec, location, it, "put_by_id_replace");
980             break;
981         }
982         case op_put_by_id_transition: {
983             printPutByIdOp(out, exec, location, it, "put_by_id_transition");
984             break;
985         }
986         case op_put_by_id_transition_direct: {
987             printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct");
988             break;
989         }
990         case op_put_by_id_transition_direct_out_of_line: {
991             printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct_out_of_line");
992             break;
993         }
994         case op_put_by_id_transition_normal: {
995             printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal");
996             break;
997         }
998         case op_put_by_id_transition_normal_out_of_line: {
999             printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal_out_of_line");
1000             break;
1001         }
1002         case op_put_by_id_generic: {
1003             printPutByIdOp(out, exec, location, it, "put_by_id_generic");
1004             break;
1005         }
1006         case op_put_getter_setter: {
1007             int r0 = (++it)->u.operand;
1008             int id0 = (++it)->u.operand;
1009             int r1 = (++it)->u.operand;
1010             int r2 = (++it)->u.operand;
1011             printLocationAndOp(out, exec, location, it, "put_getter_setter");
1012             out.printf("%s, %s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data(), registerName(r2).data());
1013             break;
1014         }
1015         case op_del_by_id: {
1016             int r0 = (++it)->u.operand;
1017             int r1 = (++it)->u.operand;
1018             int id0 = (++it)->u.operand;
1019             printLocationAndOp(out, exec, location, it, "del_by_id");
1020             out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
1021             break;
1022         }
1023         case op_get_by_val: {
1024             int r0 = (++it)->u.operand;
1025             int r1 = (++it)->u.operand;
1026             int r2 = (++it)->u.operand;
1027             printLocationAndOp(out, exec, location, it, "get_by_val");
1028             out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1029             dumpArrayProfiling(out, it, hasPrintedProfiling);
1030             dumpValueProfiling(out, it, hasPrintedProfiling);
1031             break;
1032         }
1033         case op_get_argument_by_val: {
1034             int r0 = (++it)->u.operand;
1035             int r1 = (++it)->u.operand;
1036             int r2 = (++it)->u.operand;
1037             printLocationAndOp(out, exec, location, it, "get_argument_by_val");
1038             out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1039             ++it;
1040             dumpValueProfiling(out, it, hasPrintedProfiling);
1041             break;
1042         }
1043         case op_get_by_pname: {
1044             int r0 = (++it)->u.operand;
1045             int r1 = (++it)->u.operand;
1046             int r2 = (++it)->u.operand;
1047             int r3 = (++it)->u.operand;
1048             int r4 = (++it)->u.operand;
1049             int r5 = (++it)->u.operand;
1050             printLocationAndOp(out, exec, location, it, "get_by_pname");
1051             out.printf("%s, %s, %s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data(), registerName(r4).data(), registerName(r5).data());
1052             break;
1053         }
1054         case op_put_by_val: {
1055             int r0 = (++it)->u.operand;
1056             int r1 = (++it)->u.operand;
1057             int r2 = (++it)->u.operand;
1058             printLocationAndOp(out, exec, location, it, "put_by_val");
1059             out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1060             dumpArrayProfiling(out, it, hasPrintedProfiling);
1061             break;
1062         }
1063         case op_put_by_val_direct: {
1064             int r0 = (++it)->u.operand;
1065             int r1 = (++it)->u.operand;
1066             int r2 = (++it)->u.operand;
1067             printLocationAndOp(out, exec, location, it, "put_by_val_direct");
1068             out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1069             dumpArrayProfiling(out, it, hasPrintedProfiling);
1070             break;
1071         }
1072         case op_del_by_val: {
1073             int r0 = (++it)->u.operand;
1074             int r1 = (++it)->u.operand;
1075             int r2 = (++it)->u.operand;
1076             printLocationAndOp(out, exec, location, it, "del_by_val");
1077             out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1078             break;
1079         }
1080         case op_put_by_index: {
1081             int r0 = (++it)->u.operand;
1082             unsigned n0 = (++it)->u.operand;
1083             int r1 = (++it)->u.operand;
1084             printLocationAndOp(out, exec, location, it, "put_by_index");
1085             out.printf("%s, %u, %s", registerName(r0).data(), n0, registerName(r1).data());
1086             break;
1087         }
1088         case op_jmp: {
1089             int offset = (++it)->u.operand;
1090             printLocationAndOp(out, exec, location, it, "jmp");
1091             out.printf("%d(->%d)", offset, location + offset);
1092             break;
1093         }
1094         case op_jtrue: {
1095             printConditionalJump(out, exec, begin, it, location, "jtrue");
1096             break;
1097         }
1098         case op_jfalse: {
1099             printConditionalJump(out, exec, begin, it, location, "jfalse");
1100             break;
1101         }
1102         case op_jeq_null: {
1103             printConditionalJump(out, exec, begin, it, location, "jeq_null");
1104             break;
1105         }
1106         case op_jneq_null: {
1107             printConditionalJump(out, exec, begin, it, location, "jneq_null");
1108             break;
1109         }
1110         case op_jneq_ptr: {
1111             int r0 = (++it)->u.operand;
1112             Special::Pointer pointer = (++it)->u.specialPointer;
1113             int offset = (++it)->u.operand;
1114             printLocationAndOp(out, exec, location, it, "jneq_ptr");
1115             out.printf("%s, %d (%p), %d(->%d)", registerName(r0).data(), pointer, m_globalObject->actualPointerFor(pointer), offset, location + offset);
1116             break;
1117         }
1118         case op_jless: {
1119             int r0 = (++it)->u.operand;
1120             int r1 = (++it)->u.operand;
1121             int offset = (++it)->u.operand;
1122             printLocationAndOp(out, exec, location, it, "jless");
1123             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1124             break;
1125         }
1126         case op_jlesseq: {
1127             int r0 = (++it)->u.operand;
1128             int r1 = (++it)->u.operand;
1129             int offset = (++it)->u.operand;
1130             printLocationAndOp(out, exec, location, it, "jlesseq");
1131             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1132             break;
1133         }
1134         case op_jgreater: {
1135             int r0 = (++it)->u.operand;
1136             int r1 = (++it)->u.operand;
1137             int offset = (++it)->u.operand;
1138             printLocationAndOp(out, exec, location, it, "jgreater");
1139             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1140             break;
1141         }
1142         case op_jgreatereq: {
1143             int r0 = (++it)->u.operand;
1144             int r1 = (++it)->u.operand;
1145             int offset = (++it)->u.operand;
1146             printLocationAndOp(out, exec, location, it, "jgreatereq");
1147             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1148             break;
1149         }
1150         case op_jnless: {
1151             int r0 = (++it)->u.operand;
1152             int r1 = (++it)->u.operand;
1153             int offset = (++it)->u.operand;
1154             printLocationAndOp(out, exec, location, it, "jnless");
1155             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1156             break;
1157         }
1158         case op_jnlesseq: {
1159             int r0 = (++it)->u.operand;
1160             int r1 = (++it)->u.operand;
1161             int offset = (++it)->u.operand;
1162             printLocationAndOp(out, exec, location, it, "jnlesseq");
1163             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1164             break;
1165         }
1166         case op_jngreater: {
1167             int r0 = (++it)->u.operand;
1168             int r1 = (++it)->u.operand;
1169             int offset = (++it)->u.operand;
1170             printLocationAndOp(out, exec, location, it, "jngreater");
1171             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1172             break;
1173         }
1174         case op_jngreatereq: {
1175             int r0 = (++it)->u.operand;
1176             int r1 = (++it)->u.operand;
1177             int offset = (++it)->u.operand;
1178             printLocationAndOp(out, exec, location, it, "jngreatereq");
1179             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1180             break;
1181         }
1182         case op_loop_hint: {
1183             printLocationAndOp(out, exec, location, it, "loop_hint");
1184             break;
1185         }
1186         case op_switch_imm: {
1187             int tableIndex = (++it)->u.operand;
1188             int defaultTarget = (++it)->u.operand;
1189             int scrutineeRegister = (++it)->u.operand;
1190             printLocationAndOp(out, exec, location, it, "switch_imm");
1191             out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1192             break;
1193         }
1194         case op_switch_char: {
1195             int tableIndex = (++it)->u.operand;
1196             int defaultTarget = (++it)->u.operand;
1197             int scrutineeRegister = (++it)->u.operand;
1198             printLocationAndOp(out, exec, location, it, "switch_char");
1199             out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1200             break;
1201         }
1202         case op_switch_string: {
1203             int tableIndex = (++it)->u.operand;
1204             int defaultTarget = (++it)->u.operand;
1205             int scrutineeRegister = (++it)->u.operand;
1206             printLocationAndOp(out, exec, location, it, "switch_string");
1207             out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1208             break;
1209         }
1210         case op_new_func: {
1211             int r0 = (++it)->u.operand;
1212             int f0 = (++it)->u.operand;
1213             int shouldCheck = (++it)->u.operand;
1214             printLocationAndOp(out, exec, location, it, "new_func");
1215             out.printf("%s, f%d, %s", registerName(r0).data(), f0, shouldCheck ? "<Checked>" : "<Unchecked>");
1216             break;
1217         }
1218         case op_new_captured_func: {
1219             int r0 = (++it)->u.operand;
1220             int f0 = (++it)->u.operand;
1221             printLocationAndOp(out, exec, location, it, "new_captured_func");
1222             out.printf("%s, f%d", registerName(r0).data(), f0);
1223             ++it;
1224             break;
1225         }
1226         case op_new_func_exp: {
1227             int r0 = (++it)->u.operand;
1228             int f0 = (++it)->u.operand;
1229             printLocationAndOp(out, exec, location, it, "new_func_exp");
1230             out.printf("%s, f%d", registerName(r0).data(), f0);
1231             break;
1232         }
1233         case op_call: {
1234             printCallOp(out, exec, location, it, "call", DumpCaches, hasPrintedProfiling);
1235             break;
1236         }
1237         case op_call_eval: {
1238             printCallOp(out, exec, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling);
1239             break;
1240         }
1241         case op_call_varargs: {
1242             int result = (++it)->u.operand;
1243             int callee = (++it)->u.operand;
1244             int thisValue = (++it)->u.operand;
1245             int arguments = (++it)->u.operand;
1246             int firstFreeRegister = (++it)->u.operand;
1247             ++it;
1248             printLocationAndOp(out, exec, location, it, "call_varargs");
1249             out.printf("%s, %s, %s, %s, %d", registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister);
1250             dumpValueProfiling(out, it, hasPrintedProfiling);
1251             break;
1252         }
1253         case op_tear_off_activation: {
1254             int r0 = (++it)->u.operand;
1255             printLocationOpAndRegisterOperand(out, exec, location, it, "tear_off_activation", r0);
1256             break;
1257         }
1258         case op_tear_off_arguments: {
1259             int r0 = (++it)->u.operand;
1260             int r1 = (++it)->u.operand;
1261             printLocationAndOp(out, exec, location, it, "tear_off_arguments");
1262             out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
1263             break;
1264         }
1265         case op_ret: {
1266             int r0 = (++it)->u.operand;
1267             printLocationOpAndRegisterOperand(out, exec, location, it, "ret", r0);
1268             break;
1269         }
1270         case op_ret_object_or_this: {
1271             int r0 = (++it)->u.operand;
1272             int r1 = (++it)->u.operand;
1273             printLocationAndOp(out, exec, location, it, "constructor_ret");
1274             out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
1275             break;
1276         }
1277         case op_construct: {
1278             printCallOp(out, exec, location, it, "construct", DumpCaches, hasPrintedProfiling);
1279             break;
1280         }
1281         case op_strcat: {
1282             int r0 = (++it)->u.operand;
1283             int r1 = (++it)->u.operand;
1284             int count = (++it)->u.operand;
1285             printLocationAndOp(out, exec, location, it, "strcat");
1286             out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), count);
1287             break;
1288         }
1289         case op_to_primitive: {
1290             int r0 = (++it)->u.operand;
1291             int r1 = (++it)->u.operand;
1292             printLocationAndOp(out, exec, location, it, "to_primitive");
1293             out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
1294             break;
1295         }
1296         case op_get_pnames: {
1297             int r0 = it[1].u.operand;
1298             int r1 = it[2].u.operand;
1299             int r2 = it[3].u.operand;
1300             int r3 = it[4].u.operand;
1301             int offset = it[5].u.operand;
1302             printLocationAndOp(out, exec, location, it, "get_pnames");
1303             out.printf("%s, %s, %s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data(), offset, location + offset);
1304             it += OPCODE_LENGTH(op_get_pnames) - 1;
1305             break;
1306         }
1307         case op_next_pname: {
1308             int dest = it[1].u.operand;
1309             int base = it[2].u.operand;
1310             int i = it[3].u.operand;
1311             int size = it[4].u.operand;
1312             int iter = it[5].u.operand;
1313             int offset = it[6].u.operand;
1314             printLocationAndOp(out, exec, location, it, "next_pname");
1315             out.printf("%s, %s, %s, %s, %s, %d(->%d)", registerName(dest).data(), registerName(base).data(), registerName(i).data(), registerName(size).data(), registerName(iter).data(), offset, location + offset);
1316             it += OPCODE_LENGTH(op_next_pname) - 1;
1317             break;
1318         }
1319         case op_push_with_scope: {
1320             int r0 = (++it)->u.operand;
1321             printLocationOpAndRegisterOperand(out, exec, location, it, "push_with_scope", r0);
1322             break;
1323         }
1324         case op_pop_scope: {
1325             printLocationAndOp(out, exec, location, it, "pop_scope");
1326             break;
1327         }
1328         case op_push_name_scope: {
1329             int id0 = (++it)->u.operand;
1330             int r1 = (++it)->u.operand;
1331             unsigned attributes = (++it)->u.operand;
1332             printLocationAndOp(out, exec, location, it, "push_name_scope");
1333             out.printf("%s, %s, %u", idName(id0, identifier(id0)).data(), registerName(r1).data(), attributes);
1334             break;
1335         }
1336         case op_catch: {
1337             int r0 = (++it)->u.operand;
1338             printLocationOpAndRegisterOperand(out, exec, location, it, "catch", r0);
1339             break;
1340         }
1341         case op_throw: {
1342             int r0 = (++it)->u.operand;
1343             printLocationOpAndRegisterOperand(out, exec, location, it, "throw", r0);
1344             break;
1345         }
1346         case op_throw_static_error: {
1347             int k0 = (++it)->u.operand;
1348             int k1 = (++it)->u.operand;
1349             printLocationAndOp(out, exec, location, it, "throw_static_error");
1350             out.printf("%s, %s", constantName(k0, getConstant(k0)).data(), k1 ? "true" : "false");
1351             break;
1352         }
1353         case op_debug: {
1354             int debugHookID = (++it)->u.operand;
1355             printLocationAndOp(out, exec, location, it, "debug");
1356             out.printf("%s", debugHookName(debugHookID));
1357             break;
1358         }
1359         case op_profile_will_call: {
1360             int function = (++it)->u.operand;
1361             printLocationOpAndRegisterOperand(out, exec, location, it, "profile_will_call", function);
1362             break;
1363         }
1364         case op_profile_did_call: {
1365             int function = (++it)->u.operand;
1366             printLocationOpAndRegisterOperand(out, exec, location, it, "profile_did_call", function);
1367             break;
1368         }
1369         case op_end: {
1370             int r0 = (++it)->u.operand;
1371             printLocationOpAndRegisterOperand(out, exec, location, it, "end", r0);
1372             break;
1373         }
1374         case op_resolve_scope: {
1375             int r0 = (++it)->u.operand;
1376             int id0 = (++it)->u.operand;
1377             int resolveModeAndType = (++it)->u.operand;
1378             ++it; // depth
1379             printLocationAndOp(out, exec, location, it, "resolve_scope");
1380             out.printf("%s, %s, %d", registerName(r0).data(), idName(id0, identifier(id0)).data(), resolveModeAndType);
1381             ++it;
1382             break;
1383         }
1384         case op_get_from_scope: {
1385             int r0 = (++it)->u.operand;
1386             int r1 = (++it)->u.operand;
1387             int id0 = (++it)->u.operand;
1388             int resolveModeAndType = (++it)->u.operand;
1389             ++it; // Structure
1390             ++it; // Operand
1391             ++it; // Skip value profile.
1392             printLocationAndOp(out, exec, location, it, "get_from_scope");
1393             out.printf("%s, %s, %s, %d", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data(), resolveModeAndType);
1394             break;
1395         }
1396         case op_put_to_scope: {
1397             int r0 = (++it)->u.operand;
1398             int id0 = (++it)->u.operand;
1399             int r1 = (++it)->u.operand;
1400             int resolveModeAndType = (++it)->u.operand;
1401             ++it; // Structure
1402             ++it; // Operand
1403             printLocationAndOp(out, exec, location, it, "put_to_scope");
1404             out.printf("%s, %s, %s, %d", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data(), resolveModeAndType);
1405             break;
1406         }
1407 #if ENABLE(LLINT_C_LOOP)
1408         default:
1409             RELEASE_ASSERT_NOT_REACHED();
1410 #endif
1411     }
1412
1413     dumpRareCaseProfile(out, "rare case: ", rareCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
1414     dumpRareCaseProfile(out, "special fast case: ", specialFastCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
1415     
1416 #if ENABLE(DFG_JIT)
1417     Vector<DFG::FrequentExitSite> exitSites = exitProfile().exitSitesFor(location);
1418     if (!exitSites.isEmpty()) {
1419         out.print(" !! frequent exits: ");
1420         CommaPrinter comma;
1421         for (unsigned i = 0; i < exitSites.size(); ++i)
1422             out.print(comma, exitSites[i].kind());
1423     }
1424 #else // ENABLE(DFG_JIT)
1425     UNUSED_PARAM(location);
1426 #endif // ENABLE(DFG_JIT)
1427     out.print("\n");
1428 }
1429
1430 void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset)
1431 {
1432     ExecState* exec = m_globalObject->globalExec();
1433     const Instruction* it = instructions().begin() + bytecodeOffset;
1434     dumpBytecode(out, exec, instructions().begin(), it);
1435 }
1436
1437 #define FOR_EACH_MEMBER_VECTOR(macro) \
1438     macro(instructions) \
1439     macro(callLinkInfos) \
1440     macro(linkedCallerList) \
1441     macro(identifiers) \
1442     macro(functionExpressions) \
1443     macro(constantRegisters)
1444
1445 #define FOR_EACH_MEMBER_VECTOR_RARE_DATA(macro) \
1446     macro(regexps) \
1447     macro(functions) \
1448     macro(exceptionHandlers) \
1449     macro(switchJumpTables) \
1450     macro(stringSwitchJumpTables) \
1451     macro(evalCodeCache) \
1452     macro(expressionInfo) \
1453     macro(lineInfo) \
1454     macro(callReturnIndexVector)
1455
1456 template<typename T>
1457 static size_t sizeInBytes(const Vector<T>& vector)
1458 {
1459     return vector.capacity() * sizeof(T);
1460 }
1461
1462 CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
1463     : m_globalObject(other.m_globalObject)
1464     , m_heap(other.m_heap)
1465     , m_numCalleeRegisters(other.m_numCalleeRegisters)
1466     , m_numVars(other.m_numVars)
1467     , m_isConstructor(other.m_isConstructor)
1468     , m_shouldAlwaysBeInlined(true)
1469     , m_didFailFTLCompilation(false)
1470     , m_unlinkedCode(*other.m_vm, other.m_ownerExecutable.get(), other.m_unlinkedCode.get())
1471     , m_ownerExecutable(*other.m_vm, other.m_ownerExecutable.get(), other.m_ownerExecutable.get())
1472     , m_vm(other.m_vm)
1473     , m_instructions(other.m_instructions)
1474     , m_thisRegister(other.m_thisRegister)
1475     , m_argumentsRegister(other.m_argumentsRegister)
1476     , m_activationRegister(other.m_activationRegister)
1477     , m_isStrictMode(other.m_isStrictMode)
1478     , m_needsActivation(other.m_needsActivation)
1479     , m_source(other.m_source)
1480     , m_sourceOffset(other.m_sourceOffset)
1481     , m_firstLineColumnOffset(other.m_firstLineColumnOffset)
1482     , m_codeType(other.m_codeType)
1483     , m_additionalIdentifiers(other.m_additionalIdentifiers)
1484     , m_constantRegisters(other.m_constantRegisters)
1485     , m_functionDecls(other.m_functionDecls)
1486     , m_functionExprs(other.m_functionExprs)
1487     , m_osrExitCounter(0)
1488     , m_optimizationDelayCounter(0)
1489     , m_reoptimizationRetryCounter(0)
1490     , m_hash(other.m_hash)
1491 #if ENABLE(JIT)
1492     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
1493 #endif
1494 {
1495     ASSERT(m_heap->isDeferred());
1496     
1497     if (SymbolTable* symbolTable = other.symbolTable())
1498         m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable);
1499     
1500     setNumParameters(other.numParameters());
1501     optimizeAfterWarmUp();
1502     jitAfterWarmUp();
1503
1504     if (other.m_rareData) {
1505         createRareDataIfNecessary();
1506         
1507         m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
1508         m_rareData->m_constantBuffers = other.m_rareData->m_constantBuffers;
1509         m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
1510         m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
1511     }
1512     
1513     m_heap->m_codeBlocks.add(this);
1514     m_heap->reportExtraMemoryCost(sizeof(CodeBlock));
1515 }
1516
1517 CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
1518     : m_globalObject(scope->globalObject()->vm(), ownerExecutable, scope->globalObject())
1519     , m_heap(&m_globalObject->vm().heap)
1520     , m_numCalleeRegisters(unlinkedCodeBlock->m_numCalleeRegisters)
1521     , m_numVars(unlinkedCodeBlock->m_numVars)
1522     , m_isConstructor(unlinkedCodeBlock->isConstructor())
1523     , m_shouldAlwaysBeInlined(true)
1524     , m_didFailFTLCompilation(false)
1525     , m_unlinkedCode(m_globalObject->vm(), ownerExecutable, unlinkedCodeBlock)
1526     , m_ownerExecutable(m_globalObject->vm(), ownerExecutable, ownerExecutable)
1527     , m_vm(unlinkedCodeBlock->vm())
1528     , m_thisRegister(unlinkedCodeBlock->thisRegister())
1529     , m_argumentsRegister(unlinkedCodeBlock->argumentsRegister())
1530     , m_activationRegister(unlinkedCodeBlock->activationRegister())
1531     , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
1532     , m_needsActivation(unlinkedCodeBlock->needsFullScopeChain() && unlinkedCodeBlock->codeType() == FunctionCode)
1533     , m_source(sourceProvider)
1534     , m_sourceOffset(sourceOffset)
1535     , m_firstLineColumnOffset(firstLineColumnOffset)
1536     , m_codeType(unlinkedCodeBlock->codeType())
1537     , m_osrExitCounter(0)
1538     , m_optimizationDelayCounter(0)
1539     , m_reoptimizationRetryCounter(0)
1540 #if ENABLE(JIT)
1541     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
1542 #endif
1543 {
1544     ASSERT(m_heap->isDeferred());
1545
1546     bool didCloneSymbolTable = false;
1547     
1548     if (SymbolTable* symbolTable = unlinkedCodeBlock->symbolTable()) {
1549         if (codeType() == FunctionCode && symbolTable->captureCount()) {
1550             m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable->clone(*m_vm));
1551             didCloneSymbolTable = true;
1552         } else
1553             m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable);
1554     }
1555     
1556     ASSERT(m_source);
1557     setNumParameters(unlinkedCodeBlock->numParameters());
1558
1559     setConstantRegisters(unlinkedCodeBlock->constantRegisters());
1560     if (unlinkedCodeBlock->usesGlobalObject())
1561         m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().offset()].set(*m_vm, ownerExecutable, m_globalObject.get());
1562     m_functionDecls.grow(unlinkedCodeBlock->numberOfFunctionDecls());
1563     for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
1564         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
1565         unsigned lineCount = unlinkedExecutable->lineCount();
1566         unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset();
1567         bool startColumnIsOnOwnerStartLine = !unlinkedExecutable->firstLineOffset();
1568         unsigned startColumn = unlinkedExecutable->unlinkedBodyStartColumn() + (startColumnIsOnOwnerStartLine ? ownerExecutable->startColumn() : 1);
1569         bool endColumnIsOnStartLine = !lineCount;
1570         unsigned endColumn = unlinkedExecutable->unlinkedBodyEndColumn() + (endColumnIsOnStartLine ? startColumn : 1);
1571         unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset();
1572         unsigned sourceLength = unlinkedExecutable->sourceLength();
1573         SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine, startColumn);
1574         FunctionExecutable* executable = FunctionExecutable::create(*m_vm, code, unlinkedExecutable, firstLine, firstLine + lineCount, startColumn, endColumn);
1575         m_functionDecls[i].set(*m_vm, ownerExecutable, executable);
1576     }
1577
1578     m_functionExprs.grow(unlinkedCodeBlock->numberOfFunctionExprs());
1579     for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
1580         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
1581         unsigned lineCount = unlinkedExecutable->lineCount();
1582         unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset();
1583         bool startColumnIsOnOwnerStartLine = !unlinkedExecutable->firstLineOffset();
1584         unsigned startColumn = unlinkedExecutable->unlinkedBodyStartColumn() + (startColumnIsOnOwnerStartLine ? ownerExecutable->startColumn() : 1);
1585         bool endColumnIsOnStartLine = !lineCount;
1586         unsigned endColumn = unlinkedExecutable->unlinkedBodyEndColumn() + (endColumnIsOnStartLine ? startColumn : 1);
1587         unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset();
1588         unsigned sourceLength = unlinkedExecutable->sourceLength();
1589         SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine, startColumn);
1590         FunctionExecutable* executable = FunctionExecutable::create(*m_vm, code, unlinkedExecutable, firstLine, firstLine + lineCount, startColumn, endColumn);
1591         m_functionExprs[i].set(*m_vm, ownerExecutable, executable);
1592     }
1593
1594     if (unlinkedCodeBlock->hasRareData()) {
1595         createRareDataIfNecessary();
1596         if (size_t count = unlinkedCodeBlock->constantBufferCount()) {
1597             m_rareData->m_constantBuffers.grow(count);
1598             for (size_t i = 0; i < count; i++) {
1599                 const UnlinkedCodeBlock::ConstantBuffer& buffer = unlinkedCodeBlock->constantBuffer(i);
1600                 m_rareData->m_constantBuffers[i] = buffer;
1601             }
1602         }
1603         if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
1604             m_rareData->m_exceptionHandlers.grow(count);
1605             size_t nonLocalScopeDepth = scope->depth();
1606             for (size_t i = 0; i < count; i++) {
1607                 const UnlinkedHandlerInfo& handler = unlinkedCodeBlock->exceptionHandler(i);
1608                 m_rareData->m_exceptionHandlers[i].start = handler.start;
1609                 m_rareData->m_exceptionHandlers[i].end = handler.end;
1610                 m_rareData->m_exceptionHandlers[i].target = handler.target;
1611                 m_rareData->m_exceptionHandlers[i].scopeDepth = nonLocalScopeDepth + handler.scopeDepth;
1612 #if ENABLE(JIT) && ENABLE(LLINT)
1613                 m_rareData->m_exceptionHandlers[i].nativeCode = CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(llint_op_catch)));
1614 #endif
1615             }
1616         }
1617
1618         if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
1619             m_rareData->m_stringSwitchJumpTables.grow(count);
1620             for (size_t i = 0; i < count; i++) {
1621                 UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
1622                 UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
1623                 for (; ptr != end; ++ptr) {
1624                     OffsetLocation offset;
1625                     offset.branchOffset = ptr->value;
1626                     m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
1627                 }
1628             }
1629         }
1630
1631         if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
1632             m_rareData->m_switchJumpTables.grow(count);
1633             for (size_t i = 0; i < count; i++) {
1634                 UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
1635                 SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
1636                 destTable.branchOffsets = sourceTable.branchOffsets;
1637                 destTable.min = sourceTable.min;
1638             }
1639         }
1640     }
1641
1642     // Allocate metadata buffers for the bytecode
1643 #if ENABLE(LLINT)
1644     if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
1645         m_llintCallLinkInfos.resizeToFit(size);
1646 #endif
1647     if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
1648         m_arrayProfiles.grow(size);
1649     if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
1650         m_arrayAllocationProfiles.resizeToFit(size);
1651     if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
1652         m_valueProfiles.resizeToFit(size);
1653     if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
1654         m_objectAllocationProfiles.resizeToFit(size);
1655
1656     // Copy and translate the UnlinkedInstructions
1657     size_t instructionCount = unlinkedCodeBlock->instructions().size();
1658     UnlinkedInstruction* pc = unlinkedCodeBlock->instructions().data();
1659     Vector<Instruction, 0, UnsafeVectorOverflow> instructions(instructionCount);
1660     for (size_t i = 0; i < unlinkedCodeBlock->instructions().size(); ) {
1661         unsigned opLength = opcodeLength(pc[i].u.opcode);
1662         instructions[i] = vm()->interpreter->getOpcode(pc[i].u.opcode);
1663         for (size_t j = 1; j < opLength; ++j) {
1664             if (sizeof(int32_t) != sizeof(intptr_t))
1665                 instructions[i + j].u.pointer = 0;
1666             instructions[i + j].u.operand = pc[i + j].u.operand;
1667         }
1668         switch (pc[i].u.opcode) {
1669         case op_get_by_val:
1670         case op_get_argument_by_val: {
1671             int arrayProfileIndex = pc[i + opLength - 2].u.operand;
1672             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1673
1674             instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
1675             // fallthrough
1676         }
1677         case op_get_by_id:
1678         case op_call_varargs: {
1679             ValueProfile* profile = &m_valueProfiles[pc[i + opLength - 1].u.operand];
1680             ASSERT(profile->m_bytecodeOffset == -1);
1681             profile->m_bytecodeOffset = i;
1682             instructions[i + opLength - 1] = profile;
1683             break;
1684         }
1685         case op_put_by_val: {
1686             int arrayProfileIndex = pc[i + opLength - 1].u.operand;
1687             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1688             instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
1689             break;
1690         }
1691         case op_put_by_val_direct: {
1692             int arrayProfileIndex = pc[i + opLength - 1].u.operand;
1693             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1694             instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
1695             break;
1696         }
1697
1698         case op_new_array:
1699         case op_new_array_buffer:
1700         case op_new_array_with_size: {
1701             int arrayAllocationProfileIndex = pc[i + opLength - 1].u.operand;
1702             instructions[i + opLength - 1] = &m_arrayAllocationProfiles[arrayAllocationProfileIndex];
1703             break;
1704         }
1705         case op_new_object: {
1706             int objectAllocationProfileIndex = pc[i + opLength - 1].u.operand;
1707             ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex];
1708             int inferredInlineCapacity = pc[i + opLength - 2].u.operand;
1709
1710             instructions[i + opLength - 1] = objectAllocationProfile;
1711             objectAllocationProfile->initialize(*vm(),
1712                 m_ownerExecutable.get(), m_globalObject->objectPrototype(), inferredInlineCapacity);
1713             break;
1714         }
1715
1716         case op_call:
1717         case op_call_eval: {
1718             ValueProfile* profile = &m_valueProfiles[pc[i + opLength - 1].u.operand];
1719             ASSERT(profile->m_bytecodeOffset == -1);
1720             profile->m_bytecodeOffset = i;
1721             instructions[i + opLength - 1] = profile;
1722             int arrayProfileIndex = pc[i + opLength - 2].u.operand;
1723             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1724             instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
1725 #if ENABLE(LLINT)
1726             instructions[i + 5] = &m_llintCallLinkInfos[pc[i + 5].u.operand];
1727 #endif
1728             break;
1729         }
1730         case op_construct: {
1731 #if ENABLE(LLINT)
1732             instructions[i + 5] = &m_llintCallLinkInfos[pc[i + 5].u.operand];
1733 #endif
1734             ValueProfile* profile = &m_valueProfiles[pc[i + opLength - 1].u.operand];
1735             ASSERT(profile->m_bytecodeOffset == -1);
1736             profile->m_bytecodeOffset = i;
1737             instructions[i + opLength - 1] = profile;
1738             break;
1739         }
1740         case op_get_by_id_out_of_line:
1741         case op_get_by_id_self:
1742         case op_get_by_id_proto:
1743         case op_get_by_id_chain:
1744         case op_get_by_id_getter_self:
1745         case op_get_by_id_getter_proto:
1746         case op_get_by_id_getter_chain:
1747         case op_get_by_id_custom_self:
1748         case op_get_by_id_custom_proto:
1749         case op_get_by_id_custom_chain:
1750         case op_get_by_id_generic:
1751         case op_get_array_length:
1752         case op_get_string_length:
1753             CRASH();
1754
1755         case op_init_global_const_nop: {
1756             ASSERT(codeType() == GlobalCode);
1757             Identifier ident = identifier(pc[i + 4].u.operand);
1758             SymbolTableEntry entry = m_globalObject->symbolTable()->get(ident.impl());
1759             if (entry.isNull())
1760                 break;
1761
1762             instructions[i + 0] = vm()->interpreter->getOpcode(op_init_global_const);
1763             instructions[i + 1] = &m_globalObject->registerAt(entry.getIndex());
1764             break;
1765         }
1766
1767         case op_resolve_scope: {
1768             const Identifier& ident = identifier(pc[i + 2].u.operand);
1769             ResolveType type = static_cast<ResolveType>(pc[i + 3].u.operand);
1770
1771             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), scope, ident, Get, type);
1772             instructions[i + 3].u.operand = op.type;
1773             instructions[i + 4].u.operand = op.depth;
1774             if (op.activation)
1775                 instructions[i + 5].u.activation.set(*vm(), ownerExecutable, op.activation);
1776             break;
1777         }
1778
1779         case op_get_from_scope: {
1780             ValueProfile* profile = &m_valueProfiles[pc[i + opLength - 1].u.operand];
1781             ASSERT(profile->m_bytecodeOffset == -1);
1782             profile->m_bytecodeOffset = i;
1783             instructions[i + opLength - 1] = profile;
1784
1785             // get_from_scope dst, scope, id, ResolveModeAndType, Structure, Operand
1786             const Identifier& ident = identifier(pc[i + 3].u.operand);
1787             ResolveModeAndType modeAndType = ResolveModeAndType(pc[i + 4].u.operand);
1788             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), scope, ident, Get, modeAndType.type());
1789
1790             instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
1791             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks)
1792                 instructions[i + 5].u.watchpointSet = op.watchpointSet;
1793             else if (op.structure)
1794                 instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
1795             instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
1796             break;
1797         }
1798
1799         case op_put_to_scope: {
1800             // put_to_scope scope, id, value, ResolveModeAndType, Structure, Operand
1801             const Identifier& ident = identifier(pc[i + 2].u.operand);
1802             ResolveModeAndType modeAndType = ResolveModeAndType(pc[i + 4].u.operand);
1803             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), scope, ident, Put, modeAndType.type());
1804
1805             instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
1806             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks)
1807                 instructions[i + 5].u.watchpointSet = op.watchpointSet;
1808             else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
1809                 if (op.watchpointSet)
1810                     op.watchpointSet->invalidate();
1811             } else if (op.structure)
1812                 instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
1813             instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
1814             break;
1815         }
1816             
1817         case op_captured_mov:
1818         case op_new_captured_func: {
1819             StringImpl* uid = pc[i + 3].u.uid;
1820             if (!uid)
1821                 break;
1822             RELEASE_ASSERT(didCloneSymbolTable);
1823             ConcurrentJITLocker locker(m_symbolTable->m_lock);
1824             SymbolTable::Map::iterator iter = m_symbolTable->find(locker, uid);
1825             ASSERT(iter != m_symbolTable->end(locker));
1826             iter->value.prepareToWatch();
1827             instructions[i + 3].u.watchpointSet = iter->value.watchpointSet();
1828             break;
1829         }
1830
1831         default:
1832             break;
1833         }
1834         i += opLength;
1835     }
1836     m_instructions = WTF::RefCountedArray<Instruction>(instructions);
1837
1838     // Set optimization thresholds only after m_instructions is initialized, since these
1839     // rely on the instruction count (and are in theory permitted to also inspect the
1840     // instruction stream to more accurate assess the cost of tier-up).
1841     optimizeAfterWarmUp();
1842     jitAfterWarmUp();
1843
1844     // If the concurrent thread will want the code block's hash, then compute it here
1845     // synchronously.
1846     if (Options::showDisassembly()
1847         || Options::showDFGDisassembly()
1848         || Options::dumpBytecodeAtDFGTime()
1849         || Options::dumpGraphAtEachPhase()
1850         || Options::verboseCompilation()
1851         || Options::logCompilationChanges()
1852         || Options::validateGraph()
1853         || Options::validateGraphAtEachPhase()
1854         || Options::verboseOSR()
1855         || Options::verboseCompilationQueue()
1856         || Options::reportCompileTimes()
1857         || Options::verboseCFA())
1858         hash();
1859
1860     if (Options::dumpGeneratedBytecodes())
1861         dumpBytecode();
1862
1863     m_heap->m_codeBlocks.add(this);
1864     m_heap->reportExtraMemoryCost(sizeof(CodeBlock) + m_instructions.size() * sizeof(Instruction));
1865 }
1866
1867 CodeBlock::~CodeBlock()
1868 {
1869     if (m_vm->m_perBytecodeProfiler)
1870         m_vm->m_perBytecodeProfiler->notifyDestruction(this);
1871     
1872 #if ENABLE(VERBOSE_VALUE_PROFILE)
1873     dumpValueProfiles();
1874 #endif
1875
1876 #if ENABLE(LLINT)    
1877     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
1878         m_incomingLLIntCalls.begin()->remove();
1879 #endif // ENABLE(LLINT)
1880 #if ENABLE(JIT)
1881     // We may be destroyed before any CodeBlocks that refer to us are destroyed.
1882     // Consider that two CodeBlocks become unreachable at the same time. There
1883     // is no guarantee about the order in which the CodeBlocks are destroyed.
1884     // So, if we don't remove incoming calls, and get destroyed before the
1885     // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
1886     // destructor will try to remove nodes from our (no longer valid) linked list.
1887     while (m_incomingCalls.begin() != m_incomingCalls.end())
1888         m_incomingCalls.begin()->remove();
1889     
1890     // Note that our outgoing calls will be removed from other CodeBlocks'
1891     // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
1892     // destructors.
1893
1894     for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter)
1895         (*iter)->deref();
1896 #endif // ENABLE(JIT)
1897 }
1898
1899 void CodeBlock::setNumParameters(int newValue)
1900 {
1901     m_numParameters = newValue;
1902
1903     m_argumentValueProfiles.resizeToFit(newValue);
1904 }
1905
1906 void EvalCodeCache::visitAggregate(SlotVisitor& visitor)
1907 {
1908     EvalCacheMap::iterator end = m_cacheMap.end();
1909     for (EvalCacheMap::iterator ptr = m_cacheMap.begin(); ptr != end; ++ptr)
1910         visitor.append(&ptr->value);
1911 }
1912
1913 CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
1914 {
1915 #if ENABLE(FTL_JIT)
1916     if (jitType() != JITCode::DFGJIT)
1917         return 0;
1918     DFG::JITCode* jitCode = m_jitCode->dfg();
1919     return jitCode->osrEntryBlock.get();
1920 #else // ENABLE(FTL_JIT)
1921     return 0;
1922 #endif // ENABLE(FTL_JIT)
1923 }
1924
1925 void CodeBlock::visitAggregate(SlotVisitor& visitor)
1926 {
1927 #if ENABLE(PARALLEL_GC)
1928     // I may be asked to scan myself more than once, and it may even happen concurrently.
1929     // To this end, use a CAS loop to check if I've been called already. Only one thread
1930     // may proceed past this point - whichever one wins the CAS race.
1931     unsigned oldValue;
1932     do {
1933         oldValue = m_visitAggregateHasBeenCalled;
1934         if (oldValue) {
1935             // Looks like someone else won! Return immediately to ensure that we don't
1936             // trace the same CodeBlock concurrently. Doing so is hazardous since we will
1937             // be mutating the state of ValueProfiles, which contain JSValues, which can
1938             // have word-tearing on 32-bit, leading to awesome timing-dependent crashes
1939             // that are nearly impossible to track down.
1940             
1941             // Also note that it must be safe to return early as soon as we see the
1942             // value true (well, (unsigned)1), since once a GC thread is in this method
1943             // and has won the CAS race (i.e. was responsible for setting the value true)
1944             // it will definitely complete the rest of this method before declaring
1945             // termination.
1946             return;
1947         }
1948     } while (!WTF::weakCompareAndSwap(&m_visitAggregateHasBeenCalled, 0, 1));
1949 #endif // ENABLE(PARALLEL_GC)
1950     
1951     if (!!m_alternative)
1952         m_alternative->visitAggregate(visitor);
1953     
1954     if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
1955         otherBlock->visitAggregate(visitor);
1956
1957     visitor.reportExtraMemoryUsage(sizeof(CodeBlock));
1958     if (m_jitCode)
1959         visitor.reportExtraMemoryUsage(m_jitCode->size());
1960     if (m_instructions.size()) {
1961         // Divide by refCount() because m_instructions points to something that is shared
1962         // by multiple CodeBlocks, and we only want to count it towards the heap size once.
1963         // Having each CodeBlock report only its proportional share of the size is one way
1964         // of accomplishing this.
1965         visitor.reportExtraMemoryUsage(m_instructions.size() * sizeof(Instruction) / m_instructions.refCount());
1966     }
1967
1968     visitor.append(&m_unlinkedCode);
1969
1970     // There are three things that may use unconditional finalizers: lazy bytecode freeing,
1971     // inline cache clearing, and jettisoning. The probability of us wanting to do at
1972     // least one of those things is probably quite close to 1. So we add one no matter what
1973     // and when it runs, it figures out whether it has any work to do.
1974     visitor.addUnconditionalFinalizer(this);
1975     
1976     // There are two things that we use weak reference harvesters for: DFG fixpoint for
1977     // jettisoning, and trying to find structures that would be live based on some
1978     // inline cache. So it makes sense to register them regardless.
1979     visitor.addWeakReferenceHarvester(this);
1980     m_allTransitionsHaveBeenMarked = false;
1981     
1982     if (shouldImmediatelyAssumeLivenessDuringScan()) {
1983         // This code block is live, so scan all references strongly and return.
1984         stronglyVisitStrongReferences(visitor);
1985         stronglyVisitWeakReferences(visitor);
1986         propagateTransitions(visitor);
1987         return;
1988     }
1989     
1990 #if ENABLE(DFG_JIT)
1991     // We get here if we're live in the sense that our owner executable is live,
1992     // but we're not yet live for sure in another sense: we may yet decide that this
1993     // code block should be jettisoned based on its outgoing weak references being
1994     // stale. Set a flag to indicate that we're still assuming that we're dead, and
1995     // perform one round of determining if we're live. The GC may determine, based on
1996     // either us marking additional objects, or by other objects being marked for
1997     // other reasons, that this iteration should run again; it will notify us of this
1998     // decision by calling harvestWeakReferences().
1999     
2000     m_jitCode->dfgCommon()->livenessHasBeenProved = false;
2001     
2002     propagateTransitions(visitor);
2003     determineLiveness(visitor);
2004 #else // ENABLE(DFG_JIT)
2005     RELEASE_ASSERT_NOT_REACHED();
2006 #endif // ENABLE(DFG_JIT)
2007 }
2008
2009 void CodeBlock::propagateTransitions(SlotVisitor& visitor)
2010 {
2011     UNUSED_PARAM(visitor);
2012
2013     if (m_allTransitionsHaveBeenMarked)
2014         return;
2015
2016     bool allAreMarkedSoFar = true;
2017         
2018 #if ENABLE(LLINT)
2019     Interpreter* interpreter = m_vm->interpreter;
2020     if (jitType() == JITCode::InterpreterThunk) {
2021         const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
2022         for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
2023             Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
2024             switch (interpreter->getOpcodeID(instruction[0].u.opcode)) {
2025             case op_put_by_id_transition_direct:
2026             case op_put_by_id_transition_normal:
2027             case op_put_by_id_transition_direct_out_of_line:
2028             case op_put_by_id_transition_normal_out_of_line: {
2029                 if (Heap::isMarked(instruction[4].u.structure.get()))
2030                     visitor.append(&instruction[6].u.structure);
2031                 else
2032                     allAreMarkedSoFar = false;
2033                 break;
2034             }
2035             default:
2036                 break;
2037             }
2038         }
2039     }
2040 #endif // ENABLE(LLINT)
2041
2042 #if ENABLE(JIT)
2043     if (JITCode::isJIT(jitType())) {
2044         for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
2045             StructureStubInfo& stubInfo = **iter;
2046             switch (stubInfo.accessType) {
2047             case access_put_by_id_transition_normal:
2048             case access_put_by_id_transition_direct: {
2049                 JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
2050                 if ((!origin || Heap::isMarked(origin))
2051                     && Heap::isMarked(stubInfo.u.putByIdTransition.previousStructure.get()))
2052                     visitor.append(&stubInfo.u.putByIdTransition.structure);
2053                 else
2054                     allAreMarkedSoFar = false;
2055                 break;
2056             }
2057
2058             case access_put_by_id_list: {
2059                 PolymorphicPutByIdList* list = stubInfo.u.putByIdList.list;
2060                 JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
2061                 if (origin && !Heap::isMarked(origin)) {
2062                     allAreMarkedSoFar = false;
2063                     break;
2064                 }
2065                 for (unsigned j = list->size(); j--;) {
2066                     PutByIdAccess& access = list->m_list[j];
2067                     if (!access.isTransition())
2068                         continue;
2069                     if (Heap::isMarked(access.oldStructure()))
2070                         visitor.append(&access.m_newStructure);
2071                     else
2072                         allAreMarkedSoFar = false;
2073                 }
2074                 break;
2075             }
2076             
2077             default:
2078                 break;
2079             }
2080         }
2081     }
2082 #endif // ENABLE(JIT)
2083     
2084 #if ENABLE(DFG_JIT)
2085     if (JITCode::isOptimizingJIT(jitType())) {
2086         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2087         for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
2088             if ((!dfgCommon->transitions[i].m_codeOrigin
2089                  || Heap::isMarked(dfgCommon->transitions[i].m_codeOrigin.get()))
2090                 && Heap::isMarked(dfgCommon->transitions[i].m_from.get())) {
2091                 // If the following three things are live, then the target of the
2092                 // transition is also live:
2093                 // - This code block. We know it's live already because otherwise
2094                 //   we wouldn't be scanning ourselves.
2095                 // - The code origin of the transition. Transitions may arise from
2096                 //   code that was inlined. They are not relevant if the user's
2097                 //   object that is required for the inlinee to run is no longer
2098                 //   live.
2099                 // - The source of the transition. The transition checks if some
2100                 //   heap location holds the source, and if so, stores the target.
2101                 //   Hence the source must be live for the transition to be live.
2102                 visitor.append(&dfgCommon->transitions[i].m_to);
2103             } else
2104                 allAreMarkedSoFar = false;
2105         }
2106     }
2107 #endif // ENABLE(DFG_JIT)
2108     
2109     if (allAreMarkedSoFar)
2110         m_allTransitionsHaveBeenMarked = true;
2111 }
2112
2113 void CodeBlock::determineLiveness(SlotVisitor& visitor)
2114 {
2115     UNUSED_PARAM(visitor);
2116     
2117     if (shouldImmediatelyAssumeLivenessDuringScan())
2118         return;
2119     
2120 #if ENABLE(DFG_JIT)
2121     // Check if we have any remaining work to do.
2122     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2123     if (dfgCommon->livenessHasBeenProved)
2124         return;
2125     
2126     // Now check all of our weak references. If all of them are live, then we
2127     // have proved liveness and so we scan our strong references. If at end of
2128     // GC we still have not proved liveness, then this code block is toast.
2129     bool allAreLiveSoFar = true;
2130     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
2131         if (!Heap::isMarked(dfgCommon->weakReferences[i].get())) {
2132             allAreLiveSoFar = false;
2133             break;
2134         }
2135     }
2136     
2137     // If some weak references are dead, then this fixpoint iteration was
2138     // unsuccessful.
2139     if (!allAreLiveSoFar)
2140         return;
2141     
2142     // All weak references are live. Record this information so we don't
2143     // come back here again, and scan the strong references.
2144     dfgCommon->livenessHasBeenProved = true;
2145     stronglyVisitStrongReferences(visitor);
2146 #endif // ENABLE(DFG_JIT)
2147 }
2148
2149 void CodeBlock::visitWeakReferences(SlotVisitor& visitor)
2150 {
2151     propagateTransitions(visitor);
2152     determineLiveness(visitor);
2153 }
2154
2155 void CodeBlock::finalizeUnconditionally()
2156 {
2157     Interpreter* interpreter = m_vm->interpreter;
2158     if (JITCode::couldBeInterpreted(jitType())) {
2159         const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
2160         for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
2161             Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
2162             switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) {
2163             case op_get_by_id:
2164             case op_get_by_id_out_of_line:
2165             case op_put_by_id:
2166             case op_put_by_id_out_of_line:
2167                 if (!curInstruction[4].u.structure || Heap::isMarked(curInstruction[4].u.structure.get()))
2168                     break;
2169                 if (Options::verboseOSR())
2170                     dataLogF("Clearing LLInt property access with structure %p.\n", curInstruction[4].u.structure.get());
2171                 curInstruction[4].u.structure.clear();
2172                 curInstruction[5].u.operand = 0;
2173                 break;
2174             case op_put_by_id_transition_direct:
2175             case op_put_by_id_transition_normal:
2176             case op_put_by_id_transition_direct_out_of_line:
2177             case op_put_by_id_transition_normal_out_of_line:
2178                 if (Heap::isMarked(curInstruction[4].u.structure.get())
2179                     && Heap::isMarked(curInstruction[6].u.structure.get())
2180                     && Heap::isMarked(curInstruction[7].u.structureChain.get()))
2181                     break;
2182                 if (Options::verboseOSR()) {
2183                     dataLogF("Clearing LLInt put transition with structures %p -> %p, chain %p.\n",
2184                             curInstruction[4].u.structure.get(),
2185                             curInstruction[6].u.structure.get(),
2186                             curInstruction[7].u.structureChain.get());
2187                 }
2188                 curInstruction[4].u.structure.clear();
2189                 curInstruction[6].u.structure.clear();
2190                 curInstruction[7].u.structureChain.clear();
2191                 curInstruction[0].u.opcode = interpreter->getOpcode(op_put_by_id);
2192                 break;
2193             case op_get_array_length:
2194                 break;
2195             case op_to_this:
2196                 if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
2197                     break;
2198                 if (Options::verboseOSR())
2199                     dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
2200                 curInstruction[2].u.structure.clear();
2201                 break;
2202             case op_get_callee:
2203                 if (!curInstruction[2].u.jsCell || Heap::isMarked(curInstruction[2].u.jsCell.get()))
2204                     break;
2205                 if (Options::verboseOSR())
2206                     dataLogF("Clearing LLInt get callee with function %p.\n", curInstruction[2].u.jsCell.get());
2207                 curInstruction[2].u.jsCell.clear();
2208                 break;
2209             case op_resolve_scope: {
2210                 WriteBarrierBase<JSActivation>& activation = curInstruction[5].u.activation;
2211                 if (!activation || Heap::isMarked(activation.get()))
2212                     break;
2213                 if (Options::verboseOSR())
2214                     dataLogF("Clearing dead activation %p.\n", activation.get());
2215                 activation.clear();
2216                 break;
2217             }
2218             case op_get_from_scope:
2219             case op_put_to_scope: {
2220                 ResolveModeAndType modeAndType =
2221                     ResolveModeAndType(curInstruction[4].u.operand);
2222                 if (modeAndType.type() == GlobalVar || modeAndType.type() == GlobalVarWithVarInjectionChecks)
2223                     continue;
2224                 WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
2225                 if (!structure || Heap::isMarked(structure.get()))
2226                     break;
2227                 if (Options::verboseOSR())
2228                     dataLogF("Clearing scope access with structure %p.\n", structure.get());
2229                 structure.clear();
2230                 break;
2231             }
2232             default:
2233                 RELEASE_ASSERT_NOT_REACHED();
2234             }
2235         }
2236
2237 #if ENABLE(LLINT)
2238         for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
2239             if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
2240                 if (Options::verboseOSR())
2241                     dataLog("Clearing LLInt call from ", *this, "\n");
2242                 m_llintCallLinkInfos[i].unlink();
2243             }
2244             if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
2245                 m_llintCallLinkInfos[i].lastSeenCallee.clear();
2246         }
2247 #endif // ENABLE(LLINT)
2248     }
2249
2250 #if ENABLE(DFG_JIT)
2251     // Check if we're not live. If we are, then jettison.
2252     if (!(shouldImmediatelyAssumeLivenessDuringScan() || m_jitCode->dfgCommon()->livenessHasBeenProved)) {
2253         if (Options::verboseOSR())
2254             dataLog(*this, " has dead weak references, jettisoning during GC.\n");
2255
2256         if (DFG::shouldShowDisassembly()) {
2257             dataLog(*this, " will be jettisoned because of the following dead references:\n");
2258             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2259             for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
2260                 DFG::WeakReferenceTransition& transition = dfgCommon->transitions[i];
2261                 JSCell* origin = transition.m_codeOrigin.get();
2262                 JSCell* from = transition.m_from.get();
2263                 JSCell* to = transition.m_to.get();
2264                 if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
2265                     continue;
2266                 dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
2267             }
2268             for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
2269                 JSCell* weak = dfgCommon->weakReferences[i].get();
2270                 if (Heap::isMarked(weak))
2271                     continue;
2272                 dataLog("    Weak reference ", RawPointer(weak), ".\n");
2273             }
2274         }
2275         
2276         jettison();
2277         return;
2278     }
2279 #endif // ENABLE(DFG_JIT)
2280
2281 #if ENABLE(JIT)
2282     // Handle inline caches.
2283     if (!!jitCode()) {
2284         RepatchBuffer repatchBuffer(this);
2285         for (unsigned i = 0; i < numberOfCallLinkInfos(); ++i) {
2286             if (callLinkInfo(i).isLinked()) {
2287                 if (ClosureCallStubRoutine* stub = callLinkInfo(i).stub.get()) {
2288                     if (!Heap::isMarked(stub->structure())
2289                         || !Heap::isMarked(stub->executable())) {
2290                         if (Options::verboseOSR()) {
2291                             dataLog(
2292                                 "Clearing closure call from ", *this, " to ",
2293                                 stub->executable()->hashFor(callLinkInfo(i).specializationKind()),
2294                                 ", stub routine ", RawPointer(stub), ".\n");
2295                         }
2296                         callLinkInfo(i).unlink(*m_vm, repatchBuffer);
2297                     }
2298                 } else if (!Heap::isMarked(callLinkInfo(i).callee.get())) {
2299                     if (Options::verboseOSR()) {
2300                         dataLog(
2301                             "Clearing call from ", *this, " to ",
2302                             RawPointer(callLinkInfo(i).callee.get()), " (",
2303                             callLinkInfo(i).callee.get()->executable()->hashFor(
2304                                 callLinkInfo(i).specializationKind()),
2305                             ").\n");
2306                     }
2307                     callLinkInfo(i).unlink(*m_vm, repatchBuffer);
2308                 }
2309             }
2310             if (!!callLinkInfo(i).lastSeenCallee
2311                 && !Heap::isMarked(callLinkInfo(i).lastSeenCallee.get()))
2312                 callLinkInfo(i).lastSeenCallee.clear();
2313         }
2314         for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
2315             StructureStubInfo& stubInfo = **iter;
2316             
2317             if (stubInfo.visitWeakReferences())
2318                 continue;
2319             
2320             resetStubDuringGCInternal(repatchBuffer, stubInfo);
2321         }
2322     }
2323 #endif
2324 }
2325
2326 #if ENABLE(JIT)
2327 StructureStubInfo* CodeBlock::addStubInfo()
2328 {
2329     ConcurrentJITLocker locker(m_lock);
2330     return m_stubInfos.add();
2331 }
2332
2333 void CodeBlock::getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result)
2334 {
2335     toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
2336 }
2337
2338 void CodeBlock::resetStub(StructureStubInfo& stubInfo)
2339 {
2340     if (stubInfo.accessType == access_unset)
2341         return;
2342     
2343     ConcurrentJITLocker locker(m_lock);
2344     
2345     RepatchBuffer repatchBuffer(this);
2346     resetStubInternal(repatchBuffer, stubInfo);
2347 }
2348
2349 void CodeBlock::resetStubInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
2350 {
2351     AccessType accessType = static_cast<AccessType>(stubInfo.accessType);
2352     
2353     if (Options::verboseOSR()) {
2354         // This can be called from GC destructor calls, so we don't try to do a full dump
2355         // of the CodeBlock.
2356         dataLog("Clearing structure cache (kind ", static_cast<int>(stubInfo.accessType), ") in ", RawPointer(this), ".\n");
2357     }
2358     
2359     RELEASE_ASSERT(JITCode::isJIT(jitType()));
2360     
2361     if (isGetByIdAccess(accessType))
2362         resetGetByID(repatchBuffer, stubInfo);
2363     else if (isPutByIdAccess(accessType))
2364         resetPutByID(repatchBuffer, stubInfo);
2365     else {
2366         RELEASE_ASSERT(isInAccess(accessType));
2367         resetIn(repatchBuffer, stubInfo);
2368     }
2369     
2370     stubInfo.reset();
2371 }
2372
2373 void CodeBlock::resetStubDuringGCInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
2374 {
2375     resetStubInternal(repatchBuffer, stubInfo);
2376     stubInfo.resetByGC = true;
2377 }
2378 #endif
2379
2380 void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
2381 {
2382     visitor.append(&m_globalObject);
2383     visitor.append(&m_ownerExecutable);
2384     visitor.append(&m_symbolTable);
2385     visitor.append(&m_unlinkedCode);
2386     if (m_rareData)
2387         m_rareData->m_evalCodeCache.visitAggregate(visitor);
2388     visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
2389     for (size_t i = 0; i < m_functionExprs.size(); ++i)
2390         visitor.append(&m_functionExprs[i]);
2391     for (size_t i = 0; i < m_functionDecls.size(); ++i)
2392         visitor.append(&m_functionDecls[i]);
2393     for (unsigned i = 0; i < m_objectAllocationProfiles.size(); ++i)
2394         m_objectAllocationProfiles[i].visitAggregate(visitor);
2395
2396     updateAllPredictions();
2397 }
2398
2399 void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor)
2400 {
2401     UNUSED_PARAM(visitor);
2402
2403 #if ENABLE(DFG_JIT)
2404     if (!JITCode::isOptimizingJIT(jitType()))
2405         return;
2406     
2407     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2408
2409     for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
2410         if (!!dfgCommon->transitions[i].m_codeOrigin)
2411             visitor.append(&dfgCommon->transitions[i].m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
2412         visitor.append(&dfgCommon->transitions[i].m_from);
2413         visitor.append(&dfgCommon->transitions[i].m_to);
2414     }
2415     
2416     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i)
2417         visitor.append(&dfgCommon->weakReferences[i]);
2418 #endif    
2419 }
2420
2421 CodeBlock* CodeBlock::baselineAlternative()
2422 {
2423 #if ENABLE(JIT)
2424     CodeBlock* result = this;
2425     while (result->alternative())
2426         result = result->alternative();
2427     RELEASE_ASSERT(result);
2428     RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
2429     return result;
2430 #else
2431     return this;
2432 #endif
2433 }
2434
2435 CodeBlock* CodeBlock::baselineVersion()
2436 {
2437 #if ENABLE(JIT)
2438     if (JITCode::isBaselineCode(jitType()))
2439         return this;
2440     CodeBlock* result = replacement();
2441     if (!result) {
2442         // This can happen if we're creating the original CodeBlock for an executable.
2443         // Assume that we're the baseline CodeBlock.
2444         RELEASE_ASSERT(jitType() == JITCode::None);
2445         return this;
2446     }
2447     result = result->baselineAlternative();
2448     return result;
2449 #else
2450     return this;
2451 #endif
2452 }
2453
2454 #if ENABLE(JIT)
2455 bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
2456 {
2457     return JITCode::isHigherTier(replacement()->jitType(), typeToReplace);
2458 }
2459
2460 bool CodeBlock::hasOptimizedReplacement()
2461 {
2462     return hasOptimizedReplacement(jitType());
2463 }
2464 #endif
2465
2466 bool CodeBlock::isCaptured(VirtualRegister operand, InlineCallFrame* inlineCallFrame) const
2467 {
2468     if (operand.isArgument())
2469         return operand.toArgument() && usesArguments();
2470
2471     if (inlineCallFrame)
2472         return inlineCallFrame->capturedVars.get(operand.toLocal());
2473
2474     // The activation object isn't in the captured region, but it's "captured"
2475     // in the sense that stores to its location can be observed indirectly.
2476     if (needsActivation() && operand == activationRegister())
2477         return true;
2478
2479     // Ditto for the arguments object.
2480     if (usesArguments() && operand == argumentsRegister())
2481         return true;
2482
2483     // Ditto for the arguments object.
2484     if (usesArguments() && operand == unmodifiedArgumentsRegister(argumentsRegister()))
2485         return true;
2486
2487     // We're in global code so there are no locals to capture
2488     if (!symbolTable())
2489         return false;
2490
2491     return symbolTable()->isCaptured(operand.offset());
2492 }
2493
2494 int CodeBlock::framePointerOffsetToGetActivationRegisters(int machineCaptureStart)
2495 {
2496     // We'll be adding this to the stack pointer to get a registers pointer that looks
2497     // like it would have looked in the baseline engine. For example, if bytecode would
2498     // have put the first captured variable at offset -5 but we put it at offset -1, then
2499     // we'll have an offset of 4.
2500     int32_t offset = 0;
2501     
2502     // Compute where we put the captured variables. This offset will point the registers
2503     // pointer directly at the first captured var.
2504     offset += machineCaptureStart;
2505     
2506     // Now compute the offset needed to make the runtime see the captured variables at the
2507     // same offset that the bytecode would have used.
2508     offset -= symbolTable()->captureStart();
2509     
2510     return offset;
2511 }
2512
2513 int CodeBlock::framePointerOffsetToGetActivationRegisters()
2514 {
2515     if (!JITCode::isOptimizingJIT(jitType()))
2516         return 0;
2517 #if ENABLE(DFG_JIT)
2518     return framePointerOffsetToGetActivationRegisters(jitCode()->dfgCommon()->machineCaptureStart);
2519 #else
2520     RELEASE_ASSERT_NOT_REACHED();
2521     return 0;
2522 #endif
2523 }
2524
2525 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset)
2526 {
2527     RELEASE_ASSERT(bytecodeOffset < instructions().size());
2528
2529     if (!m_rareData)
2530         return 0;
2531     
2532     Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
2533     for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
2534         // Handlers are ordered innermost first, so the first handler we encounter
2535         // that contains the source address is the correct handler to use.
2536         if (exceptionHandlers[i].start <= bytecodeOffset && exceptionHandlers[i].end > bytecodeOffset)
2537             return &exceptionHandlers[i];
2538     }
2539
2540     return 0;
2541 }
2542
2543 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
2544 {
2545     RELEASE_ASSERT(bytecodeOffset < instructions().size());
2546     return m_ownerExecutable->lineNo() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
2547 }
2548
2549 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
2550 {
2551     int divot;
2552     int startOffset;
2553     int endOffset;
2554     unsigned line;
2555     unsigned column;
2556     expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
2557     return column;
2558 }
2559
2560 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column)
2561 {
2562     m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
2563     divot += m_sourceOffset;
2564     column += line ? 1 : firstLineColumnOffset();
2565     line += m_ownerExecutable->lineNo();
2566 }
2567
2568 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
2569 {
2570 #if ENABLE(JIT)
2571     m_callLinkInfos.shrinkToFit();
2572 #endif
2573     m_rareCaseProfiles.shrinkToFit();
2574     m_specialFastCaseProfiles.shrinkToFit();
2575     
2576     if (shrinkMode == EarlyShrink) {
2577         m_additionalIdentifiers.shrinkToFit();
2578         m_functionDecls.shrinkToFit();
2579         m_functionExprs.shrinkToFit();
2580         m_constantRegisters.shrinkToFit();
2581         
2582         if (m_rareData) {
2583             m_rareData->m_switchJumpTables.shrinkToFit();
2584             m_rareData->m_stringSwitchJumpTables.shrinkToFit();
2585         }
2586     } // else don't shrink these, because pointers into these tables may already have been handed out.
2587
2588     if (m_rareData)
2589         m_rareData->m_exceptionHandlers.shrinkToFit();
2590 }
2591
2592 void CodeBlock::createActivation(CallFrame* callFrame)
2593 {
2594     ASSERT(codeType() == FunctionCode);
2595     ASSERT(needsFullScopeChain());
2596     ASSERT(!callFrame->uncheckedR(activationRegister().offset()).jsValue());
2597     JSActivation* activation = JSActivation::create(callFrame->vm(), callFrame, this);
2598     callFrame->uncheckedR(activationRegister().offset()) = JSValue(activation);
2599     callFrame->setScope(activation);
2600 }
2601
2602 unsigned CodeBlock::addOrFindConstant(JSValue v)
2603 {
2604     unsigned result;
2605     if (findConstant(v, result))
2606         return result;
2607     return addConstant(v);
2608 }
2609
2610 bool CodeBlock::findConstant(JSValue v, unsigned& index)
2611 {
2612     unsigned numberOfConstants = numberOfConstantRegisters();
2613     for (unsigned i = 0; i < numberOfConstants; ++i) {
2614         if (getConstant(FirstConstantRegisterIndex + i) == v) {
2615             index = i;
2616             return true;
2617         }
2618     }
2619     index = numberOfConstants;
2620     return false;
2621 }
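// Usage sketch (a hypothetical call site, not code from this file): addOrFindConstant(
// jsNumber(42)) returns the index of an existing constant register holding 42 if there is
// one, and otherwise appends a new register. findConstant() alone never mutates the table;
// on failure it reports the would-be insertion index through its out-parameter.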
2622
2623 #if ENABLE(JIT)
2624 void CodeBlock::unlinkCalls()
2625 {
2626     if (!!m_alternative)
2627         m_alternative->unlinkCalls();
2628 #if ENABLE(LLINT)
2629     for (size_t i = 0; i < m_llintCallLinkInfos.size(); ++i) {
2630         if (m_llintCallLinkInfos[i].isLinked())
2631             m_llintCallLinkInfos[i].unlink();
2632     }
2633 #endif
2634     if (!m_callLinkInfos.size())
2635         return;
2636     if (!m_vm->canUseJIT())
2637         return;
2638     RepatchBuffer repatchBuffer(this);
2639     for (size_t i = 0; i < m_callLinkInfos.size(); i++) {
2640         if (!m_callLinkInfos[i].isLinked())
2641             continue;
2642         m_callLinkInfos[i].unlink(*m_vm, repatchBuffer);
2643     }
2644 }
2645
2646 void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
2647 {
2648     noticeIncomingCall(callerFrame);
2649     m_incomingCalls.push(incoming);
2650 }
2651 #endif // ENABLE(JIT)
2652
2653 void CodeBlock::unlinkIncomingCalls()
2654 {
2655 #if ENABLE(LLINT)
2656     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
2657         m_incomingLLIntCalls.begin()->unlink();
2658 #endif // ENABLE(LLINT)
2659 #if ENABLE(JIT)
2660     if (m_incomingCalls.isEmpty())
2661         return;
2662     RepatchBuffer repatchBuffer(this);
2663     while (m_incomingCalls.begin() != m_incomingCalls.end())
2664         m_incomingCalls.begin()->unlink(*m_vm, repatchBuffer);
2665 #endif // ENABLE(JIT)
2666 }
2667
2668 #if ENABLE(LLINT)
2669 void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
2670 {
2671     noticeIncomingCall(callerFrame);
2672     m_incomingLLIntCalls.push(incoming);
2673 }
2674 #endif // ENABLE(LLINT)
2675
2676 void CodeBlock::clearEvalCache()
2677 {
2678     if (!!m_alternative)
2679         m_alternative->clearEvalCache();
2680     if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
2681         otherBlock->clearEvalCache();
2682     if (!m_rareData)
2683         return;
2684     m_rareData->m_evalCodeCache.clear();
2685 }
2686
2687 template<typename T, size_t inlineCapacity, typename U, typename V>
2688 inline void replaceExistingEntries(Vector<T, inlineCapacity, U>& target, Vector<T, inlineCapacity, V>& source)
2689 {
2690     ASSERT(target.size() <= source.size());
2691     for (size_t i = 0; i < target.size(); ++i)
2692         target[i] = source[i];
2693 }
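// For instance, with a target of 2 entries and a source of 3, only source[0] and source[1]
// are copied; the assertion guards against the target having grown past the source.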
2694
2695 void CodeBlock::copyPostParseDataFrom(CodeBlock* alternative)
2696 {
2697     if (!alternative)
2698         return;
2699     
2700     replaceExistingEntries(m_constantRegisters, alternative->m_constantRegisters);
2701     replaceExistingEntries(m_functionDecls, alternative->m_functionDecls);
2702     replaceExistingEntries(m_functionExprs, alternative->m_functionExprs);
2703     if (!!m_rareData && !!alternative->m_rareData)
2704         replaceExistingEntries(m_rareData->m_constantBuffers, alternative->m_rareData->m_constantBuffers);
2705 }
2706
2707 void CodeBlock::copyPostParseDataFromAlternative()
2708 {
2709     copyPostParseDataFrom(m_alternative.get());
2710 }
2711
2712 void CodeBlock::install()
2713 {
2714     ownerExecutable()->installCode(this);
2715 }
2716
2717 PassRefPtr<CodeBlock> CodeBlock::newReplacement()
2718 {
2719     return ownerExecutable()->newReplacementCodeBlockFor(specializationKind());
2720 }
2721
2722 const SlowArgument* CodeBlock::machineSlowArguments()
2723 {
2724     if (!JITCode::isOptimizingJIT(jitType()))
2725         return symbolTable()->slowArguments();
2726     
2727 #if ENABLE(DFG_JIT)
2728     return jitCode()->dfgCommon()->slowArguments.get();
2729 #else // ENABLE(DFG_JIT)
2730     return 0;
2731 #endif // ENABLE(DFG_JIT)
2732 }
2733
2734 #if ENABLE(JIT)
2735 CodeBlock* ProgramCodeBlock::replacement()
2736 {
2737     return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
2738 }
2739
2740 CodeBlock* EvalCodeBlock::replacement()
2741 {
2742     return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
2743 }
2744
2745 CodeBlock* FunctionCodeBlock::replacement()
2746 {
2747     return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
2748 }
2749
2750 DFG::CapabilityLevel ProgramCodeBlock::capabilityLevelInternal()
2751 {
2752     return DFG::programCapabilityLevel(this);
2753 }
2754
2755 DFG::CapabilityLevel EvalCodeBlock::capabilityLevelInternal()
2756 {
2757     return DFG::evalCapabilityLevel(this);
2758 }
2759
2760 DFG::CapabilityLevel FunctionCodeBlock::capabilityLevelInternal()
2761 {
2762     if (m_isConstructor)
2763         return DFG::functionForConstructCapabilityLevel(this);
2764     return DFG::functionForCallCapabilityLevel(this);
2765 }
2766 #endif
2767
2768 void CodeBlock::jettison(ReoptimizationMode mode)
2769 {
2770 #if ENABLE(DFG_JIT)
2771     if (DFG::shouldShowDisassembly()) {
2772         dataLog("Jettisoning ", *this);
2773         if (mode == CountReoptimization)
2774             dataLog(" and counting reoptimization");
2775         dataLog(".\n");
2776     }
2777     
2778     DeferGCForAWhile deferGC(*m_heap);
2779     RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
2780     
2781     // We want to accomplish two things here:
2782     // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
2783     //    we should OSR exit at the top of the next bytecode instruction after the return.
2784     // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
2785     
2786     // This accomplishes the OSR-exit-on-return part, and does its own book-keeping about
2787     // whether the invalidation has already happened.
2788     if (!jitCode()->dfgCommon()->invalidate()) {
2789         // Nothing to do since we've already been invalidated. That means that we cannot be
2790         // the optimized replacement.
2791         RELEASE_ASSERT(this != replacement());
2792         return;
2793     }
2794     
2795     if (DFG::shouldShowDisassembly())
2796         dataLog("    Did invalidate ", *this, "\n");
2797     
2798     // Count the reoptimization if that's what the user wanted.
2799     if (mode == CountReoptimization) {
2800         // FIXME: Maybe this should call alternative().
2801         // https://bugs.webkit.org/show_bug.cgi?id=123677
2802         baselineAlternative()->countReoptimization();
2803         if (DFG::shouldShowDisassembly())
2804             dataLog("    Did count reoptimization for ", *this, "\n");
2805     }
2806     
2807     // Now take care of the entrypoint.
2808     if (this != replacement()) {
2809         // This means that we were never the entrypoint. This can happen for OSR entry code
2810         // blocks.
2811         return;
2812     }
2813     alternative()->optimizeAfterWarmUp();
2814     tallyFrequentExitSites();
2815     alternative()->install();
2816     if (DFG::shouldShowDisassembly())
2817         dataLog("    Did install baseline version of ", *this, "\n");
2818 #else // ENABLE(DFG_JIT)
2819     UNUSED_PARAM(mode);
2820     UNREACHABLE_FOR_PLATFORM();
2821 #endif // ENABLE(DFG_JIT)
2822 }
2823
2824 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
2825 {
2826     if (!codeOrigin.inlineCallFrame)
2827         return globalObject();
2828     return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->eitherCodeBlock()->globalObject();
2829 }
2830
2831 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
2832 {
2833     CodeBlock* callerCodeBlock = callerFrame->codeBlock();
2834     
2835     if (Options::verboseCallLink())
2836         dataLog("Noticing call link from ", *callerCodeBlock, " to ", *this, "\n");
2837     
2838     if (!m_shouldAlwaysBeInlined)
2839         return;
2840
2841 #if ENABLE(DFG_JIT)
2842     if (!hasBaselineJITProfiling())
2843         return;
2844
2845     if (!DFG::mightInlineFunction(this))
2846         return;
2847
2848     if (!canInline(m_capabilityLevelState))
2849         return;
2850
2851     if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
2852         // If the caller is still in the interpreter, then we can't expect inlining to
2853         // happen anytime soon. Assume it's profitable to optimize it separately. This
2854         // ensures that a function is SABI only if it is called no more frequently than
2855         // any of its callers.
2856         m_shouldAlwaysBeInlined = false;
2857         if (Options::verboseCallLink())
2858             dataLog("    Clearing SABI because caller is in LLInt.\n");
2859         return;
2860     }
2861     
2862     if (callerCodeBlock->codeType() != FunctionCode) {
2863         // If the caller is either eval or global code, assume that it won't be
2864         // optimized anytime soon. For eval code this is particularly true since we
2865         // delay eval optimization by a *lot*.
2866         m_shouldAlwaysBeInlined = false;
2867         if (Options::verboseCallLink())
2868             dataLog("    Clearing SABI because caller is not a function.\n");
2869         return;
2870     }
2871     
2872     ExecState* frame = callerFrame;
2873     for (unsigned i = Options::maximumInliningDepth(); i--; frame = frame->callerFrame()) {
2874         if (frame->isVMEntrySentinel())
2875             break;
2876         if (frame->codeBlock() == this) {
2877             // Recursive calls won't be inlined.
2878             if (Options::verboseCallLink())
2879                 dataLog("    Clearing SABI because recursion was detected.\n");
2880             m_shouldAlwaysBeInlined = false;
2881             return;
2882         }
2883     }
2884     
2885     RELEASE_ASSERT(callerCodeBlock->m_capabilityLevelState != DFG::CapabilityLevelNotSet);
2886     
2887     if (canCompile(callerCodeBlock->m_capabilityLevelState))
2888         return;
2889     
2890     if (Options::verboseCallLink())
2891         dataLog("    Clearing SABI because the caller is not a DFG candidate.\n");
2892     
2893     m_shouldAlwaysBeInlined = false;
2894 #endif
2895 }
2896
2897 #if ENABLE(JIT)
2898 unsigned CodeBlock::reoptimizationRetryCounter() const
2899 {
2900     ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
2901     return m_reoptimizationRetryCounter;
2902 }
2903
2904 void CodeBlock::countReoptimization()
2905 {
2906     m_reoptimizationRetryCounter++;
2907     if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
2908         m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
2909 }
2910
2911 unsigned CodeBlock::numberOfDFGCompiles()
2912 {
2913     ASSERT(JITCode::isBaselineCode(jitType()));
2914     return (JITCode::isOptimizingJIT(replacement()->jitType()) ? 1 : 0) + m_reoptimizationRetryCounter;
2915 }
2916
2917 int32_t CodeBlock::codeTypeThresholdMultiplier() const
2918 {
2919     if (codeType() == EvalCode)
2920         return Options::evalThresholdMultiplier();
2921     
2922     return 1;
2923 }
2924
2925 double CodeBlock::optimizationThresholdScalingFactor()
2926 {
2927     // This expression arises from doing a least-squares fit of
2928     //
2929     // F[x_] := a * Sqrt[x + b] + Abs[c * x] + d
2930     //
2931     // against the data points:
2932     //
2933     //    x       F[x_]
2934     //    10       0.9          (smallest reasonable code block)
2935     //   200       1.0          (typical small-ish code block)
2936     //   320       1.2          (something I saw in 3d-cube that I wanted to optimize)
2937     //  1268       5.0          (something I saw in 3d-cube that I didn't want to optimize)
2938     //  4000       5.5          (random large size, used to cause the function to converge to a shallow curve of some sort)
2939     // 10000       6.0          (similar to above)
2940     //
2941     // I achieve the minimization using the following Mathematica code:
2942     //
2943     // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
2944     //
2945     // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
2946     //
2947     // solution = 
2948     //     Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
2949     //         {a, b, c, d}][[2]]
2950     //
2951     // And the code below (to initialize a, b, c, d) is generated by:
2952     //
2953     // Print["const double " <> ToString[#[[1]]] <> " = " <>
2954     //     If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
2955     //
2956     // We've long known the following to be true:
2957     // - Small code blocks are cheap to optimize and so we should do it sooner rather
2958     //   than later.
2959     // - Large code blocks are expensive to optimize and so we should postpone doing so,
2960     //   and sometimes have a large enough threshold that we never optimize them.
2961     // - The difference in cost is not totally linear because (a) just invoking the
2962     //   DFG incurs some base cost and (b) for large code blocks there is enough slop
2963     //   in the correlation between instruction count and the actual compilation cost
2964     //   that for those large blocks, the instruction count should not have a strong
2965     //   influence on our threshold.
2966     //
2967     // I knew the goals but I didn't know how to achieve them; so I picked an interesting
2968     // example where the heuristics were right (code block in 3d-cube with instruction
2969     // count 320, which got compiled early as it should have been) and one where they were
2970     // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
2971     // to compile and didn't run often enough to warrant compilation in my opinion), and
2972     // then threw in additional data points that represented my own guess of what our
2973     // heuristics should do for some round-numbered examples.
2974     //
2975     // The expression to which I decided to fit the data arose because I started with an
2976     // affine function, and then did two things: put the linear part in an Abs to ensure
2977     // that the fit didn't end up choosing a negative value of c (which would result in
2978     // the function turning over and going negative for large x) and I threw in a Sqrt
2979     // term because Sqrt represents my intuition that the function should be more sensitive
2980     // to small changes in small values of x, but less sensitive when x gets large.
2981     
2982     // Note that the current fit essentially eliminates the linear portion of the
2983     // expression (c == 0.0).
2984     const double a = 0.061504;
2985     const double b = 1.02406;
2986     const double c = 0.0;
2987     const double d = 0.825914;
2988     
2989     double instructionCount = this->instructionCount();
2990     
2991     ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
2992     
2993     double result = d + a * sqrt(instructionCount + b) + c * instructionCount;
2994     if (Options::verboseOSR()) {
2995         dataLog(
2996             *this, ": instruction count is ", instructionCount,
2997             ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
2998             "\n");
2999     }
3000     return result * codeTypeThresholdMultiplier();
3001 }
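// Back-of-the-envelope numbers for the constants above (before the code-type multiplier,
// and not additional fitted data points): an instruction count of 200 gives
// 0.825914 + 0.061504 * sqrt(201.02406), roughly 1.70, while an instruction count of
// 10000 gives roughly 6.98.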
3002
3003 static int32_t clipThreshold(double threshold)
3004 {
3005     if (threshold < 1.0)
3006         return 1;
3007     
3008     if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
3009         return std::numeric_limits<int32_t>::max();
3010     
3011     return static_cast<int32_t>(threshold);
3012 }
3013
3014 int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
3015 {
3016     return clipThreshold(
3017         static_cast<double>(desiredThreshold) *
3018         optimizationThresholdScalingFactor() *
3019         (1 << reoptimizationRetryCounter()));
3020 }
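// Illustrative numbers (the inputs are made up): a desired threshold of 1000, a scaling
// factor of 1.7, and a reoptimization retry counter of 2 yield
// clipThreshold(1000 * 1.7 * (1 << 2)) == 6800; clipThreshold() pins anything below 1.0
// to 1 and anything above INT32_MAX to INT32_MAX.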
3021
3022 bool CodeBlock::checkIfOptimizationThresholdReached()
3023 {
3024 #if ENABLE(DFG_JIT)
3025     if (DFG::Worklist* worklist = m_vm->worklist.get()) {
3026         if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
3027             == DFG::Worklist::Compiled) {
3028             optimizeNextInvocation();
3029             return true;
3030         }
3031     }
3032 #endif
3033     
3034     return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
3035 }
3036
3037 void CodeBlock::optimizeNextInvocation()
3038 {
3039     if (Options::verboseOSR())
3040         dataLog(*this, ": Optimizing next invocation.\n");
3041     m_jitExecuteCounter.setNewThreshold(0, this);
3042 }
3043
3044 void CodeBlock::dontOptimizeAnytimeSoon()
3045 {
3046     if (Options::verboseOSR())
3047         dataLog(*this, ": Not optimizing anytime soon.\n");
3048     m_jitExecuteCounter.deferIndefinitely();
3049 }
3050
3051 void CodeBlock::optimizeAfterWarmUp()
3052 {
3053     if (Options::verboseOSR())
3054         dataLog(*this, ": Optimizing after warm-up.\n");
3055 #if ENABLE(DFG_JIT)
3056     m_jitExecuteCounter.setNewThreshold(
3057         adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
3058 #endif
3059 }
3060
3061 void CodeBlock::optimizeAfterLongWarmUp()
3062 {
3063     if (Options::verboseOSR())
3064         dataLog(*this, ": Optimizing after long warm-up.\n");
3065 #if ENABLE(DFG_JIT)
3066     m_jitExecuteCounter.setNewThreshold(
3067         adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
3068 #endif
3069 }
3070
3071 void CodeBlock::optimizeSoon()
3072 {
3073     if (Options::verboseOSR())
3074         dataLog(*this, ": Optimizing soon.\n");
3075 #if ENABLE(DFG_JIT)
3076     m_jitExecuteCounter.setNewThreshold(
3077         adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
3078 #endif
3079 }
3080
3081 void CodeBlock::forceOptimizationSlowPathConcurrently()
3082 {
3083     if (Options::verboseOSR())
3084         dataLog(*this, ": Forcing slow path concurrently.\n");
3085     m_jitExecuteCounter.forceSlowPathConcurrently();
3086 }
3087
3088 #if ENABLE(DFG_JIT)
3089 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
3090 {
3091     RELEASE_ASSERT(jitType() == JITCode::BaselineJIT);
3092     RELEASE_ASSERT((result == CompilationSuccessful) == (replacement() != this));
3093     switch (result) {
3094     case CompilationSuccessful:
3095         RELEASE_ASSERT(JITCode::isOptimizingJIT(replacement()->jitType()));
3096         optimizeNextInvocation();
3097         return;
3098     case CompilationFailed:
3099         dontOptimizeAnytimeSoon();
3100         return;
3101     case CompilationDeferred:
3102         // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
3103         // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
3104         // necessarily guarantee anything. So, we make sure that even if that
3105         // function ends up being a no-op, we still eventually retry and realize
3106         // that we have optimized code ready.
3107         optimizeAfterWarmUp();
3108         return;
3109     case CompilationInvalidated:
3110         // Retry with exponential backoff.
3111         countReoptimization();
3112         optimizeAfterWarmUp();
3113         return;
3114     }
3115     RELEASE_ASSERT_NOT_REACHED();
3116 }
3117
3118 #endif
3119     
3120 uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
3121 {
3122     ASSERT(JITCode::isOptimizingJIT(jitType()));
3123     // Compute this the lame way so that overflow saturates rather than wrapping. This is
3124     // called infrequently enough that this loop won't hurt us.
3125     unsigned result = desiredThreshold;
3126     for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
3127         unsigned newResult = result << 1;
3128         if (newResult < result)
3129             return std::numeric_limits<uint32_t>::max();
3130         result = newResult;
3131     }
3132     return result;
3133 }
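// The loop above is a saturating version of "desiredThreshold << retryCounter": a
// threshold of 100 with a baseline retry counter of 3 becomes 800, and a doubling that
// would overflow returns UINT32_MAX. A minimal equivalent sketch (not part of this file):
//
//     static uint32_t saturatingScaleByRetries(uint32_t threshold, unsigned retries)
//     {
//         while (retries--) {
//             if (threshold > std::numeric_limits<uint32_t>::max() / 2)
//                 return std::numeric_limits<uint32_t>::max();
//             threshold <<= 1;
//         }
//         return threshold;
//     }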
3134
3135 uint32_t CodeBlock::exitCountThresholdForReoptimization()
3136 {
3137     return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
3138 }
3139
3140 uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
3141 {
3142     return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
3143 }
3144
3145 bool CodeBlock::shouldReoptimizeNow()
3146 {
3147     return osrExitCounter() >= exitCountThresholdForReoptimization();
3148 }
3149
3150 bool CodeBlock::shouldReoptimizeFromLoopNow()
3151 {
3152     return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
3153 }
3154 #endif
3155
3156 ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
3157 {
3158     for (unsigned i = 0; i < m_arrayProfiles.size(); ++i) {
3159         if (m_arrayProfiles[i].bytecodeOffset() == bytecodeOffset)
3160             return &m_arrayProfiles[i];
3161     }
3162     return 0;
3163 }
3164
3165 ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
3166 {
3167     ArrayProfile* result = getArrayProfile(bytecodeOffset);
3168     if (result)
3169         return result;
3170     return addArrayProfile(bytecodeOffset);
3171 }
3172
3173 void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
3174 {
3175     ConcurrentJITLocker locker(m_lock);
3176     
3177     numberOfLiveNonArgumentValueProfiles = 0;
3178     numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
3179     for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
3180         ValueProfile* profile = getFromAllValueProfiles(i);
3181         unsigned numSamples = profile->totalNumberOfSamples();
3182         if (numSamples > ValueProfile::numberOfBuckets)
3183             numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
3184         numberOfSamplesInProfiles += numSamples;
3185         if (profile->m_bytecodeOffset < 0) {
3186             profile->computeUpdatedPrediction(locker);
3187             continue;
3188         }
3189         if (profile->numberOfSamples() || profile->m_prediction != SpecNone)
3190             numberOfLiveNonArgumentValueProfiles++;
3191         profile->computeUpdatedPrediction(locker);
3192     }
3193     
3194 #if ENABLE(DFG_JIT)
3195     m_lazyOperandValueProfiles.computeUpdatedPredictions(locker);
3196 #endif
3197 }
3198
3199 void CodeBlock::updateAllValueProfilePredictions()
3200 {
3201     unsigned ignoredValue1, ignoredValue2;
3202     updateAllPredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
3203 }
3204
3205 void CodeBlock::updateAllArrayPredictions()
3206 {
3207     ConcurrentJITLocker locker(m_lock);
3208     
3209     for (unsigned i = m_arrayProfiles.size(); i--;)
3210         m_arrayProfiles[i].computeUpdatedPrediction(locker, this);
3211     
3212     // These aren't counted toward the profile fullness statistics either, for similar reasons.
3213     for (unsigned i = m_arrayAllocationProfiles.size(); i--;)
3214         m_arrayAllocationProfiles[i].updateIndexingType();
3215 }
3216
3217 void CodeBlock::updateAllPredictions()
3218 {
3219     updateAllValueProfilePredictions();
3220     updateAllArrayPredictions();
3221 }
3222
3223 bool CodeBlock::shouldOptimizeNow()
3224 {
3225     if (Options::verboseOSR())
3226         dataLog("Considering optimizing ", *this, "...\n");
3227
3228     if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
3229         return true;
3230     
3231     updateAllArrayPredictions();
3232     
3233     unsigned numberOfLiveNonArgumentValueProfiles;
3234     unsigned numberOfSamplesInProfiles;
3235     updateAllPredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
3236
3237     if (Options::verboseOSR()) {
3238         dataLogF(
3239             "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
3240             (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles(),
3241             numberOfLiveNonArgumentValueProfiles, numberOfValueProfiles(),
3242             (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles(),
3243             numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfValueProfiles());
3244     }
3245
3246     if ((!numberOfValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles() >= Options::desiredProfileLivenessRate())
3247         && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
3248         && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
3249         return true;
3250     
3251     ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
3252     m_optimizationDelayCounter++;
3253     optimizeAfterWarmUp();
3254     return false;
3255 }
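// Illustration of the gate above (the rates are hypothetical, not the actual Options
// defaults): if desiredProfileLivenessRate() were 0.75 and desiredProfileFullnessRate()
// were 0.35, a block with 40 of its 50 value profiles live (0.8) and buckets 40% full
// would satisfy both profile conditions and optimize once the minimum-delay requirement
// is also met.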
3256
3257 #if ENABLE(DFG_JIT)
3258 void CodeBlock::tallyFrequentExitSites()
3259 {
3260     ASSERT(JITCode::isOptimizingJIT(jitType()));
3261     ASSERT(alternative()->jitType() == JITCode::BaselineJIT);
3262     
3263     CodeBlock* profiledBlock = alternative();
3264     
3265     switch (jitType()) {
3266     case JITCode::DFGJIT: {
3267         DFG::JITCode* jitCode = m_jitCode->dfg();
3268         for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
3269             DFG::OSRExit& exit = jitCode->osrExit[i];
3270             
3271             if (!exit.considerAddingAsFrequentExitSite(profiledBlock))
3272                 continue;
3273         }
3274         break;
3275     }
3276
3277 #if ENABLE(FTL_JIT)
3278     case JITCode::FTLJIT: {
3279         // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
3280         // vector contains a totally different type that just so happens to behave like
3281         // DFG::JITCode::osrExit.
3282         FTL::JITCode* jitCode = m_jitCode->ftl();
3283         for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
3284             FTL::OSRExit& exit = jitCode->osrExit[i];
3285             
3286             if (!exit.considerAddingAsFrequentExitSite(profiledBlock))
3287                 continue;
3288         }
3289         break;
3290     }
3291 #endif
3292         
3293     default:
3294         RELEASE_ASSERT_NOT_REACHED();
3295         break;
3296     }
3297 }
3298 #endif // ENABLE(DFG_JIT)
3299
3300 #if ENABLE(VERBOSE_VALUE_PROFILE)