1 /*
2  * Copyright (C) 2008-2010, 2012-2015 Apple Inc. All rights reserved.
3  * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1.  Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  * 2.  Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
15  *     its contributors may be used to endorse or promote products derived
16  *     from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29
30 #include "config.h"
31 #include "CodeBlock.h"
32
33 #include "BasicBlockLocation.h"
34 #include "BytecodeGenerator.h"
35 #include "BytecodeUseDef.h"
36 #include "CallLinkStatus.h"
37 #include "DFGCapabilities.h"
38 #include "DFGCommon.h"
39 #include "DFGDriver.h"
40 #include "DFGJITCode.h"
41 #include "DFGWorklist.h"
42 #include "Debugger.h"
43 #include "FunctionExecutableDump.h"
44 #include "Interpreter.h"
45 #include "JIT.h"
46 #include "JITStubs.h"
47 #include "JSCJSValue.h"
48 #include "JSFunction.h"
49 #include "JSLexicalEnvironment.h"
50 #include "JSNameScope.h"
51 #include "LLIntEntrypoint.h"
52 #include "LowLevelInterpreter.h"
53 #include "JSCInlines.h"
54 #include "PolymorphicGetByIdList.h"
55 #include "PolymorphicPutByIdList.h"
56 #include "ProfilerDatabase.h"
57 #include "ReduceWhitespace.h"
58 #include "Repatch.h"
59 #include "RepatchBuffer.h"
60 #include "SlotVisitorInlines.h"
61 #include "StackVisitor.h"
62 #include "TypeLocationCache.h"
63 #include "TypeProfiler.h"
64 #include "UnlinkedInstructionStream.h"
65 #include <wtf/BagToHashMap.h>
66 #include <wtf/CommaPrinter.h>
67 #include <wtf/StringExtras.h>
68 #include <wtf/StringPrintStream.h>
69
70 #if ENABLE(DFG_JIT)
71 #include "DFGOperations.h"
72 #endif
73
74 #if ENABLE(FTL_JIT)
75 #include "FTLJITCode.h"
76 #endif
77
78 namespace JSC {
79
80 CString CodeBlock::inferredName() const
81 {
82     switch (codeType()) {
83     case GlobalCode:
84         return "<global>";
85     case EvalCode:
86         return "<eval>";
87     case FunctionCode:
88         return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
89     default:
90         CRASH();
91         return CString("", 0);
92     }
93 }
94
95 bool CodeBlock::hasHash() const
96 {
97     return !!m_hash;
98 }
99
100 bool CodeBlock::isSafeToComputeHash() const
101 {
102     return !isCompilationThread();
103 }
104
105 CodeBlockHash CodeBlock::hash() const
106 {
107     if (!m_hash) {
108         RELEASE_ASSERT(isSafeToComputeHash());
109         m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind());
110     }
111     return m_hash;
112 }
113
114 CString CodeBlock::sourceCodeForTools() const
115 {
116     if (codeType() != FunctionCode)
117         return ownerExecutable()->source().toUTF8();
118     
119     SourceProvider* provider = source();
120     FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
121     UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
122     unsigned unlinkedStartOffset = unlinked->startOffset();
123     unsigned linkedStartOffset = executable->source().startOffset();
124     int delta = linkedStartOffset - unlinkedStartOffset;
125     unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
126     unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
127     return toCString(
128         "function ",
129         provider->source().impl()->utf8ForRange(rangeStart, rangeEnd - rangeStart));
130 }
131
132 CString CodeBlock::sourceCodeOnOneLine() const
133 {
134     return reduceWhitespace(sourceCodeForTools());
135 }
136
137 CString CodeBlock::hashAsStringIfPossible() const
138 {
139     if (hasHash() || isSafeToComputeHash())
140         return toCString(hash());
141     return "<no-hash>";
142 }
143
144 void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
145 {
146     out.print(inferredName(), "#", hashAsStringIfPossible());
147     out.print(":[", RawPointer(this), "->");
148     if (!!m_alternative)
149         out.print(RawPointer(m_alternative.get()), "->");
150     out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());
151
152     if (codeType() == FunctionCode)
153         out.print(specializationKind());
154     out.print(", ", instructionCount());
155     if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
156         out.print(" (ShouldAlwaysBeInlined)");
157     if (ownerExecutable()->neverInline())
158         out.print(" (NeverInline)");
159     if (ownerExecutable()->didTryToEnterInLoop())
160         out.print(" (DidTryToEnterInLoop)");
161     if (ownerExecutable()->isStrictMode())
162         out.print(" (StrictMode)");
163     if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
164         out.print(" (FTLFail)");
165     if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
166         out.print(" (HadFTLReplacement)");
167     out.print("]");
168 }
169
170 void CodeBlock::dump(PrintStream& out) const
171 {
172     dumpAssumingJITType(out, jitType());
173 }
174
175 static CString constantName(int k, JSValue value)
176 {
177     return toCString(value, "(", VirtualRegister(k), ")");
178 }
179
180 static CString idName(int id0, const Identifier& ident)
181 {
182     return toCString(ident.impl(), "(@id", id0, ")");
183 }
184
185 CString CodeBlock::registerName(int r) const
186 {
187     if (isConstantRegisterIndex(r))
188         return constantName(r, getConstant(r));
189
190     return toCString(VirtualRegister(r));
191 }
192
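// Renders a RegExp constant as "/pattern/flags" source text for the dumps below.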
193 static CString regexpToSourceString(RegExp* regExp)
194 {
195     char postfix[5] = { '/', 0, 0, 0, 0 };
196     int index = 1;
197     if (regExp->global())
198         postfix[index++] = 'g';
199     if (regExp->ignoreCase())
200         postfix[index++] = 'i';
201     if (regExp->multiline())
202         postfix[index] = 'm';
203
204     return toCString("/", regExp->pattern().impl(), postfix);
205 }
206
207 static CString regexpName(int re, RegExp* regexp)
208 {
209     return toCString(regexpToSourceString(regexp), "(@re", re, ")");
210 }
211
212 NEVER_INLINE static const char* debugHookName(int debugHookID)
213 {
214     switch (static_cast<DebugHookID>(debugHookID)) {
215         case DidEnterCallFrame:
216             return "didEnterCallFrame";
217         case WillLeaveCallFrame:
218             return "willLeaveCallFrame";
219         case WillExecuteStatement:
220             return "willExecuteStatement";
221         case WillExecuteProgram:
222             return "willExecuteProgram";
223         case DidExecuteProgram:
224             return "didExecuteProgram";
225         case DidReachBreakpoint:
226             return "didReachBreakpoint";
227     }
228
229     RELEASE_ASSERT_NOT_REACHED();
230     return "";
231 }
232
233 void CodeBlock::printUnaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
234 {
235     int r0 = (++it)->u.operand;
236     int r1 = (++it)->u.operand;
237
238     printLocationAndOp(out, exec, location, it, op);
239     out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
240 }
241
242 void CodeBlock::printBinaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
243 {
244     int r0 = (++it)->u.operand;
245     int r1 = (++it)->u.operand;
246     int r2 = (++it)->u.operand;
247     printLocationAndOp(out, exec, location, it, op);
248     out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
249 }
250
251 void CodeBlock::printConditionalJump(PrintStream& out, ExecState* exec, const Instruction*, const Instruction*& it, int location, const char* op)
252 {
253     int r0 = (++it)->u.operand;
254     int offset = (++it)->u.operand;
255     printLocationAndOp(out, exec, location, it, op);
256     out.printf("%s, %d(->%d)", registerName(r0).data(), offset, location + offset);
257 }
258
259 void CodeBlock::printGetByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it)
260 {
261     const char* op;
262     switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
263     case op_get_by_id:
264         op = "get_by_id";
265         break;
266     case op_get_by_id_out_of_line:
267         op = "get_by_id_out_of_line";
268         break;
269     case op_get_array_length:
270         op = "array_length";
271         break;
272     default:
273         RELEASE_ASSERT_NOT_REACHED();
274 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
275         op = 0;
276 #endif
277     }
278     int r0 = (++it)->u.operand;
279     int r1 = (++it)->u.operand;
280     int id0 = (++it)->u.operand;
281     printLocationAndOp(out, exec, location, it, op);
282     out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
283     it += 4; // Increment up to the value profiler.
284 }
285
286 static void dumpStructure(PrintStream& out, const char* name, Structure* structure, const Identifier& ident)
287 {
288     if (!structure)
289         return;
290     
291     out.printf("%s = %p", name, structure);
292     
293     PropertyOffset offset = structure->getConcurrently(ident.impl());
294     if (offset != invalidOffset)
295         out.printf(" (offset = %d)", offset);
296 }
297
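// Walks a prototype StructureChain and dumps each Structure in it, including the property's offset where one exists.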
298 static void dumpChain(PrintStream& out, StructureChain* chain, const Identifier& ident)
299 {
300     out.printf("chain = %p: [", chain);
301     bool first = true;
302     for (WriteBarrier<Structure>* currentStructure = chain->head();
303          *currentStructure;
304          ++currentStructure) {
305         if (first)
306             first = false;
307         else
308             out.printf(", ");
309         dumpStructure(out, "struct", currentStructure->get(), ident);
310     }
311     out.printf("]");
312 }
313
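// Dumps whatever inline cache state (LLInt and, when the JIT is enabled, JIT stubs) has been recorded for the get_by_id at this bytecode offset.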
314 void CodeBlock::printGetByIdCacheStatus(PrintStream& out, ExecState* exec, int location, const StubInfoMap& map)
315 {
316     Instruction* instruction = instructions().begin() + location;
317
318     const Identifier& ident = identifier(instruction[3].u.operand);
319     
320     UNUSED_PARAM(ident); // Silence unused-variable warnings in configurations where ident is otherwise unused.
321     
322     if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_array_length)
323         out.printf(" llint(array_length)");
324     else if (Structure* structure = instruction[4].u.structure.get()) {
325         out.printf(" llint(");
326         dumpStructure(out, "struct", structure, ident);
327         out.printf(")");
328     }
329
330 #if ENABLE(JIT)
331     if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
332         StructureStubInfo& stubInfo = *stubPtr;
333         if (stubInfo.resetByGC)
334             out.print(" (Reset By GC)");
335         
336         if (stubInfo.seen) {
337             out.printf(" jit(");
338             
339             Structure* baseStructure = 0;
340             Structure* prototypeStructure = 0;
341             StructureChain* chain = 0;
342             PolymorphicGetByIdList* list = 0;
343             
344             switch (stubInfo.accessType) {
345             case access_get_by_id_self:
346                 out.printf("self");
347                 baseStructure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
348                 break;
349             case access_get_by_id_list:
350                 out.printf("list");
351                 list = stubInfo.u.getByIdList.list;
352                 break;
353             case access_unset:
354                 out.printf("unset");
355                 break;
356             default:
357                 RELEASE_ASSERT_NOT_REACHED();
358                 break;
359             }
360             
361             if (baseStructure) {
362                 out.printf(", ");
363                 dumpStructure(out, "struct", baseStructure, ident);
364             }
365             
366             if (prototypeStructure) {
367                 out.printf(", ");
368                 dumpStructure(out, "prototypeStruct", baseStructure, ident);
369             }
370             
371             if (chain) {
372                 out.printf(", ");
373                 dumpChain(out, chain, ident);
374             }
375             
376             if (list) {
377                 out.printf(", list = %p: [", list);
378                 for (unsigned i = 0; i < list->size(); ++i) {
379                     if (i)
380                         out.printf(", ");
381                     out.printf("(");
382                     dumpStructure(out, "base", list->at(i).structure(), ident);
383                     if (list->at(i).chain()) {
384                         out.printf(", ");
385                         dumpChain(out, list->at(i).chain(), ident);
386                     }
387                     out.printf(")");
388                 }
389                 out.printf("]");
390             }
391             out.printf(")");
392         }
393     }
394 #else
395     UNUSED_PARAM(map);
396 #endif
397 }
398
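// Dumps whatever inline cache state (LLInt and, when the JIT is enabled, JIT stubs) has been recorded for the put_by_id at this bytecode offset.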
399 void CodeBlock::printPutByIdCacheStatus(PrintStream& out, ExecState* exec, int location, const StubInfoMap& map)
400 {
401     Instruction* instruction = instructions().begin() + location;
402
403     const Identifier& ident = identifier(instruction[2].u.operand);
404     
405     UNUSED_PARAM(ident); // Silence unused-variable warnings in configurations where ident is otherwise unused.
406     
407     if (Structure* structure = instruction[4].u.structure.get()) {
408         switch (exec->interpreter()->getOpcodeID(instruction[0].u.opcode)) {
409         case op_put_by_id:
410         case op_put_by_id_out_of_line:
411             out.print(" llint(");
412             dumpStructure(out, "struct", structure, ident);
413             out.print(")");
414             break;
415             
416         case op_put_by_id_transition_direct:
417         case op_put_by_id_transition_normal:
418         case op_put_by_id_transition_direct_out_of_line:
419         case op_put_by_id_transition_normal_out_of_line:
420             out.print(" llint(");
421             dumpStructure(out, "prev", structure, ident);
422             out.print(", ");
423             dumpStructure(out, "next", instruction[6].u.structure.get(), ident);
424             if (StructureChain* chain = instruction[7].u.structureChain.get()) {
425                 out.print(", ");
426                 dumpChain(out, chain, ident);
427             }
428             out.print(")");
429             break;
430             
431         default:
432             out.print(" llint(unknown)");
433             break;
434         }
435     }
436
437 #if ENABLE(JIT)
438     if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
439         StructureStubInfo& stubInfo = *stubPtr;
440         if (stubInfo.resetByGC)
441             out.print(" (Reset By GC)");
442         
443         if (stubInfo.seen) {
444             out.printf(" jit(");
445             
446             switch (stubInfo.accessType) {
447             case access_put_by_id_replace:
448                 out.print("replace, ");
449                 dumpStructure(out, "struct", stubInfo.u.putByIdReplace.baseObjectStructure.get(), ident);
450                 break;
451             case access_put_by_id_transition_normal:
452             case access_put_by_id_transition_direct:
453                 out.print("transition, ");
454                 dumpStructure(out, "prev", stubInfo.u.putByIdTransition.previousStructure.get(), ident);
455                 out.print(", ");
456                 dumpStructure(out, "next", stubInfo.u.putByIdTransition.structure.get(), ident);
457                 if (StructureChain* chain = stubInfo.u.putByIdTransition.chain.get()) {
458                     out.print(", ");
459                     dumpChain(out, chain, ident);
460                 }
461                 break;
462             case access_put_by_id_list: {
463                 out.printf("list = [");
464                 PolymorphicPutByIdList* list = stubInfo.u.putByIdList.list;
465                 CommaPrinter comma;
466                 for (unsigned i = 0; i < list->size(); ++i) {
467                     out.print(comma, "(");
468                     const PutByIdAccess& access = list->at(i);
469                     
470                     if (access.isReplace()) {
471                         out.print("replace, ");
472                         dumpStructure(out, "struct", access.oldStructure(), ident);
473                     } else if (access.isSetter()) {
474                         out.print("setter, ");
475                         dumpStructure(out, "struct", access.oldStructure(), ident);
476                     } else if (access.isCustom()) {
477                         out.print("custom, ");
478                         dumpStructure(out, "struct", access.oldStructure(), ident);
479                     } else if (access.isTransition()) {
480                         out.print("transition, ");
481                         dumpStructure(out, "prev", access.oldStructure(), ident);
482                         out.print(", ");
483                         dumpStructure(out, "next", access.newStructure(), ident);
484                         if (access.chain()) {
485                             out.print(", ");
486                             dumpChain(out, access.chain(), ident);
487                         }
488                     } else
489                         out.print("unknown");
490                     
491                     out.print(")");
492                 }
493                 out.print("]");
494                 break;
495             }
496             case access_unset:
497                 out.printf("unset");
498                 break;
499             default:
500                 RELEASE_ASSERT_NOT_REACHED();
501                 break;
502             }
503             out.printf(")");
504         }
505     }
506 #else
507     UNUSED_PARAM(map);
508 #endif
509 }
510
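// Dumps a call-family opcode; when cacheDumpMode is DumpCaches this also prints the LLInt and JIT call link caches and the computed call link status.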
511 void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap& map)
512 {
513     int dst = (++it)->u.operand;
514     int func = (++it)->u.operand;
515     int argCount = (++it)->u.operand;
516     int registerOffset = (++it)->u.operand;
517     printLocationAndOp(out, exec, location, it, op);
518     out.printf("%s, %s, %d, %d", registerName(dst).data(), registerName(func).data(), argCount, registerOffset);
519     if (cacheDumpMode == DumpCaches) {
520         LLIntCallLinkInfo* callLinkInfo = it[1].u.callLinkInfo;
521         if (callLinkInfo->lastSeenCallee) {
522             out.printf(
523                 " llint(%p, exec %p)",
524                 callLinkInfo->lastSeenCallee.get(),
525                 callLinkInfo->lastSeenCallee->executable());
526         }
527 #if ENABLE(JIT)
528         if (CallLinkInfo* info = map.get(CodeOrigin(location))) {
529             JSFunction* target = info->lastSeenCallee.get();
530             if (target)
531                 out.printf(" jit(%p, exec %p)", target, target->executable());
532         }
533         out.print(" status(", CallLinkStatus::computeFor(this, location, map), ")");
534 #else
535         UNUSED_PARAM(map);
536 #endif
537     }
538     ++it;
539     ++it;
540     dumpArrayProfiling(out, it, hasPrintedProfiling);
541     dumpValueProfiling(out, it, hasPrintedProfiling);
542 }
543
544 void CodeBlock::printPutByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
545 {
546     int r0 = (++it)->u.operand;
547     int id0 = (++it)->u.operand;
548     int r1 = (++it)->u.operand;
549     printLocationAndOp(out, exec, location, it, op);
550     out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data());
551     it += 5;
552 }
553
554 void CodeBlock::dumpBytecode(PrintStream& out)
555 {
556     // We only use the ExecState* for things that don't actually lead to JS execution,
557     // like converting a JSString to a String. Hence the globalExec is appropriate.
558     ExecState* exec = m_globalObject->globalExec();
559     
560     size_t instructionCount = 0;
561
562     for (size_t i = 0; i < instructions().size(); i += opcodeLengths[exec->interpreter()->getOpcodeID(instructions()[i].u.opcode)])
563         ++instructionCount;
564
565     out.print(*this);
566     out.printf(
567         ": %lu m_instructions; %lu bytes; %d parameter(s); %d callee register(s); %d variable(s)",
568         static_cast<unsigned long>(instructions().size()),
569         static_cast<unsigned long>(instructions().size() * sizeof(Instruction)),
570         m_numParameters, m_numCalleeRegisters, m_numVars);
571     if (symbolTable() && symbolTable()->captureCount()) {
572         out.printf(
573             "; %d captured var(s) (from r%d to r%d, inclusive)",
574             symbolTable()->captureCount(), symbolTable()->captureStart(), symbolTable()->captureEnd() + 1);
575     }
576     if (usesArguments()) {
577         out.printf(
578             "; uses arguments, in r%d, r%d",
579             argumentsRegister().offset(),
580             unmodifiedArgumentsRegister(argumentsRegister()).offset());
581     }
582     if (needsActivation() && codeType() == FunctionCode)
583         out.printf("; lexical environment in r%d", activationRegister().offset());
584     out.printf("\n");
585     
586     StubInfoMap stubInfos;
587     CallLinkInfoMap callLinkInfos;
588     getStubInfoMap(stubInfos);
589     getCallLinkInfoMap(callLinkInfos);
590     
591     const Instruction* begin = instructions().begin();
592     const Instruction* end = instructions().end();
593     for (const Instruction* it = begin; it != end; ++it)
594         dumpBytecode(out, exec, begin, it, stubInfos, callLinkInfos);
595     
596     if (numberOfIdentifiers()) {
597         out.printf("\nIdentifiers:\n");
598         size_t i = 0;
599         do {
600             out.printf("  id%u = %s\n", static_cast<unsigned>(i), identifier(i).string().utf8().data());
601             ++i;
602         } while (i != numberOfIdentifiers());
603     }
604
605     if (!m_constantRegisters.isEmpty()) {
606         out.printf("\nConstants:\n");
607         size_t i = 0;
608         do {
609             out.printf("   k%u = %s\n", static_cast<unsigned>(i), toCString(m_constantRegisters[i].get()).data());
610             ++i;
611         } while (i < m_constantRegisters.size());
612     }
613
614     if (size_t count = m_unlinkedCode->numberOfRegExps()) {
615         out.printf("\nm_regexps:\n");
616         size_t i = 0;
617         do {
618             out.printf("  re%u = %s\n", static_cast<unsigned>(i), regexpToSourceString(m_unlinkedCode->regexp(i)).data());
619             ++i;
620         } while (i < count);
621     }
622
623     if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
624         out.printf("\nException Handlers:\n");
625         unsigned i = 0;
626         do {
627             out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] depth: [%4d] }\n", i + 1, m_rareData->m_exceptionHandlers[i].start, m_rareData->m_exceptionHandlers[i].end, m_rareData->m_exceptionHandlers[i].target, m_rareData->m_exceptionHandlers[i].scopeDepth);
628             ++i;
629         } while (i < m_rareData->m_exceptionHandlers.size());
630     }
631     
632     if (m_rareData && !m_rareData->m_switchJumpTables.isEmpty()) {
633         out.printf("Switch Jump Tables:\n");
634         unsigned i = 0;
635         do {
636             out.printf("  %1d = {\n", i);
637             int entry = 0;
638             Vector<int32_t>::const_iterator end = m_rareData->m_switchJumpTables[i].branchOffsets.end();
639             for (Vector<int32_t>::const_iterator iter = m_rareData->m_switchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) {
640                 if (!*iter)
641                     continue;
642                 out.printf("\t\t%4d => %04d\n", entry + m_rareData->m_switchJumpTables[i].min, *iter);
643             }
644             out.printf("      }\n");
645             ++i;
646         } while (i < m_rareData->m_switchJumpTables.size());
647     }
648     
649     if (m_rareData && !m_rareData->m_stringSwitchJumpTables.isEmpty()) {
650         out.printf("\nString Switch Jump Tables:\n");
651         unsigned i = 0;
652         do {
653             out.printf("  %1d = {\n", i);
654             StringJumpTable::StringOffsetTable::const_iterator end = m_rareData->m_stringSwitchJumpTables[i].offsetTable.end();
655             for (StringJumpTable::StringOffsetTable::const_iterator iter = m_rareData->m_stringSwitchJumpTables[i].offsetTable.begin(); iter != end; ++iter)
656                 out.printf("\t\t\"%s\" => %04d\n", iter->key->utf8().data(), iter->value.branchOffset);
657             out.printf("      }\n");
658             ++i;
659         } while (i < m_rareData->m_stringSwitchJumpTables.size());
660     }
661
662     out.printf("\n");
663 }
664
665 void CodeBlock::beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling)
666 {
667     if (hasPrintedProfiling) {
668         out.print("; ");
669         return;
670     }
671     
672     out.print("    ");
673     hasPrintedProfiling = true;
674 }
675
676 void CodeBlock::dumpValueProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
677 {
678     ConcurrentJITLocker locker(m_lock);
679     
680     ++it;
681     CString description = it->u.profile->briefDescription(locker);
682     if (!description.length())
683         return;
684     beginDumpProfiling(out, hasPrintedProfiling);
685     out.print(description);
686 }
687
688 void CodeBlock::dumpArrayProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
689 {
690     ConcurrentJITLocker locker(m_lock);
691     
692     ++it;
693     if (!it->u.arrayProfile)
694         return;
695     CString description = it->u.arrayProfile->briefDescription(locker, this);
696     if (!description.length())
697         return;
698     beginDumpProfiling(out, hasPrintedProfiling);
699     out.print(description);
700 }
701
702 void CodeBlock::dumpRareCaseProfile(PrintStream& out, const char* name, RareCaseProfile* profile, bool& hasPrintedProfiling)
703 {
704     if (!profile || !profile->m_counter)
705         return;
706
707     beginDumpProfiling(out, hasPrintedProfiling);
708     out.print(name, profile->m_counter);
709 }
710
711 void CodeBlock::printLocationAndOp(PrintStream& out, ExecState*, int location, const Instruction*&, const char* op)
712 {
713     out.printf("[%4d] %-17s ", location, op);
714 }
715
716 void CodeBlock::printLocationOpAndRegisterOperand(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, int operand)
717 {
718     printLocationAndOp(out, exec, location, it, op);
719     out.printf("%s", registerName(operand).data());
720 }
721
722 void CodeBlock::dumpBytecode(
723     PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it,
724     const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
725 {
726     int location = it - begin;
727     bool hasPrintedProfiling = false;
728     OpcodeID opcode = exec->interpreter()->getOpcodeID(it->u.opcode);
729     switch (opcode) {
730         case op_enter: {
731             printLocationAndOp(out, exec, location, it, "enter");
732             break;
733         }
734         case op_touch_entry: {
735             printLocationAndOp(out, exec, location, it, "touch_entry");
736             break;
737         }
738         case op_create_lexical_environment: {
739             int r0 = (++it)->u.operand;
740             int r1 = (++it)->u.operand;
741             printLocationAndOp(out, exec, location, it, "create_lexical_environment");
742             out.printf("%s %s", registerName(r0).data(), registerName(r1).data());
743             break;
744         }
745         case op_get_scope: {
746             int r0 = (++it)->u.operand;
747             printLocationOpAndRegisterOperand(out, exec, location, it, "get_scope", r0);
748             break;
749         }
750         case op_create_arguments: {
751             int r0 = (++it)->u.operand;
752             int r1 = (++it)->u.operand;
753             printLocationAndOp(out, exec, location, it, "create_arguments");
754             out.printf("%s %s", registerName(r0).data(), registerName(r1).data());
755             break;
756         }
757         case op_init_lazy_reg: {
758             int r0 = (++it)->u.operand;
759             printLocationOpAndRegisterOperand(out, exec, location, it, "init_lazy_reg", r0);
760             break;
761         }
762         case op_get_callee: {
763             int r0 = (++it)->u.operand;
764             printLocationOpAndRegisterOperand(out, exec, location, it, "get_callee", r0);
765             ++it;
766             break;
767         }
768         case op_create_this: {
769             int r0 = (++it)->u.operand;
770             int r1 = (++it)->u.operand;
771             unsigned inferredInlineCapacity = (++it)->u.operand;
772             printLocationAndOp(out, exec, location, it, "create_this");
773             out.printf("%s, %s, %u", registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity);
774             break;
775         }
776         case op_to_this: {
777             int r0 = (++it)->u.operand;
778             printLocationOpAndRegisterOperand(out, exec, location, it, "to_this", r0);
779             Structure* structure = (++it)->u.structure.get();
780             if (structure)
781                 out.print(" cache(struct = ", RawPointer(structure), ")");
782             out.print(" ", (++it)->u.toThisStatus);
783             break;
784         }
785         case op_new_object: {
786             int r0 = (++it)->u.operand;
787             unsigned inferredInlineCapacity = (++it)->u.operand;
788             printLocationAndOp(out, exec, location, it, "new_object");
789             out.printf("%s, %u", registerName(r0).data(), inferredInlineCapacity);
790             ++it; // Skip object allocation profile.
791             break;
792         }
793         case op_new_array: {
794             int dst = (++it)->u.operand;
795             int argv = (++it)->u.operand;
796             int argc = (++it)->u.operand;
797             printLocationAndOp(out, exec, location, it, "new_array");
798             out.printf("%s, %s, %d", registerName(dst).data(), registerName(argv).data(), argc);
799             ++it; // Skip array allocation profile.
800             break;
801         }
802         case op_new_array_with_size: {
803             int dst = (++it)->u.operand;
804             int length = (++it)->u.operand;
805             printLocationAndOp(out, exec, location, it, "new_array_with_size");
806             out.printf("%s, %s", registerName(dst).data(), registerName(length).data());
807             ++it; // Skip array allocation profile.
808             break;
809         }
810         case op_new_array_buffer: {
811             int dst = (++it)->u.operand;
812             int argv = (++it)->u.operand;
813             int argc = (++it)->u.operand;
814             printLocationAndOp(out, exec, location, it, "new_array_buffer");
815             out.printf("%s, %d, %d", registerName(dst).data(), argv, argc);
816             ++it; // Skip array allocation profile.
817             break;
818         }
819         case op_new_regexp: {
820             int r0 = (++it)->u.operand;
821             int re0 = (++it)->u.operand;
822             printLocationAndOp(out, exec, location, it, "new_regexp");
823             out.printf("%s, ", registerName(r0).data());
824             if (re0 >= 0 && re0 < (int)m_unlinkedCode->numberOfRegExps())
825                 out.printf("%s", regexpName(re0, regexp(re0)).data());
826             else
827                 out.printf("bad_regexp(%d)", re0);
828             break;
829         }
830         case op_mov: {
831             int r0 = (++it)->u.operand;
832             int r1 = (++it)->u.operand;
833             printLocationAndOp(out, exec, location, it, "mov");
834             out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
835             break;
836         }
837         case op_profile_type: {
838             int r0 = (++it)->u.operand;
839             ++it;
840             ++it;
841             ++it;
842             ++it;
843             printLocationAndOp(out, exec, location, it, "op_profile_type");
844             out.printf("%s", registerName(r0).data());
845             break;
846         }
847         case op_profile_control_flow: {
848             BasicBlockLocation* basicBlockLocation = (++it)->u.basicBlockLocation;
849             printLocationAndOp(out, exec, location, it, "profile_control_flow");
850             out.printf("[%d, %d]", basicBlockLocation->startOffset(), basicBlockLocation->endOffset());
851             break;
852         }
853         case op_not: {
854             printUnaryOp(out, exec, location, it, "not");
855             break;
856         }
857         case op_eq: {
858             printBinaryOp(out, exec, location, it, "eq");
859             break;
860         }
861         case op_eq_null: {
862             printUnaryOp(out, exec, location, it, "eq_null");
863             break;
864         }
865         case op_neq: {
866             printBinaryOp(out, exec, location, it, "neq");
867             break;
868         }
869         case op_neq_null: {
870             printUnaryOp(out, exec, location, it, "neq_null");
871             break;
872         }
873         case op_stricteq: {
874             printBinaryOp(out, exec, location, it, "stricteq");
875             break;
876         }
877         case op_nstricteq: {
878             printBinaryOp(out, exec, location, it, "nstricteq");
879             break;
880         }
881         case op_less: {
882             printBinaryOp(out, exec, location, it, "less");
883             break;
884         }
885         case op_lesseq: {
886             printBinaryOp(out, exec, location, it, "lesseq");
887             break;
888         }
889         case op_greater: {
890             printBinaryOp(out, exec, location, it, "greater");
891             break;
892         }
893         case op_greatereq: {
894             printBinaryOp(out, exec, location, it, "greatereq");
895             break;
896         }
897         case op_inc: {
898             int r0 = (++it)->u.operand;
899             printLocationOpAndRegisterOperand(out, exec, location, it, "inc", r0);
900             break;
901         }
902         case op_dec: {
903             int r0 = (++it)->u.operand;
904             printLocationOpAndRegisterOperand(out, exec, location, it, "dec", r0);
905             break;
906         }
907         case op_to_number: {
908             printUnaryOp(out, exec, location, it, "to_number");
909             break;
910         }
911         case op_negate: {
912             printUnaryOp(out, exec, location, it, "negate");
913             break;
914         }
915         case op_add: {
916             printBinaryOp(out, exec, location, it, "add");
917             ++it;
918             break;
919         }
920         case op_mul: {
921             printBinaryOp(out, exec, location, it, "mul");
922             ++it;
923             break;
924         }
925         case op_div: {
926             printBinaryOp(out, exec, location, it, "div");
927             ++it;
928             break;
929         }
930         case op_mod: {
931             printBinaryOp(out, exec, location, it, "mod");
932             break;
933         }
934         case op_sub: {
935             printBinaryOp(out, exec, location, it, "sub");
936             ++it;
937             break;
938         }
939         case op_lshift: {
940             printBinaryOp(out, exec, location, it, "lshift");
941             break;            
942         }
943         case op_rshift: {
944             printBinaryOp(out, exec, location, it, "rshift");
945             break;
946         }
947         case op_urshift: {
948             printBinaryOp(out, exec, location, it, "urshift");
949             break;
950         }
951         case op_bitand: {
952             printBinaryOp(out, exec, location, it, "bitand");
953             ++it;
954             break;
955         }
956         case op_bitxor: {
957             printBinaryOp(out, exec, location, it, "bitxor");
958             ++it;
959             break;
960         }
961         case op_bitor: {
962             printBinaryOp(out, exec, location, it, "bitor");
963             ++it;
964             break;
965         }
966         case op_check_has_instance: {
967             int r0 = (++it)->u.operand;
968             int r1 = (++it)->u.operand;
969             int r2 = (++it)->u.operand;
970             int offset = (++it)->u.operand;
971             printLocationAndOp(out, exec, location, it, "check_has_instance");
972             out.printf("%s, %s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), offset, location + offset);
973             break;
974         }
975         case op_instanceof: {
976             int r0 = (++it)->u.operand;
977             int r1 = (++it)->u.operand;
978             int r2 = (++it)->u.operand;
979             printLocationAndOp(out, exec, location, it, "instanceof");
980             out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
981             break;
982         }
983         case op_unsigned: {
984             printUnaryOp(out, exec, location, it, "unsigned");
985             break;
986         }
987         case op_typeof: {
988             printUnaryOp(out, exec, location, it, "typeof");
989             break;
990         }
991         case op_is_undefined: {
992             printUnaryOp(out, exec, location, it, "is_undefined");
993             break;
994         }
995         case op_is_boolean: {
996             printUnaryOp(out, exec, location, it, "is_boolean");
997             break;
998         }
999         case op_is_number: {
1000             printUnaryOp(out, exec, location, it, "is_number");
1001             break;
1002         }
1003         case op_is_string: {
1004             printUnaryOp(out, exec, location, it, "is_string");
1005             break;
1006         }
1007         case op_is_object: {
1008             printUnaryOp(out, exec, location, it, "is_object");
1009             break;
1010         }
1011         case op_is_object_or_null: {
1012             printUnaryOp(out, exec, location, it, "is_object_or_null");
1013             break;
1014         }
1015         case op_is_function: {
1016             printUnaryOp(out, exec, location, it, "is_function");
1017             break;
1018         }
1019         case op_in: {
1020             printBinaryOp(out, exec, location, it, "in");
1021             break;
1022         }
1023         case op_init_global_const_nop: {
1024             printLocationAndOp(out, exec, location, it, "init_global_const_nop");
1025             it++;
1026             it++;
1027             it++;
1028             it++;
1029             break;
1030         }
1031         case op_init_global_const: {
1032             WriteBarrier<Unknown>* registerPointer = (++it)->u.registerPointer;
1033             int r0 = (++it)->u.operand;
1034             printLocationAndOp(out, exec, location, it, "init_global_const");
1035             out.printf("g%d(%p), %s", m_globalObject->findRegisterIndex(registerPointer), registerPointer, registerName(r0).data());
1036             it++;
1037             it++;
1038             break;
1039         }
1040         case op_get_by_id:
1041         case op_get_by_id_out_of_line:
1042         case op_get_array_length: {
1043             printGetByIdOp(out, exec, location, it);
1044             printGetByIdCacheStatus(out, exec, location, stubInfos);
1045             dumpValueProfiling(out, it, hasPrintedProfiling);
1046             break;
1047         }
1048         case op_get_arguments_length: {
1049             printUnaryOp(out, exec, location, it, "get_arguments_length");
1050             it++;
1051             break;
1052         }
1053         case op_put_by_id: {
1054             printPutByIdOp(out, exec, location, it, "put_by_id");
1055             printPutByIdCacheStatus(out, exec, location, stubInfos);
1056             break;
1057         }
1058         case op_put_by_id_out_of_line: {
1059             printPutByIdOp(out, exec, location, it, "put_by_id_out_of_line");
1060             printPutByIdCacheStatus(out, exec, location, stubInfos);
1061             break;
1062         }
1063         case op_put_by_id_transition_direct: {
1064             printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct");
1065             printPutByIdCacheStatus(out, exec, location, stubInfos);
1066             break;
1067         }
1068         case op_put_by_id_transition_direct_out_of_line: {
1069             printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct_out_of_line");
1070             printPutByIdCacheStatus(out, exec, location, stubInfos);
1071             break;
1072         }
1073         case op_put_by_id_transition_normal: {
1074             printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal");
1075             printPutByIdCacheStatus(out, exec, location, stubInfos);
1076             break;
1077         }
1078         case op_put_by_id_transition_normal_out_of_line: {
1079             printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal_out_of_line");
1080             printPutByIdCacheStatus(out, exec, location, stubInfos);
1081             break;
1082         }
1083         case op_put_getter_setter: {
1084             int r0 = (++it)->u.operand;
1085             int id0 = (++it)->u.operand;
1086             int r1 = (++it)->u.operand;
1087             int r2 = (++it)->u.operand;
1088             printLocationAndOp(out, exec, location, it, "put_getter_setter");
1089             out.printf("%s, %s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data(), registerName(r2).data());
1090             break;
1091         }
1092         case op_del_by_id: {
1093             int r0 = (++it)->u.operand;
1094             int r1 = (++it)->u.operand;
1095             int id0 = (++it)->u.operand;
1096             printLocationAndOp(out, exec, location, it, "del_by_id");
1097             out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
1098             break;
1099         }
1100         case op_get_by_val: {
1101             int r0 = (++it)->u.operand;
1102             int r1 = (++it)->u.operand;
1103             int r2 = (++it)->u.operand;
1104             printLocationAndOp(out, exec, location, it, "get_by_val");
1105             out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1106             dumpArrayProfiling(out, it, hasPrintedProfiling);
1107             dumpValueProfiling(out, it, hasPrintedProfiling);
1108             break;
1109         }
1110         case op_get_argument_by_val: {
1111             int r0 = (++it)->u.operand;
1112             int r1 = (++it)->u.operand;
1113             int r2 = (++it)->u.operand;
1114             int r3 = (++it)->u.operand;
1115             printLocationAndOp(out, exec, location, it, "get_argument_by_val");
1116             out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data());
1117             ++it;
1118             dumpValueProfiling(out, it, hasPrintedProfiling);
1119             break;
1120         }
1121         case op_put_by_val: {
1122             int r0 = (++it)->u.operand;
1123             int r1 = (++it)->u.operand;
1124             int r2 = (++it)->u.operand;
1125             printLocationAndOp(out, exec, location, it, "put_by_val");
1126             out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1127             dumpArrayProfiling(out, it, hasPrintedProfiling);
1128             break;
1129         }
1130         case op_put_by_val_direct: {
1131             int r0 = (++it)->u.operand;
1132             int r1 = (++it)->u.operand;
1133             int r2 = (++it)->u.operand;
1134             printLocationAndOp(out, exec, location, it, "put_by_val_direct");
1135             out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1136             dumpArrayProfiling(out, it, hasPrintedProfiling);
1137             break;
1138         }
1139         case op_del_by_val: {
1140             int r0 = (++it)->u.operand;
1141             int r1 = (++it)->u.operand;
1142             int r2 = (++it)->u.operand;
1143             printLocationAndOp(out, exec, location, it, "del_by_val");
1144             out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1145             break;
1146         }
1147         case op_put_by_index: {
1148             int r0 = (++it)->u.operand;
1149             unsigned n0 = (++it)->u.operand;
1150             int r1 = (++it)->u.operand;
1151             printLocationAndOp(out, exec, location, it, "put_by_index");
1152             out.printf("%s, %u, %s", registerName(r0).data(), n0, registerName(r1).data());
1153             break;
1154         }
1155         case op_jmp: {
1156             int offset = (++it)->u.operand;
1157             printLocationAndOp(out, exec, location, it, "jmp");
1158             out.printf("%d(->%d)", offset, location + offset);
1159             break;
1160         }
1161         case op_jtrue: {
1162             printConditionalJump(out, exec, begin, it, location, "jtrue");
1163             break;
1164         }
1165         case op_jfalse: {
1166             printConditionalJump(out, exec, begin, it, location, "jfalse");
1167             break;
1168         }
1169         case op_jeq_null: {
1170             printConditionalJump(out, exec, begin, it, location, "jeq_null");
1171             break;
1172         }
1173         case op_jneq_null: {
1174             printConditionalJump(out, exec, begin, it, location, "jneq_null");
1175             break;
1176         }
1177         case op_jneq_ptr: {
1178             int r0 = (++it)->u.operand;
1179             Special::Pointer pointer = (++it)->u.specialPointer;
1180             int offset = (++it)->u.operand;
1181             printLocationAndOp(out, exec, location, it, "jneq_ptr");
1182             out.printf("%s, %d (%p), %d(->%d)", registerName(r0).data(), pointer, m_globalObject->actualPointerFor(pointer), offset, location + offset);
1183             break;
1184         }
1185         case op_jless: {
1186             int r0 = (++it)->u.operand;
1187             int r1 = (++it)->u.operand;
1188             int offset = (++it)->u.operand;
1189             printLocationAndOp(out, exec, location, it, "jless");
1190             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1191             break;
1192         }
1193         case op_jlesseq: {
1194             int r0 = (++it)->u.operand;
1195             int r1 = (++it)->u.operand;
1196             int offset = (++it)->u.operand;
1197             printLocationAndOp(out, exec, location, it, "jlesseq");
1198             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1199             break;
1200         }
1201         case op_jgreater: {
1202             int r0 = (++it)->u.operand;
1203             int r1 = (++it)->u.operand;
1204             int offset = (++it)->u.operand;
1205             printLocationAndOp(out, exec, location, it, "jgreater");
1206             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1207             break;
1208         }
1209         case op_jgreatereq: {
1210             int r0 = (++it)->u.operand;
1211             int r1 = (++it)->u.operand;
1212             int offset = (++it)->u.operand;
1213             printLocationAndOp(out, exec, location, it, "jgreatereq");
1214             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1215             break;
1216         }
1217         case op_jnless: {
1218             int r0 = (++it)->u.operand;
1219             int r1 = (++it)->u.operand;
1220             int offset = (++it)->u.operand;
1221             printLocationAndOp(out, exec, location, it, "jnless");
1222             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1223             break;
1224         }
1225         case op_jnlesseq: {
1226             int r0 = (++it)->u.operand;
1227             int r1 = (++it)->u.operand;
1228             int offset = (++it)->u.operand;
1229             printLocationAndOp(out, exec, location, it, "jnlesseq");
1230             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1231             break;
1232         }
1233         case op_jngreater: {
1234             int r0 = (++it)->u.operand;
1235             int r1 = (++it)->u.operand;
1236             int offset = (++it)->u.operand;
1237             printLocationAndOp(out, exec, location, it, "jngreater");
1238             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1239             break;
1240         }
1241         case op_jngreatereq: {
1242             int r0 = (++it)->u.operand;
1243             int r1 = (++it)->u.operand;
1244             int offset = (++it)->u.operand;
1245             printLocationAndOp(out, exec, location, it, "jngreatereq");
1246             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1247             break;
1248         }
1249         case op_loop_hint: {
1250             printLocationAndOp(out, exec, location, it, "loop_hint");
1251             break;
1252         }
1253         case op_switch_imm: {
1254             int tableIndex = (++it)->u.operand;
1255             int defaultTarget = (++it)->u.operand;
1256             int scrutineeRegister = (++it)->u.operand;
1257             printLocationAndOp(out, exec, location, it, "switch_imm");
1258             out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1259             break;
1260         }
1261         case op_switch_char: {
1262             int tableIndex = (++it)->u.operand;
1263             int defaultTarget = (++it)->u.operand;
1264             int scrutineeRegister = (++it)->u.operand;
1265             printLocationAndOp(out, exec, location, it, "switch_char");
1266             out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1267             break;
1268         }
1269         case op_switch_string: {
1270             int tableIndex = (++it)->u.operand;
1271             int defaultTarget = (++it)->u.operand;
1272             int scrutineeRegister = (++it)->u.operand;
1273             printLocationAndOp(out, exec, location, it, "switch_string");
1274             out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1275             break;
1276         }
1277         case op_new_func: {
1278             int r0 = (++it)->u.operand;
1279             int r1 = (++it)->u.operand;
1280             int f0 = (++it)->u.operand;
1281             int shouldCheck = (++it)->u.operand;
1282             printLocationAndOp(out, exec, location, it, "new_func");
1283             out.printf("%s, %s, f%d, %s", registerName(r0).data(), registerName(r1).data(), f0, shouldCheck ? "<Checked>" : "<Unchecked>");
1284             break;
1285         }
1286         case op_new_func_exp: {
1287             int r0 = (++it)->u.operand;
1288             int r1 = (++it)->u.operand;
1289             int f0 = (++it)->u.operand;
1290             printLocationAndOp(out, exec, location, it, "new_func_exp");
1291             out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
1292             break;
1293         }
1294         case op_call: {
1295             printCallOp(out, exec, location, it, "call", DumpCaches, hasPrintedProfiling, callLinkInfos);
1296             break;
1297         }
1298         case op_call_eval: {
1299             printCallOp(out, exec, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling, callLinkInfos);
1300             break;
1301         }
1302             
1303         case op_construct_varargs:
1304         case op_call_varargs: {
1305             int result = (++it)->u.operand;
1306             int callee = (++it)->u.operand;
1307             int thisValue = (++it)->u.operand;
1308             int arguments = (++it)->u.operand;
1309             int firstFreeRegister = (++it)->u.operand;
1310             int varArgOffset = (++it)->u.operand;
1311             ++it;
1312             printLocationAndOp(out, exec, location, it, opcode == op_call_varargs ? "call_varargs" : "construct_varargs");
1313             out.printf("%s, %s, %s, %s, %d, %d", registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister, varArgOffset);
1314             dumpValueProfiling(out, it, hasPrintedProfiling);
1315             break;
1316         }
1317
1318         case op_tear_off_arguments: {
1319             int r0 = (++it)->u.operand;
1320             int r1 = (++it)->u.operand;
1321             printLocationAndOp(out, exec, location, it, "tear_off_arguments");
1322             out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
1323             break;
1324         }
1325         case op_ret: {
1326             int r0 = (++it)->u.operand;
1327             printLocationOpAndRegisterOperand(out, exec, location, it, "ret", r0);
1328             break;
1329         }
1330         case op_construct: {
1331             printCallOp(out, exec, location, it, "construct", DumpCaches, hasPrintedProfiling, callLinkInfos);
1332             break;
1333         }
1334         case op_strcat: {
1335             int r0 = (++it)->u.operand;
1336             int r1 = (++it)->u.operand;
1337             int count = (++it)->u.operand;
1338             printLocationAndOp(out, exec, location, it, "strcat");
1339             out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), count);
1340             break;
1341         }
1342         case op_to_primitive: {
1343             int r0 = (++it)->u.operand;
1344             int r1 = (++it)->u.operand;
1345             printLocationAndOp(out, exec, location, it, "to_primitive");
1346             out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
1347             break;
1348         }
1349         case op_get_enumerable_length: {
1350             int dst = it[1].u.operand;
1351             int base = it[2].u.operand;
1352             printLocationAndOp(out, exec, location, it, "get_enumerable_length");
1353             out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
1354             it += OPCODE_LENGTH(op_get_enumerable_length) - 1;
1355             break;
1356         }
1357         case op_has_indexed_property: {
1358             int dst = it[1].u.operand;
1359             int base = it[2].u.operand;
1360             int propertyName = it[3].u.operand;
1361             ArrayProfile* arrayProfile = it[4].u.arrayProfile;
1362             printLocationAndOp(out, exec, location, it, "has_indexed_property");
1363             out.printf("%s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), arrayProfile);
1364             it += OPCODE_LENGTH(op_has_indexed_property) - 1;
1365             break;
1366         }
1367         case op_has_structure_property: {
1368             int dst = it[1].u.operand;
1369             int base = it[2].u.operand;
1370             int propertyName = it[3].u.operand;
1371             int enumerator = it[4].u.operand;
1372             printLocationAndOp(out, exec, location, it, "has_structure_property");
1373             out.printf("%s, %s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(enumerator).data());
1374             it += OPCODE_LENGTH(op_has_structure_property) - 1;
1375             break;
1376         }
1377         case op_has_generic_property: {
1378             int dst = it[1].u.operand;
1379             int base = it[2].u.operand;
1380             int propertyName = it[3].u.operand;
1381             printLocationAndOp(out, exec, location, it, "has_generic_property");
1382             out.printf("%s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data());
1383             it += OPCODE_LENGTH(op_has_generic_property) - 1;
1384             break;
1385         }
1386         case op_get_direct_pname: {
1387             int dst = it[1].u.operand;
1388             int base = it[2].u.operand;
1389             int propertyName = it[3].u.operand;
1390             int index = it[4].u.operand;
1391             int enumerator = it[5].u.operand;
1392             ValueProfile* profile = it[6].u.profile;
1393             printLocationAndOp(out, exec, location, it, "get_direct_pname");
1394             out.printf("%s, %s, %s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(index).data(), registerName(enumerator).data(), profile);
1395             it += OPCODE_LENGTH(op_get_direct_pname) - 1;
1396             break;
1398         }
1399         case op_get_structure_property_enumerator: {
1400             int dst = it[1].u.operand;
1401             int base = it[2].u.operand;
1402             printLocationAndOp(out, exec, location, it, "get_structure_property_enumerator");
1403             out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
1404             it += OPCODE_LENGTH(op_get_structure_property_enumerator) - 1;
1405             break;
1406         }
1407         case op_get_generic_property_enumerator: {
1408             int dst = it[1].u.operand;
1409             int base = it[2].u.operand;
1410             int length = it[3].u.operand;
1411             int structureEnumerator = it[4].u.operand;
1412             printLocationAndOp(out, exec, location, it, "get_generic_property_enumerator");
1413             out.printf("%s, %s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(length).data(), registerName(structureEnumerator).data());
1414             it += OPCODE_LENGTH(op_get_generic_property_enumerator) - 1;
1415             break;
1416         }
1417         case op_next_enumerator_pname: {
1418             int dst = it[1].u.operand;
1419             int enumerator = it[2].u.operand;
1420             int index = it[3].u.operand;
1421             printLocationAndOp(out, exec, location, it, "next_enumerator_pname");
1422             out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data());
1423             it += OPCODE_LENGTH(op_next_enumerator_pname) - 1;
1424             break;
1425         }
1426         case op_to_index_string: {
1427             int dst = it[1].u.operand;
1428             int index = it[2].u.operand;
1429             printLocationAndOp(out, exec, location, it, "to_index_string");
1430             out.printf("%s, %s", registerName(dst).data(), registerName(index).data());
1431             it += OPCODE_LENGTH(op_to_index_string) - 1;
1432             break;
1433         }
1434         case op_push_with_scope: {
1435             int dst = (++it)->u.operand;
1436             int newScope = (++it)->u.operand;
1437             printLocationAndOp(out, exec, location, it, "push_with_scope");
1438             out.printf("%s, %s", registerName(dst).data(), registerName(newScope).data());
1439             break;
1440         }
1441         case op_pop_scope: {
1442             int r0 = (++it)->u.operand;
1443             printLocationOpAndRegisterOperand(out, exec, location, it, "pop_scope", r0);
1444             break;
1445         }
1446         case op_push_name_scope: {
1447             int dst = (++it)->u.operand;
1448             int id0 = (++it)->u.operand;
1449             int r1 = (++it)->u.operand;
1450             unsigned attributes = (++it)->u.operand;
1451             JSNameScope::Type scopeType = (JSNameScope::Type)(++it)->u.operand;
1452             printLocationAndOp(out, exec, location, it, "push_name_scope");
1453             out.printf("%s, %s, %s, %u, %s", registerName(dst).data(), idName(id0, identifier(id0)).data(), registerName(r1).data(), attributes, (scopeType == JSNameScope::FunctionNameScope) ? "functionScope" : ((scopeType == JSNameScope::CatchScope) ? "catchScope" : "unknownScopeType"));
1454             break;
1455         }
1456         case op_catch: {
1457             int r0 = (++it)->u.operand;
1458             printLocationOpAndRegisterOperand(out, exec, location, it, "catch", r0);
1459             break;
1460         }
1461         case op_throw: {
1462             int r0 = (++it)->u.operand;
1463             printLocationOpAndRegisterOperand(out, exec, location, it, "throw", r0);
1464             break;
1465         }
1466         case op_throw_static_error: {
1467             int k0 = (++it)->u.operand;
1468             int k1 = (++it)->u.operand;
1469             printLocationAndOp(out, exec, location, it, "throw_static_error");
1470             out.printf("%s, %s", constantName(k0, getConstant(k0)).data(), k1 ? "true" : "false");
1471             break;
1472         }
1473         case op_debug: {
1474             int debugHookID = (++it)->u.operand;
1475             int hasBreakpointFlag = (++it)->u.operand;
1476             printLocationAndOp(out, exec, location, it, "debug");
1477             out.printf("%s %d", debugHookName(debugHookID), hasBreakpointFlag);
1478             break;
1479         }
1480         case op_profile_will_call: {
1481             int function = (++it)->u.operand;
1482             printLocationOpAndRegisterOperand(out, exec, location, it, "profile_will_call", function);
1483             break;
1484         }
1485         case op_profile_did_call: {
1486             int function = (++it)->u.operand;
1487             printLocationOpAndRegisterOperand(out, exec, location, it, "profile_did_call", function);
1488             break;
1489         }
1490         case op_end: {
1491             int r0 = (++it)->u.operand;
1492             printLocationOpAndRegisterOperand(out, exec, location, it, "end", r0);
1493             break;
1494         }
1495         case op_resolve_scope: {
1496             int r0 = (++it)->u.operand;
1497             int scope = (++it)->u.operand;
1498             int id0 = (++it)->u.operand;
1499             ResolveModeAndType modeAndType = ResolveModeAndType((++it)->u.operand);
1500             int depth = (++it)->u.operand;
1501             printLocationAndOp(out, exec, location, it, "resolve_scope");
1502             out.printf("%s, %s, %s, %u<%s|%s>, %d", registerName(r0).data(), registerName(scope).data(), idName(id0, identifier(id0)).data(),
1503                 modeAndType.operand(), resolveModeName(modeAndType.mode()), resolveTypeName(modeAndType.type()),
1504                 depth);
1505             ++it;
1506             break;
1507         }
1508         case op_get_from_scope: {
1509             int r0 = (++it)->u.operand;
1510             int r1 = (++it)->u.operand;
1511             int id0 = (++it)->u.operand;
1512             ResolveModeAndType modeAndType = ResolveModeAndType((++it)->u.operand);
1513             ++it; // Structure
1514             int operand = (++it)->u.operand; // Operand
1515             ++it; // Skip value profile.
1516             printLocationAndOp(out, exec, location, it, "get_from_scope");
1517             out.printf("%s, %s, %s, %u<%s|%s>, <structure>, %d",
1518                 registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data(),
1519                 modeAndType.operand(), resolveModeName(modeAndType.mode()), resolveTypeName(modeAndType.type()),
1520                 operand);
1521             break;
1522         }
1523         case op_put_to_scope: {
1524             int r0 = (++it)->u.operand;
1525             int id0 = (++it)->u.operand;
1526             int r1 = (++it)->u.operand;
1527             ResolveModeAndType modeAndType = ResolveModeAndType((++it)->u.operand);
1528             ++it; // Structure
1529             int operand = (++it)->u.operand; // Operand
1530             printLocationAndOp(out, exec, location, it, "put_to_scope");
1531             out.printf("%s, %s, %s, %u<%s|%s>, <structure>, %d",
1532                 registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data(),
1533                 modeAndType.operand(), resolveModeName(modeAndType.mode()), resolveTypeName(modeAndType.type()),
1534                 operand);
1535             break;
1536         }
1537         default:
1538             RELEASE_ASSERT_NOT_REACHED();
1539     }
1540
1541     dumpRareCaseProfile(out, "rare case: ", rareCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
1542     dumpRareCaseProfile(out, "special fast case: ", specialFastCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
1543     
1544 #if ENABLE(DFG_JIT)
1545     Vector<DFG::FrequentExitSite> exitSites = exitProfile().exitSitesFor(location);
1546     if (!exitSites.isEmpty()) {
1547         out.print(" !! frequent exits: ");
1548         CommaPrinter comma;
1549         for (unsigned i = 0; i < exitSites.size(); ++i)
1550             out.print(comma, exitSites[i].kind(), " ", exitSites[i].jitType());
1551     }
1552 #else // ENABLE(DFG_JIT)
1553     UNUSED_PARAM(location);
1554 #endif // ENABLE(DFG_JIT)
1555     out.print("\n");
1556 }
1557
1558 void CodeBlock::dumpBytecode(
1559     PrintStream& out, unsigned bytecodeOffset,
1560     const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
1561 {
1562     ExecState* exec = m_globalObject->globalExec();
1563     const Instruction* it = instructions().begin() + bytecodeOffset;
1564     dumpBytecode(out, exec, instructions().begin(), it, stubInfos, callLinkInfos);
1565 }
1566
1567 #define FOR_EACH_MEMBER_VECTOR(macro) \
1568     macro(instructions) \
1569     macro(callLinkInfos) \
1570     macro(linkedCallerList) \
1571     macro(identifiers) \
1572     macro(functionExpressions) \
1573     macro(constantRegisters)
1574
1575 #define FOR_EACH_MEMBER_VECTOR_RARE_DATA(macro) \
1576     macro(regexps) \
1577     macro(functions) \
1578     macro(exceptionHandlers) \
1579     macro(switchJumpTables) \
1580     macro(stringSwitchJumpTables) \
1581     macro(evalCodeCache) \
1582     macro(expressionInfo) \
1583     macro(lineInfo) \
1584     macro(callReturnIndexVector)
1585
1586 template<typename T>
1587 static size_t sizeInBytes(const Vector<T>& vector)
1588 {
1589     return vector.capacity() * sizeof(T);
1590 }
1591
1592 namespace {
1593
1594 class PutToScopeFireDetail : public FireDetail {
1595 public:
1596     PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
1597         : m_codeBlock(codeBlock)
1598         , m_ident(ident)
1599     {
1600     }
1601     
1602     virtual void dump(PrintStream& out) const override
1603     {
1604         out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
1605     }
1606     
1607 private:
1608     CodeBlock* m_codeBlock;
1609     const Identifier& m_ident;
1610 };
1611
1612 } // anonymous namespace
1613
1614 CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
1615     : m_globalObject(other.m_globalObject)
1616     , m_heap(other.m_heap)
1617     , m_numCalleeRegisters(other.m_numCalleeRegisters)
1618     , m_numVars(other.m_numVars)
1619     , m_isConstructor(other.m_isConstructor)
1620     , m_shouldAlwaysBeInlined(true)
1621     , m_didFailFTLCompilation(false)
1622     , m_hasBeenCompiledWithFTL(false)
1623     , m_unlinkedCode(*other.m_vm, other.m_ownerExecutable.get(), other.m_unlinkedCode.get())
1624     , m_hasDebuggerStatement(false)
1625     , m_steppingMode(SteppingModeDisabled)
1626     , m_numBreakpoints(0)
1627     , m_ownerExecutable(*other.m_vm, other.m_ownerExecutable.get(), other.m_ownerExecutable.get())
1628     , m_vm(other.m_vm)
1629     , m_instructions(other.m_instructions)
1630     , m_thisRegister(other.m_thisRegister)
1631     , m_scopeRegister(other.m_scopeRegister)
1632     , m_argumentsRegister(other.m_argumentsRegister)
1633     , m_lexicalEnvironmentRegister(other.m_lexicalEnvironmentRegister)
1634     , m_isStrictMode(other.m_isStrictMode)
1635     , m_needsActivation(other.m_needsActivation)
1636     , m_mayBeExecuting(false)
1637     , m_visitAggregateHasBeenCalled(false)
1638     , m_source(other.m_source)
1639     , m_sourceOffset(other.m_sourceOffset)
1640     , m_firstLineColumnOffset(other.m_firstLineColumnOffset)
1641     , m_codeType(other.m_codeType)
1642     , m_constantRegisters(other.m_constantRegisters)
1643     , m_functionDecls(other.m_functionDecls)
1644     , m_functionExprs(other.m_functionExprs)
1645     , m_osrExitCounter(0)
1646     , m_optimizationDelayCounter(0)
1647     , m_reoptimizationRetryCounter(0)
1648     , m_hash(other.m_hash)
1649 #if ENABLE(JIT)
1650     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
1651 #endif
1652 {
1653     ASSERT(m_heap->isDeferred());
1654     ASSERT(m_scopeRegister.isLocal());
1655
1656     if (SymbolTable* symbolTable = other.symbolTable())
1657         m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable);
1658     
1659     setNumParameters(other.numParameters());
1660     optimizeAfterWarmUp();
1661     jitAfterWarmUp();
1662
1663     if (other.m_rareData) {
1664         createRareDataIfNecessary();
1665         
1666         m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
1667         m_rareData->m_constantBuffers = other.m_rareData->m_constantBuffers;
1668         m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
1669         m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
1670     }
1671     
1672     m_heap->m_codeBlocks.add(this);
1673     m_heap->reportExtraMemoryCost(sizeof(CodeBlock));
1674 }
1675
1676 CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
1677     : m_globalObject(scope->globalObject()->vm(), ownerExecutable, scope->globalObject())
1678     , m_heap(&m_globalObject->vm().heap)
1679     , m_numCalleeRegisters(unlinkedCodeBlock->m_numCalleeRegisters)
1680     , m_numVars(unlinkedCodeBlock->m_numVars)
1681     , m_isConstructor(unlinkedCodeBlock->isConstructor())
1682     , m_shouldAlwaysBeInlined(true)
1683     , m_didFailFTLCompilation(false)
1684     , m_hasBeenCompiledWithFTL(false)
1685     , m_unlinkedCode(m_globalObject->vm(), ownerExecutable, unlinkedCodeBlock)
1686     , m_hasDebuggerStatement(false)
1687     , m_steppingMode(SteppingModeDisabled)
1688     , m_numBreakpoints(0)
1689     , m_ownerExecutable(m_globalObject->vm(), ownerExecutable, ownerExecutable)
1690     , m_vm(unlinkedCodeBlock->vm())
1691     , m_thisRegister(unlinkedCodeBlock->thisRegister())
1692     , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
1693     , m_argumentsRegister(unlinkedCodeBlock->argumentsRegister())
1694     , m_lexicalEnvironmentRegister(unlinkedCodeBlock->activationRegister())
1695     , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
1696     , m_needsActivation(unlinkedCodeBlock->hasActivationRegister() && unlinkedCodeBlock->codeType() == FunctionCode)
1697     , m_mayBeExecuting(false)
1698     , m_visitAggregateHasBeenCalled(false)
1699     , m_source(sourceProvider)
1700     , m_sourceOffset(sourceOffset)
1701     , m_firstLineColumnOffset(firstLineColumnOffset)
1702     , m_codeType(unlinkedCodeBlock->codeType())
1703     , m_osrExitCounter(0)
1704     , m_optimizationDelayCounter(0)
1705     , m_reoptimizationRetryCounter(0)
1706 #if ENABLE(JIT)
1707     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
1708 #endif
1709 {
1710     ASSERT(m_heap->isDeferred());
1711     ASSERT(m_scopeRegister.isLocal());
1712
1713     bool didCloneSymbolTable = false;
1714     
1715     if (SymbolTable* symbolTable = unlinkedCodeBlock->symbolTable()) {
1716         if (m_vm->typeProfiler()) {
1717             ConcurrentJITLocker locker(symbolTable->m_lock);
1718             symbolTable->prepareForTypeProfiling(locker);
1719         }
1720
1721         if (codeType() == FunctionCode && symbolTable->captureCount()) {
1722             m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable->cloneCapturedNames(*m_vm));
1723             didCloneSymbolTable = true;
1724         } else
1725             m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable);
1726     }
1727     
1728     ASSERT(m_source);
1729     setNumParameters(unlinkedCodeBlock->numParameters());
1730
1731     if (vm()->typeProfiler() || vm()->controlFlowProfiler())
1732         vm()->functionHasExecutedCache()->removeUnexecutedRange(m_ownerExecutable->sourceID(), m_ownerExecutable->typeProfilingStartOffset(), m_ownerExecutable->typeProfilingEndOffset());
1733
1734     setConstantRegisters(unlinkedCodeBlock->constantRegisters());
1735     if (unlinkedCodeBlock->usesGlobalObject())
1736         m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(*m_vm, ownerExecutable, m_globalObject.get());
1737     m_functionDecls.resizeToFit(unlinkedCodeBlock->numberOfFunctionDecls());
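         // Link each function declaration: absolute line/column positions come from the owner
         // executable's position plus the unlinked offsets (a function starting on the owner's
         // first line inherits the owner's start column; otherwise columns are made 1-based).
         // The type and control-flow profilers are also told about these not-yet-executed ranges.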
1738     for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
1739         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
1740         if (vm()->typeProfiler() || vm()->controlFlowProfiler())
1741             vm()->functionHasExecutedCache()->insertUnexecutedRange(m_ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
1742         unsigned lineCount = unlinkedExecutable->lineCount();
1743         unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset();
1744         bool startColumnIsOnOwnerStartLine = !unlinkedExecutable->firstLineOffset();
1745         unsigned startColumn = unlinkedExecutable->unlinkedBodyStartColumn() + (startColumnIsOnOwnerStartLine ? ownerExecutable->startColumn() : 1);
1746         bool endColumnIsOnStartLine = !lineCount;
1747         unsigned endColumn = unlinkedExecutable->unlinkedBodyEndColumn() + (endColumnIsOnStartLine ? startColumn : 1);
1748         unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset();
1749         unsigned sourceLength = unlinkedExecutable->sourceLength();
1750         SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine, startColumn);
1751         FunctionExecutable* executable = FunctionExecutable::create(*m_vm, code, unlinkedExecutable, firstLine, firstLine + lineCount, startColumn, endColumn);
1752         m_functionDecls[i].set(*m_vm, ownerExecutable, executable);
1753     }
1754
1755     m_functionExprs.resizeToFit(unlinkedCodeBlock->numberOfFunctionExprs());
1756     for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
1757         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
1758         if (vm()->typeProfiler() || vm()->controlFlowProfiler())
1759             vm()->functionHasExecutedCache()->insertUnexecutedRange(m_ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
1760         unsigned lineCount = unlinkedExecutable->lineCount();
1761         unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset();
1762         bool startColumnIsOnOwnerStartLine = !unlinkedExecutable->firstLineOffset();
1763         unsigned startColumn = unlinkedExecutable->unlinkedBodyStartColumn() + (startColumnIsOnOwnerStartLine ? ownerExecutable->startColumn() : 1);
1764         bool endColumnIsOnStartLine = !lineCount;
1765         unsigned endColumn = unlinkedExecutable->unlinkedBodyEndColumn() + (endColumnIsOnStartLine ? startColumn : 1);
1766         unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset();
1767         unsigned sourceLength = unlinkedExecutable->sourceLength();
1768         SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine, startColumn);
1769         FunctionExecutable* executable = FunctionExecutable::create(*m_vm, code, unlinkedExecutable, firstLine, firstLine + lineCount, startColumn, endColumn);
1770         m_functionExprs[i].set(*m_vm, ownerExecutable, executable);
1771     }
1772
1773     if (unlinkedCodeBlock->hasRareData()) {
1774         createRareDataIfNecessary();
1775         if (size_t count = unlinkedCodeBlock->constantBufferCount()) {
1776             m_rareData->m_constantBuffers.grow(count);
1777             for (size_t i = 0; i < count; i++) {
1778                 const UnlinkedCodeBlock::ConstantBuffer& buffer = unlinkedCodeBlock->constantBuffer(i);
1779                 m_rareData->m_constantBuffers[i] = buffer;
1780             }
1781         }
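             // Link exception handlers: the start/end/target offsets are copied as-is, while
             // scopeDepth is rebased by the depth of the scope we are linking against. Under
             // ENABLE(JIT), nativeCode starts out pointing at the LLInt op_catch entry; JIT
             // compilation may retarget it later.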
1782         if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
1783             m_rareData->m_exceptionHandlers.resizeToFit(count);
1784             size_t nonLocalScopeDepth = scope->depth();
1785             for (size_t i = 0; i < count; i++) {
1786                 const UnlinkedHandlerInfo& handler = unlinkedCodeBlock->exceptionHandler(i);
1787                 m_rareData->m_exceptionHandlers[i].start = handler.start;
1788                 m_rareData->m_exceptionHandlers[i].end = handler.end;
1789                 m_rareData->m_exceptionHandlers[i].target = handler.target;
1790                 m_rareData->m_exceptionHandlers[i].scopeDepth = nonLocalScopeDepth + handler.scopeDepth;
1791 #if ENABLE(JIT)
1792                 m_rareData->m_exceptionHandlers[i].nativeCode = CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(op_catch)));
1793 #endif
1794             }
1795         }
1796
1797         if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
1798             m_rareData->m_stringSwitchJumpTables.grow(count);
1799             for (size_t i = 0; i < count; i++) {
1800                 UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
1801                 UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
1802                 for (; ptr != end; ++ptr) {
1803                     OffsetLocation offset;
1804                     offset.branchOffset = ptr->value;
1805                     m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
1806                 }
1807             }
1808         }
1809
1810         if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
1811             m_rareData->m_switchJumpTables.grow(count);
1812             for (size_t i = 0; i < count; i++) {
1813                 UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
1814                 SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
1815                 destTable.branchOffsets = sourceTable.branchOffsets;
1816                 destTable.min = sourceTable.min;
1817             }
1818         }
1819     }
1820
1821     // Allocate metadata buffers for the bytecode
1822     if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
1823         m_llintCallLinkInfos.resizeToFit(size);
1824     if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
1825         m_arrayProfiles.grow(size);
1826     if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
1827         m_arrayAllocationProfiles.resizeToFit(size);
1828     if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
1829         m_valueProfiles.resizeToFit(size);
1830     if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
1831         m_objectAllocationProfiles.resizeToFit(size);
1832
1833     // Copy and translate the UnlinkedInstructions
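         // "Linking" turns the compact unlinked stream into this block's fat Instruction array:
         // the opcode field becomes whatever Interpreter::getOpcode() hands back (an opcode ID,
         // or a direct-threaded code pointer on computed-goto builds), and the metadata slots of
         // profiled opcodes are rewritten below to point at this block's ValueProfiles,
         // ArrayProfiles, LLIntCallLinkInfos, watchpoint sets, and resolved scope information.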
1834     unsigned instructionCount = unlinkedCodeBlock->instructions().count();
1835     UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions());
1836
1837     Vector<Instruction, 0, UnsafeVectorOverflow> instructions(instructionCount);
1838     for (unsigned i = 0; !instructionReader.atEnd(); ) {
1839         const UnlinkedInstruction* pc = instructionReader.next();
1840
1841         unsigned opLength = opcodeLength(pc[0].u.opcode);
1842
1843         instructions[i] = vm()->interpreter->getOpcode(pc[0].u.opcode);
1844         for (size_t j = 1; j < opLength; ++j) {
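                 // Instruction slots are pointer-sized; when operands are only 32 bits wide
                 // (64-bit targets), clear the whole slot first so a later read through
                 // u.pointer sees a fully initialized value.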
1845             if (sizeof(int32_t) != sizeof(intptr_t))
1846                 instructions[i + j].u.pointer = 0;
1847             instructions[i + j].u.operand = pc[j].u.operand;
1848         }
1849         switch (pc[0].u.opcode) {
1850         case op_has_indexed_property: {
1851             int arrayProfileIndex = pc[opLength - 1].u.operand;
1852             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1853
1854             instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
1855             break;
1856         }
1857         case op_call_varargs:
1858         case op_construct_varargs:
1859         case op_get_by_val:
1860         case op_get_argument_by_val: {
1861             int arrayProfileIndex = pc[opLength - 2].u.operand;
1862             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1863
1864             instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
1865             FALLTHROUGH;
1866         }
1867         case op_get_direct_pname:
1868         case op_get_by_id: {
1869             ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
1870             ASSERT(profile->m_bytecodeOffset == -1);
1871             profile->m_bytecodeOffset = i;
1872             instructions[i + opLength - 1] = profile;
1873             break;
1874         }
1875         case op_put_by_val: {
1876             int arrayProfileIndex = pc[opLength - 1].u.operand;
1877             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1878             instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
1879             break;
1880         }
1881         case op_put_by_val_direct: {
1882             int arrayProfileIndex = pc[opLength - 1].u.operand;
1883             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1884             instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
1885             break;
1886         }
1887
1888         case op_new_array:
1889         case op_new_array_buffer:
1890         case op_new_array_with_size: {
1891             int arrayAllocationProfileIndex = pc[opLength - 1].u.operand;
1892             instructions[i + opLength - 1] = &m_arrayAllocationProfiles[arrayAllocationProfileIndex];
1893             break;
1894         }
1895         case op_new_object: {
1896             int objectAllocationProfileIndex = pc[opLength - 1].u.operand;
1897             ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex];
1898             int inferredInlineCapacity = pc[opLength - 2].u.operand;
1899
1900             instructions[i + opLength - 1] = objectAllocationProfile;
1901             objectAllocationProfile->initialize(*vm(),
1902                 m_ownerExecutable.get(), m_globalObject->objectPrototype(), inferredInlineCapacity);
1903             break;
1904         }
1905
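             // Call bytecodes carry three metadata slots that need linking: the last slot gets
             // the result ValueProfile, the second-to-last an ArrayProfile, and slot 5 this
             // block's LLIntCallLinkInfo for LLInt call caching (op_construct below is wired
             // the same way, minus the ArrayProfile).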
1906         case op_call:
1907         case op_call_eval: {
1908             ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
1909             ASSERT(profile->m_bytecodeOffset == -1);
1910             profile->m_bytecodeOffset = i;
1911             instructions[i + opLength - 1] = profile;
1912             int arrayProfileIndex = pc[opLength - 2].u.operand;
1913             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1914             instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
1915             instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
1916             break;
1917         }
1918         case op_construct: {
1919             instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
1920             ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
1921             ASSERT(profile->m_bytecodeOffset == -1);
1922             profile->m_bytecodeOffset = i;
1923             instructions[i + opLength - 1] = profile;
1924             break;
1925         }
1926         case op_get_by_id_out_of_line:
1927         case op_get_array_length:
1928             CRASH();
1929
1930         case op_init_global_const_nop: {
1931             ASSERT(codeType() == GlobalCode);
1932             Identifier ident = identifier(pc[4].u.operand);
1933             SymbolTableEntry entry = m_globalObject->symbolTable()->get(ident.impl());
1934             if (entry.isNull())
1935                 break;
1936
1937             instructions[i + 0] = vm()->interpreter->getOpcode(op_init_global_const);
1938             instructions[i + 1] = &m_globalObject->registerAt(entry.getIndex());
1939             break;
1940         }
1941
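             // Scope-access bytecodes are resolved against the actual scope chain at link time:
             // JSScope::abstractResolve() turns the generic ResolveType emitted by the bytecode
             // generator into a concrete one, and the extra slots are filled with the depth,
             // lexical environment, structure, watchpoint set, or operand that the concrete
             // kind needs. op_get_from_scope and op_put_to_scope below follow the same pattern.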
1942         case op_resolve_scope: {
1943             const Identifier& ident = identifier(pc[3].u.operand);
1944             ResolveType type = static_cast<ResolveType>(pc[4].u.operand);
1945             if (type == LocalClosureVar) {
1946                 instructions[i + 4].u.operand = ClosureVar;
1947                 break;
1948             }
1949
1950             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), needsActivation(), scope, ident, Get, type);
1951             instructions[i + 4].u.operand = op.type;
1952             instructions[i + 5].u.operand = op.depth;
1953             if (op.lexicalEnvironment)
1954                 instructions[i + 6].u.lexicalEnvironment.set(*vm(), ownerExecutable, op.lexicalEnvironment);
1955             break;
1956         }
1957
1958         case op_get_from_scope: {
1959             ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
1960             ASSERT(profile->m_bytecodeOffset == -1);
1961             profile->m_bytecodeOffset = i;
1962             instructions[i + opLength - 1] = profile;
1963
1964             // get_from_scope dst, scope, id, ResolveModeAndType, Structure, Operand
1965
1966             const Identifier& ident = identifier(pc[3].u.operand);
1967             ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
1968             if (modeAndType.type() == LocalClosureVar) {
1969                 instructions[i + 4] = ResolveModeAndType(modeAndType.mode(), ClosureVar).operand();
1970                 break;
1971             }
1972
1973             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), needsActivation(), scope, ident, Get, modeAndType.type());
1974
1975             instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
1976             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks)
1977                 instructions[i + 5].u.watchpointSet = op.watchpointSet;
1978             else if (op.structure)
1979                 instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
1980             instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
1981
1982             break;
1983         }
1984
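             // For local closure variables, put_to_scope additionally installs the variable's
             // watchpoint set from the (cloned) symbol table, so stores through this bytecode
             // can fire the watchpoint when the variable is overwritten; 'arguments' and
             // non-watchable variables get a null set instead.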
1985         case op_put_to_scope: {
1986             // put_to_scope scope, id, value, ResolveModeAndType, Structure, Operand
1987             const Identifier& ident = identifier(pc[2].u.operand);
1988
1989             ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
1990             if (modeAndType.type() == LocalClosureVar) {
1991                 bool isWatchableVariable = pc[5].u.operand;
1992                 if (!isWatchableVariable) {
1993                     instructions[i + 5].u.watchpointSet = nullptr;
1994                     break;
1995                 }
1996                 StringImpl* uid = ident.impl();
1997                 RELEASE_ASSERT(didCloneSymbolTable);
1998                 if (ident != m_vm->propertyNames->arguments) {
1999                     ConcurrentJITLocker locker(m_symbolTable->m_lock);
2000                     SymbolTable::Map::iterator iter = m_symbolTable->find(locker, uid);
2001                     ASSERT(iter != m_symbolTable->end(locker));
2002                     iter->value.prepareToWatch(symbolTable());
2003                     instructions[i + 5].u.watchpointSet = iter->value.watchpointSet();
2004                 } else
2005                     instructions[i + 5].u.watchpointSet = nullptr;
2006                 break;
2007             }
2008
2009             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), needsActivation(), scope, ident, Put, modeAndType.type());
2010
2011             instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
2012             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks)
2013                 instructions[i + 5].u.watchpointSet = op.watchpointSet;
2014             else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
2015                 if (op.watchpointSet)
2016                     op.watchpointSet->invalidate(PutToScopeFireDetail(this, ident));
2017             } else if (op.structure)
2018                 instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
2019             instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
2020
2021             break;
2022         }
2023
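             // op_profile_type: map this profiling site to a TypeLocation, stored in slot 2.
             // The flag operand says how to find the variable's global ID (through scope
             // resolution, the local symbol table, or not at all), and the TypeLocationCache
             // hands back an existing location when one was already created for the same
             // variable and source range.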
2024         case op_profile_type: {
2025             RELEASE_ASSERT(vm()->typeProfiler());
2026             // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType?
2027             size_t instructionOffset = i + opLength - 1;
2028             unsigned divotStart, divotEnd;
2029             GlobalVariableID globalVariableID = 0;
2030             RefPtr<TypeSet> globalTypeSet;
2031             bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
2032             VirtualRegister profileRegister(pc[1].u.operand);
2033             ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand);
2034             SymbolTable* symbolTable = nullptr;
2035
2036             switch (flag) {
2037             case ProfileTypeBytecodePutToScope:
2038             case ProfileTypeBytecodeGetFromScope: {
2039                 const Identifier& ident = identifier(pc[4].u.operand);
2040                 ResolveType type = static_cast<ResolveType>(pc[5].u.operand);
2041                 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), needsActivation(), scope, ident, (flag == ProfileTypeBytecodeGetFromScope ? Get : Put), type);
2042
2043                 // FIXME: handle other values for op.type here, and also consider what to do when we can't statically determine the globalID
2044                 // https://bugs.webkit.org/show_bug.cgi?id=135184
2045                 if (op.type == ClosureVar)
2046                     symbolTable = op.lexicalEnvironment->symbolTable();
2047                 else if (op.type == GlobalVar)
2048                     symbolTable = m_globalObject.get()->symbolTable();
2049                 
2050                 if (symbolTable) {
2051                     ConcurrentJITLocker locker(symbolTable->m_lock);
2052                     // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
2053                     symbolTable->prepareForTypeProfiling(locker);
2054                     globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), *vm());
2055                     globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), *vm());
2056                 } else
2057                     globalVariableID = TypeProfilerNoGlobalIDExists;
2058
2059                 break;
2060             }
2061             case ProfileTypeBytecodePutToLocalScope:
2062             case ProfileTypeBytecodeGetFromLocalScope: {
2063                 const Identifier& ident = identifier(pc[4].u.operand);
2064                 symbolTable = m_symbolTable.get();
2065                 ConcurrentJITLocker locker(symbolTable->m_lock);
2066                 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
2067                 symbolTable->prepareForTypeProfiling(locker);
2068                 globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), *vm());
2069                 globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), *vm());
2070
2071                 break;
2072             }
2073
2074             case ProfileTypeBytecodeHasGlobalID: {
2075                 symbolTable = m_symbolTable.get();
2076                 ConcurrentJITLocker locker(symbolTable->m_lock);
2077                 globalVariableID = symbolTable->uniqueIDForRegister(locker, profileRegister.offset(), *vm());
2078                 globalTypeSet = symbolTable->globalTypeSetForRegister(locker, profileRegister.offset(), *vm());
2079                 break;
2080             }
2081             case ProfileTypeBytecodeDoesNotHaveGlobalID: 
2082             case ProfileTypeBytecodeFunctionArgument: {
2083                 globalVariableID = TypeProfilerNoGlobalIDExists;
2084                 break;
2085             }
2086             case ProfileTypeBytecodeFunctionReturnStatement: {
2087                 RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
2088                 globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
2089                 globalVariableID = TypeProfilerReturnStatement;
2090                 if (!shouldAnalyze) {
2091                     // Because a return statement can be added implicitly to return undefined at the end of a function,
2092                     // and these nodes don't emit expression ranges because they aren't in the actual source text of
2093                     // the user's program, give the type profiler some range to identify these return statements.
2094                     // Currently, the text offset used for identification is the opening brace of the function,
2095                     // and it is stored in TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
2096                     divotStart = divotEnd = m_sourceOffset;
2097                     shouldAnalyze = true;
2098                 }
2099                 break;
2100             }
2101             }
2102
2103             std::pair<TypeLocation*, bool> locationPair = vm()->typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
2104                 m_ownerExecutable->sourceID(), divotStart, divotEnd, globalTypeSet, vm());
2105             TypeLocation* location = locationPair.first;
2106             bool isNewLocation = locationPair.second;
2107
2108             if (flag == ProfileTypeBytecodeFunctionReturnStatement)
2109                 location->m_divotForFunctionOffsetIfReturnStatement = m_sourceOffset;
2110
2111             if (shouldAnalyze && isNewLocation)
2112                 vm()->typeProfiler()->insertNewLocation(location);
2113
2114             instructions[i + 2].u.location = location;
2115             break;
2116         }
2117
2118         case op_debug: {
2119             if (pc[1].u.index == DidReachBreakpoint)
2120                 m_hasDebuggerStatement = true;
2121             break;
2122         }
2123
2124         default:
2125             break;
2126         }
2127         i += opLength;
2128     }
2129
2130     if (vm()->controlFlowProfiler())
2131         insertBasicBlockBoundariesForControlFlowProfiler(instructions);
2132
2133     m_instructions = WTF::RefCountedArray<Instruction>(instructions);
2134
2135     // Set optimization thresholds only after m_instructions is initialized, since these
2136     // rely on the instruction count (and are in theory permitted to also inspect the
2137     // instruction stream to more accurately assess the cost of tier-up).
2138     optimizeAfterWarmUp();
2139     jitAfterWarmUp();
2140
2141     // If a concurrent thread will want the code block's hash, then compute it here
2142     // synchronously.
2143     if (Options::alwaysComputeHash())
2144         hash();
2145
2146     if (Options::dumpGeneratedBytecodes())
2147         dumpBytecode();
2148     
2149     m_heap->m_codeBlocks.add(this);
2150     m_heap->reportExtraMemoryCost(sizeof(CodeBlock) + m_instructions.size() * sizeof(Instruction));
2151 }
2152
2153 CodeBlock::~CodeBlock()
2154 {
2155     if (m_vm->m_perBytecodeProfiler)
2156         m_vm->m_perBytecodeProfiler->notifyDestruction(this);
2157     
2158 #if ENABLE(VERBOSE_VALUE_PROFILE)
2159     dumpValueProfiles();
2160 #endif
2161     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
2162         m_incomingLLIntCalls.begin()->remove();
2163 #if ENABLE(JIT)
2164     // We may be destroyed before any CodeBlocks that refer to us are destroyed.
2165     // Consider that two CodeBlocks become unreachable at the same time. There
2166     // is no guarantee about the order in which the CodeBlocks are destroyed.
2167     // So, if we don't remove incoming calls, and get destroyed before the
2168     // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
2169     // destructor will try to remove nodes from our (no longer valid) linked list.
2170     while (m_incomingCalls.begin() != m_incomingCalls.end())
2171         m_incomingCalls.begin()->remove();
2172     while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
2173         m_incomingPolymorphicCalls.begin()->remove();
2174     
2175     // Note that our outgoing calls will be removed from other CodeBlocks'
2176     // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
2177     // destructors.
2178
2179     for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter)
2180         (*iter)->deref();
2181 #endif // ENABLE(JIT)
2182 }
2183
2184 void CodeBlock::setNumParameters(int newValue)
2185 {
2186     m_numParameters = newValue;
2187
2188     m_argumentValueProfiles.resizeToFit(newValue);
2189 }
2190
2191 void EvalCodeCache::visitAggregate(SlotVisitor& visitor)
2192 {
2193     EvalCacheMap::iterator end = m_cacheMap.end();
2194     for (EvalCacheMap::iterator ptr = m_cacheMap.begin(); ptr != end; ++ptr)
2195         visitor.append(&ptr->value);
2196 }
2197
2198 CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
2199 {
2200 #if ENABLE(FTL_JIT)
2201     if (jitType() != JITCode::DFGJIT)
2202         return 0;
2203     DFG::JITCode* jitCode = m_jitCode->dfg();
2204     return jitCode->osrEntryBlock.get();
2205 #else // ENABLE(FTL_JIT)
2206     return 0;
2207 #endif // ENABLE(FTL_JIT)
2208 }
2209
2210 void CodeBlock::visitAggregate(SlotVisitor& visitor)
2211 {
2212 #if ENABLE(PARALLEL_GC)
2213     // I may be asked to scan myself more than once, and it may even happen concurrently.
2214     // To this end, use a CAS loop to check if I've been called already. Only one thread
2215     // may proceed past this point - whichever one wins the CAS race.
2216     unsigned oldValue;
2217     do {
2218         oldValue = m_visitAggregateHasBeenCalled;
2219         if (oldValue) {
2220             // Looks like someone else won! Return immediately to ensure that we don't
2221             // trace the same CodeBlock concurrently. Doing so is hazardous since we will
2222             // be mutating the state of ValueProfiles, which contain JSValues, which can
2223             // have word-tearing on 32-bit, leading to awesome timing-dependent crashes
2224             // that are nearly impossible to track down.
2225             
2226             // Also note that it must be safe to return early as soon as we see the
2227             // value true (well, (unsigned)1), since once a GC thread is in this method
2228             // and has won the CAS race (i.e. was responsible for setting the value true)
2229             // it will definitely complete the rest of this method before declaring
2230             // termination.
2231             return;
2232         }
2233     } while (!WTF::weakCompareAndSwap(&m_visitAggregateHasBeenCalled, 0, 1));
2234 #endif // ENABLE(PARALLEL_GC)
2235     
2236     if (!!m_alternative)
2237         m_alternative->visitAggregate(visitor);
2238     
2239     if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
2240         otherBlock->visitAggregate(visitor);
2241
2242     visitor.reportExtraMemoryUsage(ownerExecutable(), sizeof(CodeBlock));
2243     if (m_jitCode)
2244         visitor.reportExtraMemoryUsage(ownerExecutable(), m_jitCode->size());
2245     if (m_instructions.size()) {
2246         // Divide by refCount() because m_instructions points to something that is shared
2247         // by multiple CodeBlocks, and we only want to count it towards the heap size once.
2248         // Having each CodeBlock report only its proportional share of the size is one way
2249         // of accomplishing this.
2250         visitor.reportExtraMemoryUsage(ownerExecutable(), m_instructions.size() * sizeof(Instruction) / m_instructions.refCount());
2251     }
2252
2253     visitor.append(&m_unlinkedCode);
2254
2255     // There are three things that may use unconditional finalizers: lazy bytecode freeing,
2256     // inline cache clearing, and jettisoning. The probability that we want to do at
2257     // least one of those things is quite close to 1. So we add one no matter what,
2258     // and when it runs, it figures out whether it has any work to do.
2259     visitor.addUnconditionalFinalizer(this);
2260     
2261     m_allTransitionsHaveBeenMarked = false;
2262     
2263     if (shouldImmediatelyAssumeLivenessDuringScan()) {
2264         // This code block is live, so scan all references strongly and return.
2265         stronglyVisitStrongReferences(visitor);
2266         stronglyVisitWeakReferences(visitor);
2267         propagateTransitions(visitor);
2268         return;
2269     }
2270     
2271     // There are two things that we use weak reference harvesters for: DFG fixpoint for
2272     // jettisoning, and trying to find structures that would be live based on some
2273     // inline cache. So it makes sense to register them regardless.
2274     visitor.addWeakReferenceHarvester(this);
2275
2276 #if ENABLE(DFG_JIT)
2277     // We get here if we're live in the sense that our owner executable is live,
2278     // but we're not yet live for sure in another sense: we may yet decide that this
2279     // code block should be jettisoned based on its outgoing weak references being
2280     // stale. Set a flag to indicate that we're still assuming that we're dead, and
2281     // perform one round of determining if we're live. The GC may determine, based on
2282     // either us marking additional objects, or by other objects being marked for
2283     // other reasons, that this iteration should run again; it will notify us of this
2284     // decision by calling harvestWeakReferences().
2285     
2286     m_jitCode->dfgCommon()->livenessHasBeenProved = false;
2287     
2288     propagateTransitions(visitor);
2289     determineLiveness(visitor);
2290 #else // ENABLE(DFG_JIT)
2291     RELEASE_ASSERT_NOT_REACHED();
2292 #endif // ENABLE(DFG_JIT)
2293 }
2294
2295 bool CodeBlock::shouldImmediatelyAssumeLivenessDuringScan()
2296 {
2297 #if ENABLE(DFG_JIT)
2298     // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
2299     // their weak references go stale. So if a baseline JIT CodeBlock gets
2300     // scanned, we can assume that it's live.
2301     if (!JITCode::isOptimizingJIT(jitType()))
2302         return true;
2303
2304     // For simplicity, we don't attempt to jettison code blocks during GC if
2305     // they are executing. Instead we strongly mark their weak references to
2306     // allow them to continue to execute soundly.
2307     if (m_mayBeExecuting)
2308         return true;
2309
2310     if (Options::forceDFGCodeBlockLiveness())
2311         return true;
2312
2313     return false;
2314 #else
2315     return true;
2316 #endif
2317 }
2318
2319 bool CodeBlock::isKnownToBeLiveDuringGC()
2320 {
2321 #if ENABLE(DFG_JIT)
2322     // This should return true for:
2323     // - Code blocks that behave like normal objects - i.e. if they are referenced then they
2324     //   are live.
2325     // - Code blocks that were running on the stack.
2326     // - Code blocks that survived the last GC if the current GC is an Eden GC. This is
2327     //   because either livenessHasBeenProved would have survived as true or m_mayBeExecuting
2328     //   would survive as true.
2329     // - Code blocks that don't have any dead weak references.
2330     
2331     return shouldImmediatelyAssumeLivenessDuringScan()
2332         || m_jitCode->dfgCommon()->livenessHasBeenProved;
2333 #else
2334     return true;
2335 #endif
2336 }
2337
2338 #if ENABLE(DFG_JIT)
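// A transition is only worth marking if everything it depends on is already marked:
// its code origin (when it has one) and the source structure it transitions from.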
2339 static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
2340 {
2341     if (transition.m_codeOrigin && !Heap::isMarked(transition.m_codeOrigin.get()))
2342         return false;
2343     
2344     if (!Heap::isMarked(transition.m_from.get()))
2345         return false;
2346     
2347     return true;
2348 }
2349 #endif // ENABLE(DFG_JIT)
2350
2351 void CodeBlock::propagateTransitions(SlotVisitor& visitor)
2352 {
2353     UNUSED_PARAM(visitor);
2354
2355     if (m_allTransitionsHaveBeenMarked)
2356         return;
2357
2358     bool allAreMarkedSoFar = true;
2359         
2360     Interpreter* interpreter = m_vm->interpreter;
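     // For LLInt code, transition-style put_by_id instructions keep the old structure in
     // slot 4 and the new structure in slot 6; the new structure is visited only once the
     // old one is known to be marked, otherwise we note that not all transitions have been
     // handled yet.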
2361     if (jitType() == JITCode::InterpreterThunk) {
2362         const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
2363         for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
2364             Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
2365             switch (interpreter->getOpcodeID(instruction[0].u.opcode)) {
2366             case op_put_by_id_transition_direct:
2367             case op_put_by_id_transition_normal:
2368             case op_put_by_id_transition_direct_out_of_line:
2369             case op_put_by_id_transition_normal_out_of_line: {
2370                 if (Heap::isMarked(instruction[4].u.structure.get()))
2371                     visitor.append(&instruction[6].u.structure);
2372                 else
2373                     allAreMarkedSoFar = false;
2374                 break;
2375             }
2376             default:
2377                 break;
2378             }
2379         }
2380     }
2381
2382 #if ENABLE(JIT)
2383     if (JITCode::isJIT(jitType())) {
2384         for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
2385             StructureStubInfo& stubInfo = **iter;
2386             switch (stubInfo.accessType) {
2387             case access_put_by_id_transition_normal:
2388             case access_put_by_id_transition_direct: {
2389                 JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
2390                 if ((!origin || Heap::isMarked(origin))
2391                     && Heap::isMarked(stubInfo.u.putByIdTransition.previousStructure.get()))
2392                     visitor.append(&stubInfo.u.putByIdTransition.structure);
2393                 else
2394                     allAreMarkedSoFar = false;
2395                 break;
2396             }
2397
2398             case access_put_by_id_list: {
2399                 PolymorphicPutByIdList* list = stubInfo.u.putByIdList.list;
2400                 JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
2401                 if (origin && !Heap::isMarked(origin)) {
2402                     allAreMarkedSoFar = false;
2403                     break;
2404                 }
2405                 for (unsigned j = list->size(); j--;) {
2406                     PutByIdAccess& access = list->m_list[j];
2407                     if (!access.isTransition())
2408                         continue;
2409                     if (Heap::isMarked(access.oldStructure()))
2410                         visitor.append(&access.m_newStructure);
2411                     else
2412                         allAreMarkedSoFar = false;
2413                 }
2414                 break;
2415             }
2416             
2417             default:
2418                 break;
2419             }
2420         }
2421     }
2422 #endif // ENABLE(JIT)
2423     
2424 #if ENABLE(DFG_JIT)
2425     if (JITCode::isOptimizingJIT(jitType())) {
2426         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2427         
2428         for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
2429             if (shouldMarkTransition(dfgCommon->transitions[i])) {
2430                 // If the following three things are live, then the target of the
2431                 // transition is also live:
2432                 //
2433                 // - This code block. We know it's live already because otherwise
2434                 //   we wouldn't be scanning ourselves.
2435                 //
2436                 // - The code origin of the transition. Transitions may arise from
2437                 //   code that was inlined. They are not relevant if the user's
2438                 //   object that is required for the inlinee to run is no longer
2439                 //   live.
2440                 //
2441                 // - The source of the transition. The transition checks if some
2442                 //   heap location holds the source, and if so, stores the target.
2443                 //   Hence the source must be live for the transition to be live.
2444                 //
2445                 // We also short-circuit the liveness if the structure is harmless
2446                 // to mark (i.e. its global object and prototype are both already
2447                 // live).
2448                 
2449                 visitor.append(&dfgCommon->transitions[i].m_to);
2450             } else
2451                 allAreMarkedSoFar = false;
2452         }
2453     }
2454 #endif // ENABLE(DFG_JIT)
2455     
2456     if (allAreMarkedSoFar)
2457         m_allTransitionsHaveBeenMarked = true;
2458 }
2459
2460 void CodeBlock::determineLiveness(SlotVisitor& visitor)
2461 {
2462     UNUSED_PARAM(visitor);
2463     
2464     if (shouldImmediatelyAssumeLivenessDuringScan())
2465         return;
2466     
2467 #if ENABLE(DFG_JIT)
2468     // Check if we have any remaining work to do.
2469     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2470     if (dfgCommon->livenessHasBeenProved)
2471         return;
2472     
2473     // Now check all of our weak references. If all of them are live, then we
2474     // have proved liveness and so we scan our strong references. If, at the end of
2475     // GC, we still have not proved liveness, then this code block is toast.
2476     bool allAreLiveSoFar = true;
2477     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
2478         if (!Heap::isMarked(dfgCommon->weakReferences[i].get())) {
2479             allAreLiveSoFar = false;
2480             break;
2481         }
2482     }
2483     if (allAreLiveSoFar) {
2484         for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
2485             if (!Heap::isMarked(dfgCommon->weakStructureReferences[i].get())) {
2486                 allAreLiveSoFar = false;
2487                 break;
2488             }
2489         }
2490     }
2491     
2492     // If some weak references are dead, then this fixpoint iteration was
2493     // unsuccessful.
2494     if (!allAreLiveSoFar)
2495         return;
2496     
2497     // All weak references are live. Record this information so we don't
2498     // come back here again, and scan the strong references.
2499     dfgCommon->livenessHasBeenProved = true;
2500     stronglyVisitStrongReferences(visitor);
2501 #endif // ENABLE(DFG_JIT)
2502 }
2503
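// Entry point for the GC's weak visiting of this code block: first propagate
// structure transitions, then try to prove liveness from the weak references.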
2504 void CodeBlock::visitWeakReferences(SlotVisitor& visitor)
2505 {
2506     propagateTransitions(visitor);
2507     determineLiveness(visitor);
2508 }
2509
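// Clears LLInt caches that reference dead cells, jettisons optimized code whose
// weak references have died, and resets any JIT inline caches that can no
// longer be kept alive.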
2510 void CodeBlock::finalizeUnconditionally()
2511 {
2512     Interpreter* interpreter = m_vm->interpreter;
2513     if (JITCode::couldBeInterpreted(jitType())) {
2514         const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
2515         for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
2516             Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
2517             switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) {
2518             case op_get_by_id:
2519             case op_get_by_id_out_of_line:
2520             case op_put_by_id:
2521             case op_put_by_id_out_of_line:
2522                 if (!curInstruction[4].u.structure || Heap::isMarked(curInstruction[4].u.structure.get()))
2523                     break;
2524                 if (Options::verboseOSR())
2525                     dataLogF("Clearing LLInt property access with structure %p.\n", curInstruction[4].u.structure.get());
2526                 curInstruction[4].u.structure.clear();
2527                 curInstruction[5].u.operand = 0;
2528                 break;
2529             case op_put_by_id_transition_direct:
2530             case op_put_by_id_transition_normal:
2531             case op_put_by_id_transition_direct_out_of_line:
2532             case op_put_by_id_transition_normal_out_of_line:
2533                 if (Heap::isMarked(curInstruction[4].u.structure.get())
2534                     && Heap::isMarked(curInstruction[6].u.structure.get())
2535                     && Heap::isMarked(curInstruction[7].u.structureChain.get()))
2536                     break;
2537                 if (Options::verboseOSR()) {
2538                     dataLogF("Clearing LLInt put transition with structures %p -> %p, chain %p.\n",
2539                             curInstruction[4].u.structure.get(),
2540                             curInstruction[6].u.structure.get(),
2541                             curInstruction[7].u.structureChain.get());
2542                 }
2543                 curInstruction[4].u.structure.clear();
2544                 curInstruction[6].u.structure.clear();
2545                 curInstruction[7].u.structureChain.clear();
2546                 curInstruction[0].u.opcode = interpreter->getOpcode(op_put_by_id);
2547                 break;
2548             case op_get_array_length:
2549                 break;
2550             case op_to_this:
2551                 if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
2552                     break;
2553                 if (Options::verboseOSR())
2554                     dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
2555                 curInstruction[2].u.structure.clear();
2556                 curInstruction[3].u.toThisStatus = merge(
2557                     curInstruction[3].u.toThisStatus, ToThisClearedByGC);
2558                 break;
2559             case op_get_callee:
2560                 if (!curInstruction[2].u.jsCell || Heap::isMarked(curInstruction[2].u.jsCell.get()))
2561                     break;
2562                 if (Options::verboseOSR())
2563                     dataLogF("Clearing LLInt get callee with function %p.\n", curInstruction[2].u.jsCell.get());
2564                 curInstruction[2].u.jsCell.clear();
2565                 break;
2566             case op_resolve_scope: {
2567                 WriteBarrierBase<JSLexicalEnvironment>& lexicalEnvironment = curInstruction[6].u.lexicalEnvironment;
2568                 if (!lexicalEnvironment || Heap::isMarked(lexicalEnvironment.get()))
2569                     break;
2570                 if (Options::verboseOSR())
2571                     dataLogF("Clearing dead lexicalEnvironment %p.\n", lexicalEnvironment.get());
2572                 lexicalEnvironment.clear();
2573                 break;
2574             }
2575             case op_get_from_scope:
2576             case op_put_to_scope: {
2577                 ResolveModeAndType modeAndType =
2578                     ResolveModeAndType(curInstruction[4].u.operand);
2579                 if (modeAndType.type() == GlobalVar || modeAndType.type() == GlobalVarWithVarInjectionChecks || modeAndType.type() == LocalClosureVar)
2580                     continue;
2581                 WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
2582                 if (!structure || Heap::isMarked(structure.get()))
2583                     break;
2584                 if (Options::verboseOSR())
2585                     dataLogF("Clearing scope access with structure %p.\n", structure.get());
2586                 structure.clear();
2587                 break;
2588             }
2589             default:
2590                 RELEASE_ASSERT_NOT_REACHED();
2591             }
2592         }
2593
2594         for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
2595             if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
2596                 if (Options::verboseOSR())
2597                     dataLog("Clearing LLInt call from ", *this, "\n");
2598                 m_llintCallLinkInfos[i].unlink();
2599             }
2600             if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
2601                 m_llintCallLinkInfos[i].lastSeenCallee.clear();
2602         }
2603     }
2604
2605 #if ENABLE(DFG_JIT)
2606     // Check whether we're still known to be live. If we're not, then jettison.
2607     if (!isKnownToBeLiveDuringGC()) {
2608         if (Options::verboseOSR())
2609             dataLog(*this, " has dead weak references, jettisoning during GC.\n");
2610
2611         if (DFG::shouldShowDisassembly()) {
2612             dataLog(*this, " will be jettisoned because of the following dead references:\n");
2613             DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2614             for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
2615                 DFG::WeakReferenceTransition& transition = dfgCommon->transitions[i];
2616                 JSCell* origin = transition.m_codeOrigin.get();
2617                 JSCell* from = transition.m_from.get();
2618                 JSCell* to = transition.m_to.get();
2619                 if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
2620                     continue;
2621                 dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
2622             }
2623             for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
2624                 JSCell* weak = dfgCommon->weakReferences[i].get();
2625                 if (Heap::isMarked(weak))
2626                     continue;
2627                 dataLog("    Weak reference ", RawPointer(weak), ".\n");
2628             }
2629         }
2630         
2631         jettison(Profiler::JettisonDueToWeakReference);
2632         return;
2633     }
2634 #endif // ENABLE(DFG_JIT)
2635
2636 #if ENABLE(JIT)
2637     // Handle inline caches.
2638     if (!!jitCode()) {
2639         RepatchBuffer repatchBuffer(this);
2640         
2641         for (auto iter = callLinkInfosBegin(); !!iter; ++iter)
2642             (*iter)->visitWeak(repatchBuffer);
2643
2644         for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
2645             StructureStubInfo& stubInfo = **iter;
2646             
2647             if (stubInfo.visitWeakReferences(repatchBuffer))
2648                 continue;
2649             
2650             resetStubDuringGCInternal(repatchBuffer, stubInfo);
2651         }
2652     }
2653 #endif
2654 }
2655
2656 void CodeBlock::getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result)
2657 {
2658 #if ENABLE(JIT)
2659     toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
2660 #else
2661     UNUSED_PARAM(result);
2662 #endif
2663 }
2664
2665 void CodeBlock::getStubInfoMap(StubInfoMap& result)
2666 {
2667     ConcurrentJITLocker locker(m_lock);
2668     getStubInfoMap(locker, result);
2669 }
2670
2671 void CodeBlock::getCallLinkInfoMap(const ConcurrentJITLocker&, CallLinkInfoMap& result)
2672 {
2673 #if ENABLE(JIT)
2674     toHashMap(m_callLinkInfos, getCallLinkInfoCodeOrigin, result);
2675 #else
2676     UNUSED_PARAM(result);
2677 #endif
2678 }
2679
2680 void CodeBlock::getCallLinkInfoMap(CallLinkInfoMap& result)
2681 {
2682     ConcurrentJITLocker locker(m_lock);
2683     getCallLinkInfoMap(locker, result);
2684 }
2685
2686 #if ENABLE(JIT)
2687 StructureStubInfo* CodeBlock::addStubInfo()
2688 {
2689     ConcurrentJITLocker locker(m_lock);
2690     return m_stubInfos.add();
2691 }
2692
2693 StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
2694 {
2695     for (StructureStubInfo* stubInfo : m_stubInfos) {
2696         if (stubInfo->codeOrigin == codeOrigin)
2697             return stubInfo;
2698     }
2699     return nullptr;
2700 }
2701
2702 CallLinkInfo* CodeBlock::addCallLinkInfo()
2703 {
2704     ConcurrentJITLocker locker(m_lock);
2705     return m_callLinkInfos.add();
2706 }
2707
2708 void CodeBlock::resetStub(StructureStubInfo& stubInfo)
2709 {
2710     if (stubInfo.accessType == access_unset)
2711         return;
2712     
2713     ConcurrentJITLocker locker(m_lock);
2714     
2715     RepatchBuffer repatchBuffer(this);
2716     resetStubInternal(repatchBuffer, stubInfo);
2717 }
2718
2719 void CodeBlock::resetStubInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
2720 {
2721     AccessType accessType = static_cast<AccessType>(stubInfo.accessType);
2722     
2723     if (Options::verboseOSR()) {
2724         // This can be called from GC destructor calls, so we don't try to do a full dump
2725         // of the CodeBlock.
2726         dataLog("Clearing structure cache (kind ", static_cast<int>(stubInfo.accessType), ") in ", RawPointer(this), ".\n");
2727     }
2728     
2729     RELEASE_ASSERT(JITCode::isJIT(jitType()));
2730     
2731     if (isGetByIdAccess(accessType))
2732         resetGetByID(repatchBuffer, stubInfo);
2733     else if (isPutByIdAccess(accessType))
2734         resetPutByID(repatchBuffer, stubInfo);
2735     else {
2736         RELEASE_ASSERT(isInAccess(accessType));
2737         resetIn(repatchBuffer, stubInfo);
2738     }
2739     
2740     stubInfo.reset();
2741 }
2742
2743 void CodeBlock::resetStubDuringGCInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
2744 {
2745     resetStubInternal(repatchBuffer, stubInfo);
2746     stubInfo.resetByGC = true;
2747 }
2748
2749 CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
2750 {
2751     for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
2752         if ((*iter)->codeOrigin == CodeOrigin(index))
2753             return *iter;
2754     }
2755     return nullptr;
2756 }
2757 #endif
2758
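// Visits the references that must stay alive for as long as this code block is
// alive: the global object, owner executable, symbol table, unlinked code,
// constants, function expressions and declarations, object allocation profiles,
// and (for optimized code) the DFG inline call frames.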
2759 void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
2760 {
2761     visitor.append(&m_globalObject);
2762     visitor.append(&m_ownerExecutable);
2763     visitor.append(&m_symbolTable);
2764     visitor.append(&m_unlinkedCode);
2765     if (m_rareData)
2766         m_rareData->m_evalCodeCache.visitAggregate(visitor);
2767     visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
2768     for (size_t i = 0; i < m_functionExprs.size(); ++i)
2769         visitor.append(&m_functionExprs[i]);
2770     for (size_t i = 0; i < m_functionDecls.size(); ++i)
2771         visitor.append(&m_functionDecls[i]);
2772     for (unsigned i = 0; i < m_objectAllocationProfiles.size(); ++i)
2773         m_objectAllocationProfiles[i].visitAggregate(visitor);
2774
2775 #if ENABLE(DFG_JIT)
2776     if (JITCode::isOptimizingJIT(jitType())) {
2777         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2778         if (dfgCommon->inlineCallFrames.get())
2779             dfgCommon->inlineCallFrames->visitAggregate(visitor);
2780     }
2781 #endif
2782
2783     updateAllPredictions();
2784 }
2785
2786 void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor)
2787 {
2788     UNUSED_PARAM(visitor);
2789
2790 #if ENABLE(DFG_JIT)
2791     if (!JITCode::isOptimizingJIT(jitType()))
2792         return;
2793     
2794     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2795
2796     for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
2797         if (!!dfgCommon->transitions[i].m_codeOrigin)
2798             visitor.append(&dfgCommon->transitions[i].m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
2799         visitor.append(&dfgCommon->transitions[i].m_from);
2800         visitor.append(&dfgCommon->transitions[i].m_to);
2801     }
2802     
2803     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i)
2804         visitor.append(&dfgCommon->weakReferences[i]);
2805
2806     for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i)
2807         visitor.append(&dfgCommon->weakStructureReferences[i]);
2808 #endif    
2809 }
2810
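// Walks the alternative() chain all the way down to the baseline (or
// not-yet-compiled) code block for this executable.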
2811 CodeBlock* CodeBlock::baselineAlternative()
2812 {
2813 #if ENABLE(JIT)
2814     CodeBlock* result = this;
2815     while (result->alternative())
2816         result = result->alternative();
2817     RELEASE_ASSERT(result);
2818     RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
2819     return result;
2820 #else
2821     return this;
2822 #endif
2823 }
2824
2825 CodeBlock* CodeBlock::baselineVersion()
2826 {
2827 #if ENABLE(JIT)
2828     if (JITCode::isBaselineCode(jitType()))
2829         return this;
2830     CodeBlock* result = replacement();
2831     if (!result) {
2832         // This can happen if we're creating the original CodeBlock for an executable.
2833         // Assume that we're the baseline CodeBlock.
2834         RELEASE_ASSERT(jitType() == JITCode::None);
2835         return this;
2836     }
2837     result = result->baselineAlternative();
2838     return result;
2839 #else
2840     return this;
2841 #endif
2842 }
2843
2844 #if ENABLE(JIT)
2845 bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
2846 {
2847     return JITCode::isHigherTier(replacement()->jitType(), typeToReplace);
2848 }
2849
2850 bool CodeBlock::hasOptimizedReplacement()
2851 {
2852     return hasOptimizedReplacement(jitType());
2853 }
2854 #endif
2855
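// Returns whether stores to the given operand can be observed indirectly, e.g.
// through a closure, the lexical environment, or the arguments object, taking
// any inline call frame into account.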
2856 bool CodeBlock::isCaptured(VirtualRegister operand, InlineCallFrame* inlineCallFrame) const
2857 {
2858     if (operand.isArgument())
2859         return operand.toArgument() && usesArguments();
2860
2861     if (inlineCallFrame)
2862         return inlineCallFrame->capturedVars.get(operand.toLocal());
2863
2864     // The lexical environment object isn't in the captured region, but it's "captured"
2865     // in the sense that stores to its location can be observed indirectly.
2866     if (needsActivation() && operand == activationRegister())
2867         return true;
2868
2869     // Ditto for the arguments object.
2870     if (usesArguments() && operand == argumentsRegister())
2871         return true;
2872     if (usesArguments() && operand == unmodifiedArgumentsRegister(argumentsRegister()))
2873         return true;
2874
2875     // We're in global code, so there are no locals to capture.
2876     if (!symbolTable())
2877         return false;
2878
2879     return symbolTable()->isCaptured(operand.offset());
2880 }
2881
2882 int CodeBlock::framePointerOffsetToGetActivationRegisters(int machineCaptureStart)
2883 {
2884     // We'll be adding this to the stack pointer to get a registers pointer that looks
2885     // like it would have looked in the baseline engine. For example, if bytecode would
2886     // have put the first captured variable at offset -5 but we put it at offset -1, then
2887     // we'll have an offset of 4.
2888     int32_t offset = 0;
2889     
2890     // Compute where we put the captured variables. This offset will point the registers
2891     // pointer directly at the first captured var.
2892     offset += machineCaptureStart;
2893     
2894     // Now compute the offset needed to make the runtime see the captured variables at the
2895     // same offset that the bytecode would have used.
2896     offset -= symbolTable()->captureStart();
2897     
2898     return offset;
2899 }
2900
2901 int CodeBlock::framePointerOffsetToGetActivationRegisters()
2902 {
2903     if (!JITCode::isOptimizingJIT(jitType()))
2904         return 0;
2905 #if ENABLE(DFG_JIT)
2906     return framePointerOffsetToGetActivationRegisters(jitCode()->dfgCommon()->machineCaptureStart);
2907 #else
2908     RELEASE_ASSERT_NOT_REACHED();
2909     return 0;
2910 #endif
2911 }
2912
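// Returns the innermost exception handler covering the given bytecode offset,
// or 0 if there is none.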
2913 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset)
2914 {
2915     RELEASE_ASSERT(bytecodeOffset < instructions().size());
2916
2917     if (!m_rareData)
2918         return 0;
2919     
2920     Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
2921     for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
2922         // Handlers are ordered innermost first, so the first handler we encounter
2923         // that contains the source address is the correct handler to use.
2924         if (exceptionHandlers[i].start <= bytecodeOffset && exceptionHandlers[i].end > bytecodeOffset)
2925             return &exceptionHandlers[i];
2926     }
2927
2928     return 0;
2929 }
2930
2931 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
2932 {
2933     RELEASE_ASSERT(bytecodeOffset < instructions().size());
2934     return m_ownerExecutable->lineNo() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
2935 }
2936
2937 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
2938 {
2939     int divot;
2940     int startOffset;
2941     int endOffset;
2942     unsigned line;
2943     unsigned column;
2944     expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
2945     return column;
2946 }
2947
2948 void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column)
2949 {
2950     m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
2951     divot += m_sourceOffset;
2952     column += line ? 1 : firstLineColumnOffset();
2953     line += m_ownerExecutable->lineNo();
2954 }
2955
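// Scans the instruction stream for an op_debug whose expression range matches
// the given line (and, if one is specified, column).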
2956 bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
2957 {
2958     Interpreter* interpreter = vm()->interpreter;
2959     const Instruction* begin = instructions().begin();
2960     const Instruction* end = instructions().end();
2961     for (const Instruction* it = begin; it != end;) {
2962         OpcodeID opcodeID = interpreter->getOpcodeID(it->u.opcode);
2963         if (opcodeID == op_debug) {
2964             unsigned bytecodeOffset = it - begin;
2965             int unused;
2966             unsigned opDebugLine;
2967             unsigned opDebugColumn;
2968             expressionRangeForBytecodeOffset(bytecodeOffset, unused, unused, unused, opDebugLine, opDebugColumn);
2969             if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
2970                 return true;
2971         }
2972         it += opcodeLengths[opcodeID];
2973     }
2974     return false;
2975 }
2976
2977 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
2978 {
2979     m_rareCaseProfiles.shrinkToFit();
2980     m_specialFastCaseProfiles.shrinkToFit();
2981     
2982     if (shrinkMode == EarlyShrink) {
2983         m_constantRegisters.shrinkToFit();
2984         
2985         if (m_rareData) {
2986             m_rareData->m_switchJumpTables.shrinkToFit();
2987             m_rareData->m_stringSwitchJumpTables.shrinkToFit();
2988         }
2989     } // else don't shrink these, because pointers into these tables would already have been handed out.
2990 }
2991
2992 unsigned CodeBlock::addOrFindConstant(JSValue v)
2993 {
2994     unsigned result;
2995     if (findConstant(v, result))
2996         return result;
2997     return addConstant(v);
2998 }
2999
3000 bool CodeBlock::findConstant(JSValue v, unsigned& index)
3001 {
3002     unsigned numberOfConstants = numberOfConstantRegisters();
3003     for (unsigned i = 0; i < numberOfConstants; ++i) {
3004         if (getConstant(FirstConstantRegisterIndex + i) == v) {
3005             index = i;
3006             return true;
3007         }
3008     }
3009     index = numberOfConstants;
3010     return false;
3011 }
3012
3013 #if ENABLE(JIT)
3014 void CodeBlock::unlinkCalls()
3015 {
3016     if (!!m_alternative)
3017         m_alternative->unlinkCalls();
3018     for (size_t i = 0; i < m_llintCallLinkInfos.size(); ++i) {
3019         if (m_llintCallLinkInfos[i].isLinked())
3020             m_llintCallLinkInfos[i].unlink();
3021     }
3022     if (m_callLinkInfos.isEmpty())
3023         return;
3024     if (!m_vm->canUseJIT())
3025         return;
3026     RepatchBuffer repatchBuffer(this);
3027     for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
3028         CallLinkInfo& info = **iter;
3029         if (!info.isLinked())
3030             continue;
3031         info.unlink(repatchBuffer);
3032     }
3033 }
3034
3035 void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
3036 {
3037     noticeIncomingCall(callerFrame);
3038     m_incomingCalls.push(incoming);
3039 }
3040
3041 void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
3042 {
3043     noticeIncomingCall(callerFrame);
3044     m_incomingPolymorphicCalls.push(incoming);
3045 }
3046 #endif // ENABLE(JIT)
3047
3048 void CodeBlock::unlinkIncomingCalls()
3049 {
3050     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
3051         m_incomingLLIntCalls.begin()->unlink();
3052 #if ENABLE(JIT)
3053     if (m_incomingCalls.isEmpty() && m_incomingPolymorphicCalls.isEmpty())
3054         return;
3055     RepatchBuffer repatchBuffer(this);
3056     while (m_incomingCalls.begin() != m_incomingCalls.end())
3057         m_incomingCalls.begin()->unlink(repatchBuffer);
3058     while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
3059         m_incomingPolymorphicCalls.begin()->unlink(repatchBuffer);
3060 #endif // ENABLE(JIT)
3061 }
3062
3063 void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
3064 {
3065     noticeIncomingCall(callerFrame);
3066     m_incomingLLIntCalls.push(incoming);
3067 }
3068
3069 void CodeBlock::clearEvalCache()
3070 {
3071     if (!!m_alternative)
3072         m_alternative->clearEvalCache();
3073     if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
3074         otherBlock->clearEvalCache();
3075     if (!m_rareData)
3076         return;
3077     m_rareData->m_evalCodeCache.clear();
3078 }
3079
3080 void CodeBlock::install()
3081 {
3082     ownerExecutable()->installCode(this);
3083 }
3084
3085 PassRefPtr<CodeBlock> CodeBlock::newReplacement()
3086 {
3087     return ownerExecutable()->newReplacementCodeBlockFor(specializationKind());
3088 }
3089
3090 const SlowArgument* CodeBlock::machineSlowArguments()
3091 {
3092     if (!JITCode::isOptimizingJIT(jitType()))
3093         return symbolTable()->slowArguments();
3094     
3095 #if ENABLE(DFG_JIT)
3096     return jitCode()->dfgCommon()->slowArguments.get();
3097 #else // ENABLE(DFG_JIT)
3098     return 0;
3099 #endif // ENABLE(DFG_JIT)
3100 }
3101
3102 #if ENABLE(JIT)
3103 CodeBlock* ProgramCodeBlock::replacement()
3104 {
3105     return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
3106 }
3107
3108 CodeBlock* EvalCodeBlock::replacement()
3109 {
3110     return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
3111 }
3112
3113 CodeBlock* FunctionCodeBlock::replacement()
3114 {
3115     return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
3116 }
3117
3118 DFG::CapabilityLevel ProgramCodeBlock::capabilityLevelInternal()
3119 {
3120     return DFG::programCapabilityLevel(this);
3121 }
3122
3123 DFG::CapabilityLevel EvalCodeBlock::capabilityLevelInternal()
3124 {
3125     return DFG::evalCapabilityLevel(this);
3126 }
3127
3128 DFG::CapabilityLevel FunctionCodeBlock::capabilityLevelInternal()
3129 {
3130     if (m_isConstructor)
3131         return DFG::functionForConstructCapabilityLevel(this);
3132     return DFG::functionForCallCapabilityLevel(this);
3133 }
3134 #endif
3135
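// Throws away this optimized code block: invalidates its machine code so that
// active frames OSR-exit on return, optionally counts a reoptimization against
// the baseline version, and reinstalls the baseline code block as the
// entrypoint if this block was the current replacement.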
3136 void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
3137 {
3138     RELEASE_ASSERT(reason != Profiler::NotJettisoned);
3139     
3140 #if ENABLE(DFG_JIT)
3141     if (DFG::shouldShowDisassembly()) {
3142         dataLog("Jettisoning ", *this);
3143         if (mode == CountReoptimization)
3144             dataLog(" and counting reoptimization");
3145         dataLog(" due to ", reason);
3146         if (detail)
3147             dataLog(", ", *detail);
3148         dataLog(".\n");
3149     }
3150     
3151     DeferGCForAWhile deferGC(*m_heap);
3152     RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
3153     
3154     if (Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get())
3155         compilation->setJettisonReason(reason, detail);
3156     
3157     // We want to accomplish two things here:
3158     // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
3159     //    we should OSR exit at the top of the next bytecode instruction after the return.
3160     // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
3161     
3162     // This accomplishes the OSR-exit-on-return part, and does its own book-keeping about
3163     // whether the invalidation has already happened.
3164     if (!jitCode()->dfgCommon()->invalidate()) {
3165         // Nothing to do since we've already been invalidated. That means that we cannot be
3166         // the optimized replacement.
3167         RELEASE_ASSERT(this != replacement());
3168         return;
3169     }
3170     
3171     if (DFG::shouldShowDisassembly())
3172         dataLog("    Did invalidate ", *this, "\n");
3173     
3174     // Count the reoptimization if that's what the user wanted.
3175     if (mode == CountReoptimization) {
3176         // FIXME: Maybe this should call alternative().
3177         // https://bugs.webkit.org/show_bug.cgi?id=123677
3178         baselineAlternative()->countReoptimization();
3179         if (DFG::shouldShowDisassembly())
3180             dataLog("    Did count reoptimization for ", *this, "\n");
3181     }
3182     
3183     // Now take care of the entrypoint.
3184     if (this != replacement()) {
3185         // This means that we were never the entrypoint. This can happen for OSR entry code
3186         // blocks.
3187         return;
3188     }
3189     alternative()->optimizeAfterWarmUp();
3190     tallyFrequentExitSites();
3191     alternative()->install();
3192     if (DFG::shouldShowDisassembly())
3193         dataLog("    Did install baseline version of ", *this, "\n");
3194 #else // ENABLE(DFG_JIT)
3195     UNUSED_PARAM(mode);
3196     UNUSED_PARAM(detail);
3197     UNREACHABLE_FOR_PLATFORM();
3198 #endif // ENABLE(DFG_JIT)
3199 }
3200
3201 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
3202 {
3203     if (!codeOrigin.inlineCallFrame)
3204         return globalObject();
3205     return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->eitherCodeBlock()->globalObject();
3206 }
3207
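// Stack-walking functor used by noticeIncomingCall() below: starting at a given
// call frame, it reports whether the given CodeBlock shows up again within a
// bounded number of frames, i.e. whether the incoming call is recursive.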
3208 class RecursionCheckFunctor {
3209 public:
3210     RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
3211         : m_startCallFrame(startCallFrame)
3212         , m_codeBlock(codeBlock)
3213         , m_depthToCheck(depthToCheck)
3214         , m_foundStartCallFrame(false)
3215         , m_didRecurse(false)
3216     { }
3217
3218     StackVisitor::Status operator()(StackVisitor& visitor)
3219     {
3220         CallFrame* currentCallFrame = visitor->callFrame();
3221
3222         if (currentCallFrame == m_startCallFrame)
3223             m_foundStartCallFrame = true;
3224
3225         if (m_foundStartCallFrame) {
3226             if (visitor->callFrame()->codeBlock() == m_codeBlock) {
3227                 m_didRecurse = true;
3228                 return StackVisitor::Done;
3229             }
3230
3231             if (!m_depthToCheck--)
3232                 return StackVisitor::Done;
3233         }
3234
3235         return StackVisitor::Continue;
3236     }
3237
3238     bool didRecurse() const { return m_didRecurse; }
3239
3240 private:
3241     CallFrame* m_startCallFrame;
3242     CodeBlock* m_codeBlock;
3243     unsigned m_depthToCheck;
3244     bool m_foundStartCallFrame;
3245     bool m_didRecurse;
3246 };
3247
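// Updates this block's "should always be inlined" (SABI) heuristic based on the
// caller: native, overly large, still-in-LLInt, already-optimized, non-function,
// recursive, or non-DFG-candidate callers all clear the flag.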
3248 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
3249 {
3250     CodeBlock* callerCodeBlock = callerFrame->codeBlock();
3251     
3252     if (Options::verboseCallLink())
3253         dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
3254     
3255 #if ENABLE(DFG_JIT)
3256     if (!m_shouldAlwaysBeInlined)
3257         return;
3258     
3259     if (!callerCodeBlock) {
3260         m_shouldAlwaysBeInlined = false;
3261         if (Options::verboseCallLink())
3262             dataLog("    Clearing SABI because caller is native.\n");
3263         return;
3264     }
3265
3266     if (!hasBaselineJITProfiling())
3267         return;
3268
3269     if (!DFG::mightInlineFunction(this))
3270         return;
3271
3272     if (!canInline(m_capabilityLevelState))
3273         return;
3274     
3275     if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
3276         m_shouldAlwaysBeInlined = false;
3277         if (Options::verboseCallLink())
3278             dataLog("    Clearing SABI because caller is too large.\n");
3279         return;
3280     }
3281
3282     if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
3283         // If the caller is still in the interpreter, then we can't expect inlining to
3284         // happen anytime soon. Assume it's profitable to optimize it separately. This
3285         // ensures that a function is SABI only if it is called no more frequently than
3286         // any of its callers.
3287         m_shouldAlwaysBeInlined = false;
3288         if (Options::verboseCallLink())
3289             dataLog("    Clearing SABI because caller is in LLInt.\n");
3290         return;
3291     }
3292     
3293     if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
3294         m_shouldAlwaysBeInlined = false;
3295         if (Options::verboseCallLink())
3296             dataLog("    Clearing SABI because caller was already optimized.\n");
3297         return;
3298     }
3299     
3300     if (callerCodeBlock->codeType() != FunctionCode) {
3301         // If the caller is either eval or global code, assume that it won't be
3302         // optimized anytime soon. For eval code this is particularly true since we
3303         // delay eval optimization by a *lot*.
3304         m_shouldAlwaysBeInlined = false;
3305         if (Options::verboseCallLink())
3306             dataLog("    Clearing SABI because caller is not a function.\n");
3307         return;
3308     }
3309
3310     // Recursive calls won't be inlined.
3311     RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
3312     vm()->topCallFrame->iterate(functor);
3313
3314     if (functor.didRecurse()) {
3315         if (Options::verboseCallLink())
3316             dataLog("    Clearing SABI because recursion was detected.\n");
3317         m_shouldAlwaysBeInlined = false;
3318         return;
3319     }
3320     
3321     if (callerCodeBlock->m_capabilityLevelState == DFG::CapabilityLevelNotSet) {
3322         dataLog("In call from ", *callerCodeBlock, " ", callerFrame->codeOrigin(), " to ", *this, ": caller's DFG capability level is not set.\n");
3323         CRASH();
3324     }
3325     
3326     if (canCompile(callerCodeBlock->m_capabilityLevelState))
3327         return;
3328     
3329     if (Options::verboseCallLink())
3330         dataLog("    Clearing SABI because the caller is not a DFG candidate.\n");
3331     
3332     m_shouldAlwaysBeInlined = false;
3333 #endif
3334 }
3335
3336 unsigned CodeBlock::reoptimizationRetryCounter() const
3337 {
3338 #if ENABLE(JIT)
3339     ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
3340     return m_reoptimizationRetryCounter;
3341 #else
3342     return 0;
3343 #endif // ENABLE(JIT)
3344 }
3345
3346 #if ENABLE(JIT)
3347 void CodeBlock::countReoptimization()
3348 {
3349     m_reoptimizationRetryCounter++;
3350     if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
3351         m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
3352 }
3353
3354 unsigned CodeBlock::numberOfDFGCompiles()
3355 {
3356     ASSERT(JITCode::isBaselineCode(jitType()));
3357     if (Options::testTheFTL()) {
3358         if (m_didFailFTLCompilation)
3359             return 1000000;
3360         return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
3361     }
3362     return (JITCode::isOptimizingJIT(replacement()->jitType()) ? 1 : 0) + m_reoptimizationRetryCounter;
3363 }
3364
3365 int32_t CodeBlock::codeTypeThresholdMultiplier() const
3366 {
3367     if (codeType() == EvalCode)
3368         return Options::evalThresholdMultiplier();
3369     
3370     return 1;
3371 }
3372
3373 double CodeBlock::optimizationThresholdScalingFactor()
3374 {
3375     // This expression arises from doing a least-squares fit of
3376     //
3377     // F[x_] =: a * Sqrt[x + b] + Abs[c * x] + d
3378     //
3379     // against the data points:
3380     //
3381     //    x       F[x_]
3382     //    10       0.9          (smallest reasonable code block)
3383     //   200       1.0          (typical small-ish code block)
3384     //   320       1.2          (something I saw in 3d-cube that I wanted to optimize)
3385     //  1268       5.0          (something I saw in 3d-cube that I didn't want to optimize)
3386     //  4000       5.5          (random large size, used to cause the function to converge to a shallow curve of some sort)
3387     // 10000       6.0          (similar to above)
3388     //
3389     // I achieve the minimization using the following Mathematica code:
3390     //
3391     // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
3392     //
3393     // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
3394     //
3395     // solution = 
3396     //     Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
3397     //         {a, b, c, d}][[2]]
3398     //
3399     // And the code below (to initialize a, b, c, d) is generated by:
3400     //
3401     // Print["const double " <> ToString[#[[1]]] <> " = " <>
3402     //     If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
3403     //
3404     // We've long known the following to be true:
3405     // - Small code blocks are cheap to optimize and so we should do it sooner rather
3406     //   than later.
3407     // - Large code blocks are expensive to optimize and so we should postpone doing so,
3408     //   and sometimes have a large enough threshold that we never optimize them.
3409     // - The difference in cost is not totally linear because (a) just invoking the
3410     //   DFG incurs some base cost and (b) for large code blocks there is enough slop
3411     //   in the correlation between instruction count and the actual compilation cost
3412     //   that for those large blocks, the instruction count should not have a strong
3413     //   influence on our threshold.
3414     //
3415     // I knew the goals but I didn't know how to achieve them; so I picked an interesting
3416     // example where the heuristics were right (code block in 3d-cube with instruction
3417     // count 320, which got compiled early as it should have been) and one where they were
3418     // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
3419     // to compile and didn't run often enough to warrant compilation in my opinion), and
3420     // then threw in additional data points that represented my own guess of what our
3421     // heuristics should do for some round-numbered examples.
3422     //
3423     // The expression to which I decided to fit the data arose because I started with an
3424     // affine function, and then did two things: put the linear part in an Abs to ensure
3425     // that the fit didn't end up choosing a negative value of c (which would result in
3426     // the function turning over and going negative for large x) and I threw in a Sqrt
3427     // term because Sqrt represents my intuition that the function should be more sensitive
3428     // to small changes in small values of x, but less sensitive when x gets large.
3429     
3430     // Note that the current fit essentially eliminates the linear portion of the
3431     // expression (c == 0.0).
3432     const double a = 0.061504;
3433     const double b = 1.02406;
3434     const double c = 0.0;
3435     const double d = 0.825914;
3436     
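    // Illustrative sanity check (not part of the original fit): with the
    // constants above, a code block of ~10000 instructions gets a factor of
    // roughly 0.825914 + 0.061504 * sqrt(10001.02406) ~= 6.98, in the same
    // ballpark as the 6.0 sample listed in the table above.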
3437     double instructionCount = this->instructionCount();
3438     
3439     ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
3440     
3441     double result = d + a * sqrt(instructionCount + b) + c * instructionCount;
3442     
3443     result *= codeTypeThresholdMultiplier();
3444     
3445     if (Options::verboseOSR()) {
3446         dataLog(
3447             *this, ": instruction count is ", instructionCount,
3448             ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
3449             "\n");
3450     }
3451     return result;
3452 }
3453
3454 static int32_t clipThreshold(double threshold)
3455 {
3456     if (threshold < 1.0)
3457         return 1;
3458     
3459     if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
3460         return std::numeric_limits<int32_t>::max();
3461     
3462     return static_cast<int32_t>(threshold);
3463 }
3464
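// Scales a raw execution-count threshold by the size-based factor above and by
// 2^reoptimizationRetryCounter(), then clamps the result to the int32 range
// (with a minimum of 1).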
3465 int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
3466 {
3467     return clipThreshold(
3468         static_cast<double>(desiredThreshold) *
3469         optimizationThresholdScalingFactor() *
3470         (1 << reoptimizationRetryCounter()));
3471 }
3472
3473 bool CodeBlock::checkIfOptimizationThresholdReached()
3474 {
3475 #if ENABLE(DFG_JIT)
3476     if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
3477         if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
3478             == DFG::Worklist::Compiled) {
3479             optimizeNextInvocation();
3480             return true;
3481         }
3482     }
3483 #endif
3484     
3485     return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
3486 }
3487
3488 void CodeBlock::optimizeNextInvocation()
3489 {
3490     if (Options::verboseOSR())
3491         dataLog(*this, ": Optimizing next invocation.\n");
3492     m_jitExecuteCounter.setNewThreshold(0, this);
3493 }
3494
3495 void CodeBlock::dontOptimizeAnytimeSoon()
3496 {
3497     if (Options::verboseOSR())
3498         dataLog(*this, ": Not optimizing anytime soon.\n");
3499     m_jitExecuteCounter.deferIndefinitely();
3500 }
3501
3502 void CodeBlock::optimizeAfterWarmUp()
3503 {
3504     if (Options::verboseOSR())
3505         dataLog(*this, ": Optimizing after warm-up.\n");
3506 #if ENABLE(DFG_JIT)
3507     m_jitExecuteCounter.setNewThreshold(
3508         adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
3509 #endif
3510 }
3511
3512 void CodeBlock::optimizeAfterLongWarmUp()
3513 {
3514     if (Options::verboseOSR())
3515         dataLog(*this, ": Optimizing after long warm-up.\n");
3516 #if ENABLE(DFG_JIT)
3517     m_jitExecuteCounter.setNewThreshold(
3518         adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
3519 #endif
3520 }
3521
3522 void CodeBlock::optimizeSoon()
3523 {
3524     if (Options::verboseOSR())
3525         dataLog(*this, ": Optimizing soon.\n");
3526 #if ENABLE(DFG_JIT)
3527     m_jitExecuteCounter.setNewThreshold(
3528         adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
3529 #endif
3530 }
3531
3532 void CodeBlock::forceOptimizationSlowPathConcurrently()
3533 {
3534     if (Options::verboseOSR())
3535         dataLog(*this, ": Forcing slow path concurrently.\n");
3536     m_jitExecuteCounter.forceSlowPathConcurrently();
3537 }
3538
3539 #if ENABLE(DFG_JIT)
3540 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
3541 {
3542     JITCode::JITType type = jitType();
3543     if (type != JITCode::BaselineJIT) {
3544         dataLog(*this, ": expected to have baseline code but have ", type, "\n");
3545         RELEASE_ASSERT_NOT_REACHED();
3546     }
3547     
3548     CodeBlock* theReplacement = replacement();
3549     if ((result == CompilationSuccessful) != (theReplacement != this)) {
3550         dataLog(*this, ": we have result = ", result, " but ");
3551         if (theReplacement == this)
3552             dataLog("we are our own replacement.\n");
3553         else
3554             dataLog("our replacement is ", pointerDump(theReplacement), "\n");
3555         RELEASE_ASSERT_NOT_REACHED();
3556     }
3557     
3558     switch (result) {
3559     case CompilationSuccessful:
3560         RELEASE_ASSERT(JITCode::isOptimizingJIT(replacement()->jitType()));
3561         optimizeNextInvocation();
3562         return;
3563     case CompilationFailed:
3564         dontOptimizeAnytimeSoon();
3565         return;
3566     case CompilationDeferred:
3567         // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
3568         // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
3569         // necessarily guarantee anything. So, we make sure that even if that
3570         // function ends up being a no-op, we still eventually retry and realize
3571         // that we have optimized code ready.
3572         optimizeAfterWarmUp();
3573         return;
3574     case CompilationInvalidated:
3575         // Retry with exponential backoff.
3576         countReoptimization();
3577         optimizeAfterWarmUp();
3578         return;
3579     }
3580     
3581     dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
3582     RELEASE_ASSERT_NOT_REACHED();
3583 }
3584
3585 #endif
3586     
3587 uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
3588 {
3589     ASSERT(JITCode::isOptimizingJIT(jitType()));
3590     // Compute this the lame way so we don't saturate. This is called infrequently
3591     // enough that this loop won't hurt us.
3592     unsigned result = desiredThreshold;
3593     for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
3594         unsigned newResult = result << 1;
3595         if (newResult < result)
3596             return std::numeric_limits<uint32_t>::max();
3597         result = newResult;
3598     }
3599     return result;
3600 }
3601
3602 uint32_t CodeBlock::exitCountThresholdForReoptimization()
3603 {
3604     return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
3605 }
3606
3607 uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
3608 {
3609     return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
3610 }
3611
3612 bool CodeBlock::shouldReoptimizeNow()
3613 {
3614     return osrExitCounter() >= exitCountThresholdForReoptimization();
3615 }
3616
3617 bool CodeBlock::shouldReoptimizeFromLoopNow()
3618 {
3619     return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
3620 }
3621 #endif
3622
3623 ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
3624 {
3625     for (unsigned i = 0; i < m_arrayProfiles.size(); ++i) {
3626         if (m_arrayProfiles[i].bytecodeOffset() == bytecodeOffset)
3627             return &m_arrayProfiles[i];
3628     }
3629     return 0;
3630 }
3631
3632 ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
3633 {
3634     ArrayProfile* result = getArrayProfile(bytecodeOffset);
3635     if (result)
3636         return result;
3637     return addArrayProfile(bytecodeOffset);
3638 }
3639
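// Recomputes the prediction of every value profile while counting how many
// non-argument profiles are live and how many samples they hold; these counts
// feed the optimization heuristic in shouldOptimizeNow().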
3640 void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
3641 {
3642     ConcurrentJITLocker locker(m_lock);
3643     
3644     numberOfLiveNonArgumentValueProfiles = 0;
3645     numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
3646     for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
3647         ValueProfile* profile = getFromAllValueProfiles(i);
3648         unsigned numSamples = profile->totalNumberOfSamples();
3649         if (numSamples > ValueProfile::numberOfBuckets)
3650             numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
3651         numberOfSamplesInProfiles += numSamples;
3652         if (profile->m_bytecodeOffset < 0) {
3653             profile->computeUpdatedPrediction(locker);
3654             continue;
3655         }
3656         if (profile->numberOfSamples() || profile->m_prediction != SpecNone)
3657             numberOfLiveNonArgumentValueProfiles++;
3658         profile->computeUpdatedPrediction(locker);
3659     }
3660     
3661 #if ENABLE(DFG_JIT)
3662     m_lazyOperandValueProfiles.computeUpdatedPredictions(locker);
3663 #endif
3664 }
3665
3666 void CodeBlock::updateAllValueProfilePredictions()
3667 {
3668     unsigned ignoredValue1, ignoredValue2;
3669     updateAllPredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
3670 }
3671
3672 void CodeBlock::updateAllArrayPredictions()
3673 {
3674     ConcurrentJITLocker locker(m_lock);
3675     
3676     for (unsigned i = m_arrayProfiles.size(); i--;)
3677         m_arrayProfiles[i].computeUpdatedPrediction(locker, this);
3678     
3679     // Don't count these either, for similar reasons.
3680     for (unsigned i = m_arrayAllocationProfiles.size(); i--;)
3681         m_arrayAllocationProfiles[i].updateIndexingType();
3682 }
3683
3684 void CodeBlock::updateAllPredictions()
3685 {
3686     updateAllValueProfilePredictions();
3687     updateAllArrayPredictions();
3688 }
3689
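// Decides whether the value and array profiles are full enough to make
// optimizing worthwhile right now; if they are not, bumps the delay counter and
// re-arms the warm-up threshold instead.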
3690 bool CodeBlock::shouldOptimizeNow()
3691 {
3692     if (Options::verboseOSR())
3693         dataLog("Considering optimizing ", *this, "...\n");
3694
3695     if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
3696         return true;
3697     
3698     updateAllArrayPredictions();
3699     
3700     unsigned numberOfLiveNonArgumentValueProfiles;
3701     unsigned numberOfSamplesInProfiles;
3702     updateAllPredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
3703
3704     if (Options::verboseOSR()) {
3705         dataLogF(
3706             "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
3707             (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles(),
3708             numberOfLiveNonArgumentValueProfiles, numberOfValueProfiles(),
3709             (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles(),
3710             numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfValueProfiles());
3711     }
3712
3713     if ((!numberOfValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles() >= Options::desiredProfileLivenessRate())
3714         && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
3715         && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
3716         return true;
3717     
3718     ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
3719     m_optimizationDelayCounter++;
3720     optimizeAfterWarmUp();
3721     return false;
3722 }
3723
3724 #if ENABLE(DFG_JIT)
3725 void CodeBlock::tallyFrequentExitSites()
3726 {
3727     ASSERT(JITCode::isOptimizingJIT(jitType()));
3728     ASSERT(alternative()->jitType() == JITCode::BaselineJIT);
3729     
3730     CodeBlock* profiledBlock = alternative();
3731     
3732     switch (jitType()) {
3733     case JITCode::DFGJIT: {
3734         DFG::JITCode* jitCode = m_jitCode->dfg();
3735         for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {