c9331f737ab7303199e7e4a9a24cca0d1dbc1955
[WebKit-https.git] / Source / JavaScriptCore / bytecode / CodeBlock.cpp
1 /*
2  * Copyright (C) 2008-2010, 2012-2015 Apple Inc. All rights reserved.
3  * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1.  Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  * 2.  Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
15  *     its contributors may be used to endorse or promote products derived
16  *     from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29
30 #include "config.h"
31 #include "CodeBlock.h"
32
33 #include "BasicBlockLocation.h"
34 #include "BytecodeGenerator.h"
35 #include "BytecodeUseDef.h"
36 #include "CallLinkStatus.h"
37 #include "DFGCapabilities.h"
38 #include "DFGCommon.h"
39 #include "DFGDriver.h"
40 #include "DFGJITCode.h"
41 #include "DFGWorklist.h"
42 #include "Debugger.h"
43 #include "FunctionExecutableDump.h"
44 #include "Interpreter.h"
45 #include "JIT.h"
46 #include "JITStubs.h"
47 #include "JSCJSValue.h"
48 #include "JSFunction.h"
49 #include "JSLexicalEnvironment.h"
50 #include "LLIntEntrypoint.h"
51 #include "LowLevelInterpreter.h"
52 #include "JSCInlines.h"
53 #include "PolymorphicGetByIdList.h"
54 #include "PolymorphicPutByIdList.h"
55 #include "ProfilerDatabase.h"
56 #include "ReduceWhitespace.h"
57 #include "Repatch.h"
58 #include "RepatchBuffer.h"
59 #include "SlotVisitorInlines.h"
60 #include "StackVisitor.h"
61 #include "TypeLocationCache.h"
62 #include "TypeProfiler.h"
63 #include "UnlinkedInstructionStream.h"
64 #include <wtf/BagToHashMap.h>
65 #include <wtf/CommaPrinter.h>
66 #include <wtf/StringExtras.h>
67 #include <wtf/StringPrintStream.h>
68 #include <wtf/text/UniquedStringImpl.h>
69
70 #if ENABLE(DFG_JIT)
71 #include "DFGOperations.h"
72 #endif
73
74 #if ENABLE(FTL_JIT)
75 #include "FTLJITCode.h"
76 #endif
77
78 namespace JSC {
79
80 CString CodeBlock::inferredName() const
81 {
82     switch (codeType()) {
83     case GlobalCode:
84         return "<global>";
85     case EvalCode:
86         return "<eval>";
87     case FunctionCode:
88         return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
89     default:
90         CRASH();
91         return CString("", 0);
92     }
93 }
94
95 bool CodeBlock::hasHash() const
96 {
97     return !!m_hash;
98 }
99
100 bool CodeBlock::isSafeToComputeHash() const
101 {
102     return !isCompilationThread();
103 }
104
105 CodeBlockHash CodeBlock::hash() const
106 {
107     if (!m_hash) {
108         RELEASE_ASSERT(isSafeToComputeHash());
109         m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind());
110     }
111     return m_hash;
112 }
113
// Returns the source text of this code block as UTF-8, shaped for tools:
// for function code the body is re-extracted from the provider and prefixed
// with "function ", because the linked range alone omits that keyword.
CString CodeBlock::sourceCodeForTools() const
{
    // Global/eval code: the executable's source range is already the whole thing.
    if (codeType() != FunctionCode)
        return ownerExecutable()->source().toUTF8();
    
    SourceProvider* provider = source();
    FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
    UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
    // The unlinked executable's offsets are relative to a possibly different
    // copy of the source; delta rebases them onto this provider's coordinates.
    unsigned unlinkedStartOffset = unlinked->startOffset();
    unsigned linkedStartOffset = executable->source().startOffset();
    int delta = linkedStartOffset - unlinkedStartOffset;
    // Range runs from the function's name through the end of its body.
    unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
    unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
    return toCString(
        "function ",
        provider->source().impl()->utf8ForRange(rangeStart, rangeEnd - rangeStart));
}
131
132 CString CodeBlock::sourceCodeOnOneLine() const
133 {
134     return reduceWhitespace(sourceCodeForTools());
135 }
136
137 CString CodeBlock::hashAsStringIfPossible() const
138 {
139     if (hasHash() || isSafeToComputeHash())
140         return toCString(hash());
141     return "<no-hash>";
142 }
143
// Prints a one-line summary of this code block — name#hash:[this->alternative->
// owner, tier, code type, instruction count, flags] — pretending the block is
// at the given JIT tier (callers use this to label not-yet-installed code).
void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
{
    out.print(inferredName(), "#", hashAsStringIfPossible());
    out.print(":[", RawPointer(this), "->");
    // The baseline alternative, if any, is shown in the pointer chain.
    if (!!m_alternative)
        out.print(RawPointer(m_alternative.get()), "->");
    // NOTE(review): jitType and codeType() print with no separator between
    // them — presumably intentional for compactness, but worth confirming.
    out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());

    if (codeType() == FunctionCode)
        out.print(specializationKind());
    out.print(", ", instructionCount());
    // The inlining/FTL flags below are only meaningful on the baseline block.
    if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
        out.print(" (ShouldAlwaysBeInlined)");
    if (ownerExecutable()->neverInline())
        out.print(" (NeverInline)");
    if (ownerExecutable()->didTryToEnterInLoop())
        out.print(" (DidTryToEnterInLoop)");
    if (ownerExecutable()->isStrictMode())
        out.print(" (StrictMode)");
    if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
        out.print(" (FTLFail)");
    if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
        out.print(" (HadFTLReplacement)");
    out.print("]");
}
169
170 void CodeBlock::dump(PrintStream& out) const
171 {
172     dumpAssumingJITType(out, jitType());
173 }
174
175 static CString idName(int id0, const Identifier& ident)
176 {
177     return toCString(ident.impl(), "(@id", id0, ")");
178 }
179
180 CString CodeBlock::registerName(int r) const
181 {
182     if (isConstantRegisterIndex(r))
183         return constantName(r);
184
185     return toCString(VirtualRegister(r));
186 }
187
188 CString CodeBlock::constantName(int index) const
189 {
190     JSValue value = getConstant(index);
191     return toCString(value, "(", VirtualRegister(index), ")");
192 }
193
194 static CString regexpToSourceString(RegExp* regExp)
195 {
196     char postfix[5] = { '/', 0, 0, 0, 0 };
197     int index = 1;
198     if (regExp->global())
199         postfix[index++] = 'g';
200     if (regExp->ignoreCase())
201         postfix[index++] = 'i';
202     if (regExp->multiline())
203         postfix[index] = 'm';
204
205     return toCString("/", regExp->pattern().impl(), postfix);
206 }
207
208 static CString regexpName(int re, RegExp* regexp)
209 {
210     return toCString(regexpToSourceString(regexp), "(@re", re, ")");
211 }
212
213 NEVER_INLINE static const char* debugHookName(int debugHookID)
214 {
215     switch (static_cast<DebugHookID>(debugHookID)) {
216         case DidEnterCallFrame:
217             return "didEnterCallFrame";
218         case WillLeaveCallFrame:
219             return "willLeaveCallFrame";
220         case WillExecuteStatement:
221             return "willExecuteStatement";
222         case WillExecuteProgram:
223             return "willExecuteProgram";
224         case DidExecuteProgram:
225             return "didExecuteProgram";
226         case DidReachBreakpoint:
227             return "didReachBreakpoint";
228     }
229
230     RELEASE_ASSERT_NOT_REACHED();
231     return "";
232 }
233
234 void CodeBlock::printUnaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
235 {
236     int r0 = (++it)->u.operand;
237     int r1 = (++it)->u.operand;
238
239     printLocationAndOp(out, exec, location, it, op);
240     out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
241 }
242
243 void CodeBlock::printBinaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
244 {
245     int r0 = (++it)->u.operand;
246     int r1 = (++it)->u.operand;
247     int r2 = (++it)->u.operand;
248     printLocationAndOp(out, exec, location, it, op);
249     out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
250 }
251
252 void CodeBlock::printConditionalJump(PrintStream& out, ExecState* exec, const Instruction*, const Instruction*& it, int location, const char* op)
253 {
254     int r0 = (++it)->u.operand;
255     int offset = (++it)->u.operand;
256     printLocationAndOp(out, exec, location, it, op);
257     out.printf("%s, %d(->%d)", registerName(r0).data(), offset, location + offset);
258 }
259
// Dumps any of the get_by_id family of instructions (dst, base, identifier),
// then advances `it` over the instruction's remaining metadata slots.
void CodeBlock::printGetByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it)
{
    const char* op;
    switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
    case op_get_by_id:
        op = "get_by_id";
        break;
    case op_get_by_id_out_of_line:
        op = "get_by_id_out_of_line";
        break;
    case op_get_array_length:
        op = "array_length";
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        // Silence "may be used uninitialized" on compilers that don't treat
        // the assert above as unreachable.
        op = 0;
#endif
    }
    int r0 = (++it)->u.operand;
    int r1 = (++it)->u.operand;
    int id0 = (++it)->u.operand;
    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
    it += 4; // Increment up to the value profiler.
}
286
287 static void dumpStructure(PrintStream& out, const char* name, Structure* structure, const Identifier& ident)
288 {
289     if (!structure)
290         return;
291     
292     out.printf("%s = %p", name, structure);
293     
294     PropertyOffset offset = structure->getConcurrently(ident.impl());
295     if (offset != invalidOffset)
296         out.printf(" (offset = %d)", offset);
297 }
298
299 static void dumpChain(PrintStream& out, StructureChain* chain, const Identifier& ident)
300 {
301     out.printf("chain = %p: [", chain);
302     bool first = true;
303     for (WriteBarrier<Structure>* currentStructure = chain->head();
304          *currentStructure;
305          ++currentStructure) {
306         if (first)
307             first = false;
308         else
309             out.printf(", ");
310         dumpStructure(out, "struct", currentStructure->get(), ident);
311     }
312     out.printf("]");
313 }
314
315 void CodeBlock::printGetByIdCacheStatus(PrintStream& out, ExecState* exec, int location, const StubInfoMap& map)
316 {
317     Instruction* instruction = instructions().begin() + location;
318
319     const Identifier& ident = identifier(instruction[3].u.operand);
320     
321     UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.
322     
323     if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_array_length)
324         out.printf(" llint(array_length)");
325     else if (Structure* structure = instruction[4].u.structure.get()) {
326         out.printf(" llint(");
327         dumpStructure(out, "struct", structure, ident);
328         out.printf(")");
329     }
330
331 #if ENABLE(JIT)
332     if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
333         StructureStubInfo& stubInfo = *stubPtr;
334         if (stubInfo.resetByGC)
335             out.print(" (Reset By GC)");
336         
337         if (stubInfo.seen) {
338             out.printf(" jit(");
339             
340             Structure* baseStructure = 0;
341             Structure* prototypeStructure = 0;
342             PolymorphicGetByIdList* list = 0;
343             
344             switch (stubInfo.accessType) {
345             case access_get_by_id_self:
346                 out.printf("self");
347                 baseStructure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
348                 break;
349             case access_get_by_id_list:
350                 out.printf("list");
351                 list = stubInfo.u.getByIdList.list;
352                 break;
353             case access_unset:
354                 out.printf("unset");
355                 break;
356             default:
357                 RELEASE_ASSERT_NOT_REACHED();
358                 break;
359             }
360             
361             if (baseStructure) {
362                 out.printf(", ");
363                 dumpStructure(out, "struct", baseStructure, ident);
364             }
365             
366             if (prototypeStructure) {
367                 out.printf(", ");
368                 dumpStructure(out, "prototypeStruct", baseStructure, ident);
369             }
370             
371             if (list) {
372                 out.printf(", list = %p: [", list);
373                 for (unsigned i = 0; i < list->size(); ++i) {
374                     if (i)
375                         out.printf(", ");
376                     out.printf("(");
377                     dumpStructure(out, "base", list->at(i).structure(), ident);
378                     if (!list->at(i).conditionSet().isEmpty()) {
379                         out.printf(", ");
380                         out.print(list->at(i).conditionSet());
381                     }
382                     out.printf(")");
383                 }
384                 out.printf("]");
385             }
386             out.printf(")");
387         }
388     }
389 #else
390     UNUSED_PARAM(map);
391 #endif
392 }
393
// Appends the inline-cache state for a put_by_id at `location`: first the
// LLInt-level cache baked into the instruction stream, then (when the JIT is
// enabled) the state of the corresponding StructureStubInfo.
void CodeBlock::printPutByIdCacheStatus(PrintStream& out, ExecState* exec, int location, const StubInfoMap& map)
{
    Instruction* instruction = instructions().begin() + location;

    // Operand 2 is the property identifier index.
    const Identifier& ident = identifier(instruction[2].u.operand);
    
    UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.
    
    // LLInt cache: operand 4 holds the cached (old) structure; transitions
    // additionally cache the new structure (operand 6) and, optionally, the
    // prototype chain (operand 7).
    if (Structure* structure = instruction[4].u.structure.get()) {
        switch (exec->interpreter()->getOpcodeID(instruction[0].u.opcode)) {
        case op_put_by_id:
        case op_put_by_id_out_of_line:
            out.print(" llint(");
            dumpStructure(out, "struct", structure, ident);
            out.print(")");
            break;
            
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
            out.print(" llint(");
            dumpStructure(out, "prev", structure, ident);
            out.print(", ");
            dumpStructure(out, "next", instruction[6].u.structure.get(), ident);
            if (StructureChain* chain = instruction[7].u.structureChain.get()) {
                out.print(", ");
                dumpChain(out, chain, ident);
            }
            out.print(")");
            break;
            
        default:
            out.print(" llint(unknown)");
            break;
        }
    }

#if ENABLE(JIT)
    if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
        StructureStubInfo& stubInfo = *stubPtr;
        if (stubInfo.resetByGC)
            out.print(" (Reset By GC)");
        
        if (stubInfo.seen) {
            out.printf(" jit(");
            
            // stubInfo.u is a union; accessType selects which member is live.
            switch (stubInfo.accessType) {
            case access_put_by_id_replace:
                out.print("replace, ");
                dumpStructure(out, "struct", stubInfo.u.putByIdReplace.baseObjectStructure.get(), ident);
                break;
            case access_put_by_id_transition_normal:
            case access_put_by_id_transition_direct:
                out.print("transition, ");
                dumpStructure(out, "prev", stubInfo.u.putByIdTransition.previousStructure.get(), ident);
                out.print(", ");
                dumpStructure(out, "next", stubInfo.u.putByIdTransition.structure.get(), ident);
                if (stubInfo.u.putByIdTransition.rawConditionSet)
                    out.print(", ", ObjectPropertyConditionSet::fromRawPointer(stubInfo.u.putByIdTransition.rawConditionSet));
                break;
            case access_put_by_id_list: {
                // Polymorphic case: dump every access the list has accumulated.
                out.printf("list = [");
                PolymorphicPutByIdList* list = stubInfo.u.putByIdList.list;
                CommaPrinter comma;
                for (unsigned i = 0; i < list->size(); ++i) {
                    out.print(comma, "(");
                    const PutByIdAccess& access = list->at(i);
                    
                    if (access.isReplace()) {
                        out.print("replace, ");
                        dumpStructure(out, "struct", access.oldStructure(), ident);
                    } else if (access.isSetter()) {
                        out.print("setter, ");
                        dumpStructure(out, "struct", access.oldStructure(), ident);
                    } else if (access.isCustom()) {
                        out.print("custom, ");
                        dumpStructure(out, "struct", access.oldStructure(), ident);
                    } else if (access.isTransition()) {
                        out.print("transition, ");
                        dumpStructure(out, "prev", access.oldStructure(), ident);
                        out.print(", ");
                        dumpStructure(out, "next", access.newStructure(), ident);
                        if (!access.conditionSet().isEmpty())
                            out.print(", ", access.conditionSet());
                    } else
                        out.print("unknown");
                    
                    out.print(")");
                }
                out.print("]");
                break;
            }
            case access_unset:
                out.printf("unset");
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }
            out.printf(")");
        }
    }
#else
    UNUSED_PARAM(map);
#endif
}
501
// Dumps a call-family instruction (dst, callee, argCount, registerOffset),
// optionally followed by LLInt/JIT call-link cache state, then the array and
// value profiles. Advances `it` to the instruction's last slot.
void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap& map)
{
    int dst = (++it)->u.operand;
    int func = (++it)->u.operand;
    int argCount = (++it)->u.operand;
    int registerOffset = (++it)->u.operand;
    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s, %s, %d, %d", registerName(dst).data(), registerName(func).data(), argCount, registerOffset);
    if (cacheDumpMode == DumpCaches) {
        // LLInt-level cache: the callee the interpreter last saw at this site.
        LLIntCallLinkInfo* callLinkInfo = it[1].u.callLinkInfo;
        if (callLinkInfo->lastSeenCallee) {
            out.printf(
                " llint(%p, exec %p)",
                callLinkInfo->lastSeenCallee.get(),
                callLinkInfo->lastSeenCallee->executable());
        }
#if ENABLE(JIT)
        if (CallLinkInfo* info = map.get(CodeOrigin(location))) {
            JSFunction* target = info->lastSeenCallee();
            if (target)
                out.printf(" jit(%p, exec %p)", target, target->executable());
        }
        
        if (jitType() != JITCode::FTLJIT)
            out.print(" status(", CallLinkStatus::computeFor(this, location, map), ")");
#else
        UNUSED_PARAM(map);
#endif
    }
    // Skip the link-info and slot-base operands to reach the profiles.
    ++it;
    ++it;
    dumpArrayProfiling(out, it, hasPrintedProfiling);
    dumpValueProfiling(out, it, hasPrintedProfiling);
}
536
// Dumps a put_by_id-family instruction (base, identifier, value), then skips
// the remaining cache-related operand slots of the instruction.
void CodeBlock::printPutByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
{
    int r0 = (++it)->u.operand;
    int id0 = (++it)->u.operand;
    int r1 = (++it)->u.operand;
    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data());
    it += 5; // Skip the inline-cache metadata operands.
}
546
547 void CodeBlock::dumpSource()
548 {
549     dumpSource(WTF::dataFile());
550 }
551
552 void CodeBlock::dumpSource(PrintStream& out)
553 {
554     ScriptExecutable* executable = ownerExecutable();
555     if (executable->isFunctionExecutable()) {
556         FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
557         String source = functionExecutable->source().provider()->getRange(
558             functionExecutable->parametersStartOffset(),
559             functionExecutable->typeProfilingEndOffset() + 1); // Type profiling end offset is the character before the '}'.
560         
561         out.print("function ", inferredName(), source);
562         return;
563     }
564     out.print(executable->source().toString());
565 }
566
567 void CodeBlock::dumpBytecode()
568 {
569     dumpBytecode(WTF::dataFile());
570 }
571
572 void CodeBlock::dumpBytecode(PrintStream& out)
573 {
574     // We only use the ExecState* for things that don't actually lead to JS execution,
575     // like converting a JSString to a String. Hence the globalExec is appropriate.
576     ExecState* exec = m_globalObject->globalExec();
577     
578     size_t instructionCount = 0;
579
580     for (size_t i = 0; i < instructions().size(); i += opcodeLengths[exec->interpreter()->getOpcodeID(instructions()[i].u.opcode)])
581         ++instructionCount;
582
583     out.print(*this);
584     out.printf(
585         ": %lu m_instructions; %lu bytes; %d parameter(s); %d callee register(s); %d variable(s)",
586         static_cast<unsigned long>(instructions().size()),
587         static_cast<unsigned long>(instructions().size() * sizeof(Instruction)),
588         m_numParameters, m_numCalleeRegisters, m_numVars);
589     if (needsActivation() && codeType() == FunctionCode)
590         out.printf("; lexical environment in r%d", activationRegister().offset());
591     out.printf("\n");
592     
593     StubInfoMap stubInfos;
594     CallLinkInfoMap callLinkInfos;
595     getStubInfoMap(stubInfos);
596     getCallLinkInfoMap(callLinkInfos);
597     
598     const Instruction* begin = instructions().begin();
599     const Instruction* end = instructions().end();
600     for (const Instruction* it = begin; it != end; ++it)
601         dumpBytecode(out, exec, begin, it, stubInfos, callLinkInfos);
602     
603     if (numberOfIdentifiers()) {
604         out.printf("\nIdentifiers:\n");
605         size_t i = 0;
606         do {
607             out.printf("  id%u = %s\n", static_cast<unsigned>(i), identifier(i).string().utf8().data());
608             ++i;
609         } while (i != numberOfIdentifiers());
610     }
611
612     if (!m_constantRegisters.isEmpty()) {
613         out.printf("\nConstants:\n");
614         size_t i = 0;
615         do {
616             const char* sourceCodeRepresentationDescription = nullptr;
617             switch (m_constantsSourceCodeRepresentation[i]) {
618             case SourceCodeRepresentation::Double:
619                 sourceCodeRepresentationDescription = ": in source as double";
620                 break;
621             case SourceCodeRepresentation::Integer:
622                 sourceCodeRepresentationDescription = ": in source as integer";
623                 break;
624             case SourceCodeRepresentation::Other:
625                 sourceCodeRepresentationDescription = "";
626                 break;
627             }
628             out.printf("   k%u = %s%s\n", static_cast<unsigned>(i), toCString(m_constantRegisters[i].get()).data(), sourceCodeRepresentationDescription);
629             ++i;
630         } while (i < m_constantRegisters.size());
631     }
632
633     if (size_t count = m_unlinkedCode->numberOfRegExps()) {
634         out.printf("\nm_regexps:\n");
635         size_t i = 0;
636         do {
637             out.printf("  re%u = %s\n", static_cast<unsigned>(i), regexpToSourceString(m_unlinkedCode->regexp(i)).data());
638             ++i;
639         } while (i < count);
640     }
641
642     if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
643         out.printf("\nException Handlers:\n");
644         unsigned i = 0;
645         do {
646             HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
647             out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] } %s\n",
648                 i + 1, handler.start, handler.end, handler.target, handler.typeName());
649             ++i;
650         } while (i < m_rareData->m_exceptionHandlers.size());
651     }
652     
653     if (m_rareData && !m_rareData->m_switchJumpTables.isEmpty()) {
654         out.printf("Switch Jump Tables:\n");
655         unsigned i = 0;
656         do {
657             out.printf("  %1d = {\n", i);
658             int entry = 0;
659             Vector<int32_t>::const_iterator end = m_rareData->m_switchJumpTables[i].branchOffsets.end();
660             for (Vector<int32_t>::const_iterator iter = m_rareData->m_switchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) {
661                 if (!*iter)
662                     continue;
663                 out.printf("\t\t%4d => %04d\n", entry + m_rareData->m_switchJumpTables[i].min, *iter);
664             }
665             out.printf("      }\n");
666             ++i;
667         } while (i < m_rareData->m_switchJumpTables.size());
668     }
669     
670     if (m_rareData && !m_rareData->m_stringSwitchJumpTables.isEmpty()) {
671         out.printf("\nString Switch Jump Tables:\n");
672         unsigned i = 0;
673         do {
674             out.printf("  %1d = {\n", i);
675             StringJumpTable::StringOffsetTable::const_iterator end = m_rareData->m_stringSwitchJumpTables[i].offsetTable.end();
676             for (StringJumpTable::StringOffsetTable::const_iterator iter = m_rareData->m_stringSwitchJumpTables[i].offsetTable.begin(); iter != end; ++iter)
677                 out.printf("\t\t\"%s\" => %04d\n", iter->key->utf8().data(), iter->value.branchOffset);
678             out.printf("      }\n");
679             ++i;
680         } while (i < m_rareData->m_stringSwitchJumpTables.size());
681     }
682
683     out.printf("\n");
684 }
685
686 void CodeBlock::beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling)
687 {
688     if (hasPrintedProfiling) {
689         out.print("; ");
690         return;
691     }
692     
693     out.print("    ");
694     hasPrintedProfiling = true;
695 }
696
// Advances `it` onto the instruction's value-profile slot and, when the
// profile has anything to say, appends its brief description.
void CodeBlock::dumpValueProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
{
    // Profiles can be mutated concurrently by the JIT; take the block's lock.
    ConcurrentJITLocker locker(m_lock);
    
    ++it;
    CString description = it->u.profile->briefDescription(locker);
    if (!description.length())
        return;
    beginDumpProfiling(out, hasPrintedProfiling);
    out.print(description);
}
708
// Advances `it` onto the instruction's array-profile slot and, when a profile
// exists and has anything to say, appends its brief description.
void CodeBlock::dumpArrayProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
{
    // Profiles can be mutated concurrently by the JIT; take the block's lock.
    ConcurrentJITLocker locker(m_lock);
    
    ++it;
    // Unlike value profiles, the array-profile slot may be null.
    if (!it->u.arrayProfile)
        return;
    CString description = it->u.arrayProfile->briefDescription(locker, this);
    if (!description.length())
        return;
    beginDumpProfiling(out, hasPrintedProfiling);
    out.print(description);
}
722
723 void CodeBlock::dumpRareCaseProfile(PrintStream& out, const char* name, RareCaseProfile* profile, bool& hasPrintedProfiling)
724 {
725     if (!profile || !profile->m_counter)
726         return;
727
728     beginDumpProfiling(out, hasPrintedProfiling);
729     out.print(name, profile->m_counter);
730 }
731
// Prints the "[location] opcode-name " prefix shared by all instruction
// dumps; fixed widths keep the listing's columns aligned. The ExecState and
// Instruction parameters exist only for signature parity with callers.
void CodeBlock::printLocationAndOp(PrintStream& out, ExecState*, int location, const Instruction*&, const char* op)
{
    out.printf("[%4d] %-17s ", location, op);
}
736
737 void CodeBlock::printLocationOpAndRegisterOperand(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, int operand)
738 {
739     printLocationAndOp(out, exec, location, it, op);
740     out.printf("%s", registerName(operand).data());
741 }
742
743 void CodeBlock::dumpBytecode(
744     PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it,
745     const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
746 {
747     int location = it - begin;
748     bool hasPrintedProfiling = false;
749     OpcodeID opcode = exec->interpreter()->getOpcodeID(it->u.opcode);
750     switch (opcode) {
751         case op_enter: {
752             printLocationAndOp(out, exec, location, it, "enter");
753             break;
754         }
755         case op_get_scope: {
756             int r0 = (++it)->u.operand;
757             printLocationOpAndRegisterOperand(out, exec, location, it, "get_scope", r0);
758             break;
759         }
760         case op_create_direct_arguments: {
761             int r0 = (++it)->u.operand;
762             printLocationAndOp(out, exec, location, it, "create_direct_arguments");
763             out.printf("%s", registerName(r0).data());
764             break;
765         }
766         case op_create_scoped_arguments: {
767             int r0 = (++it)->u.operand;
768             int r1 = (++it)->u.operand;
769             printLocationAndOp(out, exec, location, it, "create_scoped_arguments");
770             out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
771             break;
772         }
773         case op_create_out_of_band_arguments: {
774             int r0 = (++it)->u.operand;
775             printLocationAndOp(out, exec, location, it, "create_out_of_band_arguments");
776             out.printf("%s", registerName(r0).data());
777             break;
778         }
779         case op_create_this: {
780             int r0 = (++it)->u.operand;
781             int r1 = (++it)->u.operand;
782             unsigned inferredInlineCapacity = (++it)->u.operand;
783             unsigned cachedFunction = (++it)->u.operand;
784             printLocationAndOp(out, exec, location, it, "create_this");
785             out.printf("%s, %s, %u, %u", registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity, cachedFunction);
786             break;
787         }
788         case op_to_this: {
789             int r0 = (++it)->u.operand;
790             printLocationOpAndRegisterOperand(out, exec, location, it, "to_this", r0);
791             Structure* structure = (++it)->u.structure.get();
792             if (structure)
793                 out.print(", cache(struct = ", RawPointer(structure), ")");
794             out.print(", ", (++it)->u.toThisStatus);
795             break;
796         }
797         case op_check_tdz: {
798             int r0 = (++it)->u.operand;
799             printLocationOpAndRegisterOperand(out, exec, location, it, "op_check_tdz", r0);
800             break;
801         }
802         case op_new_object: {
803             int r0 = (++it)->u.operand;
804             unsigned inferredInlineCapacity = (++it)->u.operand;
805             printLocationAndOp(out, exec, location, it, "new_object");
806             out.printf("%s, %u", registerName(r0).data(), inferredInlineCapacity);
807             ++it; // Skip object allocation profile.
808             break;
809         }
810         case op_new_array: {
811             int dst = (++it)->u.operand;
812             int argv = (++it)->u.operand;
813             int argc = (++it)->u.operand;
814             printLocationAndOp(out, exec, location, it, "new_array");
815             out.printf("%s, %s, %d", registerName(dst).data(), registerName(argv).data(), argc);
816             ++it; // Skip array allocation profile.
817             break;
818         }
819         case op_new_array_with_size: {
820             int dst = (++it)->u.operand;
821             int length = (++it)->u.operand;
822             printLocationAndOp(out, exec, location, it, "new_array_with_size");
823             out.printf("%s, %s", registerName(dst).data(), registerName(length).data());
824             ++it; // Skip array allocation profile.
825             break;
826         }
827         case op_new_array_buffer: {
828             int dst = (++it)->u.operand;
829             int argv = (++it)->u.operand;
830             int argc = (++it)->u.operand;
831             printLocationAndOp(out, exec, location, it, "new_array_buffer");
832             out.printf("%s, %d, %d", registerName(dst).data(), argv, argc);
833             ++it; // Skip array allocation profile.
834             break;
835         }
836         case op_new_regexp: {
837             int r0 = (++it)->u.operand;
838             int re0 = (++it)->u.operand;
839             printLocationAndOp(out, exec, location, it, "new_regexp");
840             out.printf("%s, ", registerName(r0).data());
841             if (r0 >=0 && r0 < (int)m_unlinkedCode->numberOfRegExps())
842                 out.printf("%s", regexpName(re0, regexp(re0)).data());
843             else
844                 out.printf("bad_regexp(%d)", re0);
845             break;
846         }
847         case op_mov: {
848             int r0 = (++it)->u.operand;
849             int r1 = (++it)->u.operand;
850             printLocationAndOp(out, exec, location, it, "mov");
851             out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
852             break;
853         }
        case op_profile_type: {
            // Only the profiled register operand is shown; the instruction's
            // remaining four operands are consumed without being dumped.
            int r0 = (++it)->u.operand;
            ++it;
            ++it;
            ++it;
            ++it;
            printLocationAndOp(out, exec, location, it, "op_profile_type");
            out.printf("%s", registerName(r0).data());
            break;
        }
864         case op_profile_control_flow: {
865             BasicBlockLocation* basicBlockLocation = (++it)->u.basicBlockLocation;
866             printLocationAndOp(out, exec, location, it, "profile_control_flow");
867             out.printf("[%d, %d]", basicBlockLocation->startOffset(), basicBlockLocation->endOffset());
868             break;
869         }
870         case op_not: {
871             printUnaryOp(out, exec, location, it, "not");
872             break;
873         }
874         case op_eq: {
875             printBinaryOp(out, exec, location, it, "eq");
876             break;
877         }
878         case op_eq_null: {
879             printUnaryOp(out, exec, location, it, "eq_null");
880             break;
881         }
882         case op_neq: {
883             printBinaryOp(out, exec, location, it, "neq");
884             break;
885         }
886         case op_neq_null: {
887             printUnaryOp(out, exec, location, it, "neq_null");
888             break;
889         }
890         case op_stricteq: {
891             printBinaryOp(out, exec, location, it, "stricteq");
892             break;
893         }
894         case op_nstricteq: {
895             printBinaryOp(out, exec, location, it, "nstricteq");
896             break;
897         }
898         case op_less: {
899             printBinaryOp(out, exec, location, it, "less");
900             break;
901         }
902         case op_lesseq: {
903             printBinaryOp(out, exec, location, it, "lesseq");
904             break;
905         }
906         case op_greater: {
907             printBinaryOp(out, exec, location, it, "greater");
908             break;
909         }
910         case op_greatereq: {
911             printBinaryOp(out, exec, location, it, "greatereq");
912             break;
913         }
914         case op_inc: {
915             int r0 = (++it)->u.operand;
916             printLocationOpAndRegisterOperand(out, exec, location, it, "inc", r0);
917             break;
918         }
919         case op_dec: {
920             int r0 = (++it)->u.operand;
921             printLocationOpAndRegisterOperand(out, exec, location, it, "dec", r0);
922             break;
923         }
924         case op_to_number: {
925             printUnaryOp(out, exec, location, it, "to_number");
926             break;
927         }
928         case op_to_string: {
929             printUnaryOp(out, exec, location, it, "to_string");
930             break;
931         }
932         case op_negate: {
933             printUnaryOp(out, exec, location, it, "negate");
934             break;
935         }
936         case op_add: {
937             printBinaryOp(out, exec, location, it, "add");
938             ++it;
939             break;
940         }
941         case op_mul: {
942             printBinaryOp(out, exec, location, it, "mul");
943             ++it;
944             break;
945         }
946         case op_div: {
947             printBinaryOp(out, exec, location, it, "div");
948             ++it;
949             break;
950         }
951         case op_mod: {
952             printBinaryOp(out, exec, location, it, "mod");
953             break;
954         }
955         case op_sub: {
956             printBinaryOp(out, exec, location, it, "sub");
957             ++it;
958             break;
959         }
960         case op_lshift: {
961             printBinaryOp(out, exec, location, it, "lshift");
962             break;            
963         }
964         case op_rshift: {
965             printBinaryOp(out, exec, location, it, "rshift");
966             break;
967         }
968         case op_urshift: {
969             printBinaryOp(out, exec, location, it, "urshift");
970             break;
971         }
972         case op_bitand: {
973             printBinaryOp(out, exec, location, it, "bitand");
974             ++it;
975             break;
976         }
977         case op_bitxor: {
978             printBinaryOp(out, exec, location, it, "bitxor");
979             ++it;
980             break;
981         }
982         case op_bitor: {
983             printBinaryOp(out, exec, location, it, "bitor");
984             ++it;
985             break;
986         }
987         case op_check_has_instance: {
988             int r0 = (++it)->u.operand;
989             int r1 = (++it)->u.operand;
990             int r2 = (++it)->u.operand;
991             int offset = (++it)->u.operand;
992             printLocationAndOp(out, exec, location, it, "check_has_instance");
993             out.printf("%s, %s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), offset, location + offset);
994             break;
995         }
996         case op_instanceof: {
997             int r0 = (++it)->u.operand;
998             int r1 = (++it)->u.operand;
999             int r2 = (++it)->u.operand;
1000             printLocationAndOp(out, exec, location, it, "instanceof");
1001             out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1002             break;
1003         }
1004         case op_unsigned: {
1005             printUnaryOp(out, exec, location, it, "unsigned");
1006             break;
1007         }
1008         case op_typeof: {
1009             printUnaryOp(out, exec, location, it, "typeof");
1010             break;
1011         }
1012         case op_is_undefined: {
1013             printUnaryOp(out, exec, location, it, "is_undefined");
1014             break;
1015         }
1016         case op_is_boolean: {
1017             printUnaryOp(out, exec, location, it, "is_boolean");
1018             break;
1019         }
1020         case op_is_number: {
1021             printUnaryOp(out, exec, location, it, "is_number");
1022             break;
1023         }
1024         case op_is_string: {
1025             printUnaryOp(out, exec, location, it, "is_string");
1026             break;
1027         }
1028         case op_is_object: {
1029             printUnaryOp(out, exec, location, it, "is_object");
1030             break;
1031         }
1032         case op_is_object_or_null: {
1033             printUnaryOp(out, exec, location, it, "is_object_or_null");
1034             break;
1035         }
1036         case op_is_function: {
1037             printUnaryOp(out, exec, location, it, "is_function");
1038             break;
1039         }
1040         case op_in: {
1041             printBinaryOp(out, exec, location, it, "in");
1042             break;
1043         }
1044         case op_get_by_id:
1045         case op_get_by_id_out_of_line:
1046         case op_get_array_length: {
1047             printGetByIdOp(out, exec, location, it);
1048             printGetByIdCacheStatus(out, exec, location, stubInfos);
1049             dumpValueProfiling(out, it, hasPrintedProfiling);
1050             break;
1051         }
1052         case op_put_by_id: {
1053             printPutByIdOp(out, exec, location, it, "put_by_id");
1054             printPutByIdCacheStatus(out, exec, location, stubInfos);
1055             break;
1056         }
1057         case op_put_by_id_out_of_line: {
1058             printPutByIdOp(out, exec, location, it, "put_by_id_out_of_line");
1059             printPutByIdCacheStatus(out, exec, location, stubInfos);
1060             break;
1061         }
1062         case op_put_by_id_transition_direct: {
1063             printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct");
1064             printPutByIdCacheStatus(out, exec, location, stubInfos);
1065             break;
1066         }
1067         case op_put_by_id_transition_direct_out_of_line: {
1068             printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct_out_of_line");
1069             printPutByIdCacheStatus(out, exec, location, stubInfos);
1070             break;
1071         }
1072         case op_put_by_id_transition_normal: {
1073             printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal");
1074             printPutByIdCacheStatus(out, exec, location, stubInfos);
1075             break;
1076         }
1077         case op_put_by_id_transition_normal_out_of_line: {
1078             printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal_out_of_line");
1079             printPutByIdCacheStatus(out, exec, location, stubInfos);
1080             break;
1081         }
1082         case op_put_getter_by_id: {
1083             int r0 = (++it)->u.operand;
1084             int id0 = (++it)->u.operand;
1085             int r1 = (++it)->u.operand;
1086             printLocationAndOp(out, exec, location, it, "put_getter_by_id");
1087             out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data());
1088             break;
1089         }
1090         case op_put_setter_by_id: {
1091             int r0 = (++it)->u.operand;
1092             int id0 = (++it)->u.operand;
1093             int r1 = (++it)->u.operand;
1094             printLocationAndOp(out, exec, location, it, "put_setter_by_id");
1095             out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data());
1096             break;
1097         }
1098         case op_put_getter_setter: {
1099             int r0 = (++it)->u.operand;
1100             int id0 = (++it)->u.operand;
1101             int r1 = (++it)->u.operand;
1102             int r2 = (++it)->u.operand;
1103             printLocationAndOp(out, exec, location, it, "put_getter_setter");
1104             out.printf("%s, %s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data(), registerName(r2).data());
1105             break;
1106         }
1107         case op_del_by_id: {
1108             int r0 = (++it)->u.operand;
1109             int r1 = (++it)->u.operand;
1110             int id0 = (++it)->u.operand;
1111             printLocationAndOp(out, exec, location, it, "del_by_id");
1112             out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
1113             break;
1114         }
1115         case op_get_by_val: {
1116             int r0 = (++it)->u.operand;
1117             int r1 = (++it)->u.operand;
1118             int r2 = (++it)->u.operand;
1119             printLocationAndOp(out, exec, location, it, "get_by_val");
1120             out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1121             dumpArrayProfiling(out, it, hasPrintedProfiling);
1122             dumpValueProfiling(out, it, hasPrintedProfiling);
1123             break;
1124         }
1125         case op_put_by_val: {
1126             int r0 = (++it)->u.operand;
1127             int r1 = (++it)->u.operand;
1128             int r2 = (++it)->u.operand;
1129             printLocationAndOp(out, exec, location, it, "put_by_val");
1130             out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1131             dumpArrayProfiling(out, it, hasPrintedProfiling);
1132             break;
1133         }
1134         case op_put_by_val_direct: {
1135             int r0 = (++it)->u.operand;
1136             int r1 = (++it)->u.operand;
1137             int r2 = (++it)->u.operand;
1138             printLocationAndOp(out, exec, location, it, "put_by_val_direct");
1139             out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1140             dumpArrayProfiling(out, it, hasPrintedProfiling);
1141             break;
1142         }
1143         case op_del_by_val: {
1144             int r0 = (++it)->u.operand;
1145             int r1 = (++it)->u.operand;
1146             int r2 = (++it)->u.operand;
1147             printLocationAndOp(out, exec, location, it, "del_by_val");
1148             out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1149             break;
1150         }
1151         case op_put_by_index: {
1152             int r0 = (++it)->u.operand;
1153             unsigned n0 = (++it)->u.operand;
1154             int r1 = (++it)->u.operand;
1155             printLocationAndOp(out, exec, location, it, "put_by_index");
1156             out.printf("%s, %u, %s", registerName(r0).data(), n0, registerName(r1).data());
1157             break;
1158         }
1159         case op_jmp: {
1160             int offset = (++it)->u.operand;
1161             printLocationAndOp(out, exec, location, it, "jmp");
1162             out.printf("%d(->%d)", offset, location + offset);
1163             break;
1164         }
1165         case op_jtrue: {
1166             printConditionalJump(out, exec, begin, it, location, "jtrue");
1167             break;
1168         }
1169         case op_jfalse: {
1170             printConditionalJump(out, exec, begin, it, location, "jfalse");
1171             break;
1172         }
1173         case op_jeq_null: {
1174             printConditionalJump(out, exec, begin, it, location, "jeq_null");
1175             break;
1176         }
1177         case op_jneq_null: {
1178             printConditionalJump(out, exec, begin, it, location, "jneq_null");
1179             break;
1180         }
1181         case op_jneq_ptr: {
1182             int r0 = (++it)->u.operand;
1183             Special::Pointer pointer = (++it)->u.specialPointer;
1184             int offset = (++it)->u.operand;
1185             printLocationAndOp(out, exec, location, it, "jneq_ptr");
1186             out.printf("%s, %d (%p), %d(->%d)", registerName(r0).data(), pointer, m_globalObject->actualPointerFor(pointer), offset, location + offset);
1187             break;
1188         }
1189         case op_jless: {
1190             int r0 = (++it)->u.operand;
1191             int r1 = (++it)->u.operand;
1192             int offset = (++it)->u.operand;
1193             printLocationAndOp(out, exec, location, it, "jless");
1194             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1195             break;
1196         }
1197         case op_jlesseq: {
1198             int r0 = (++it)->u.operand;
1199             int r1 = (++it)->u.operand;
1200             int offset = (++it)->u.operand;
1201             printLocationAndOp(out, exec, location, it, "jlesseq");
1202             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1203             break;
1204         }
1205         case op_jgreater: {
1206             int r0 = (++it)->u.operand;
1207             int r1 = (++it)->u.operand;
1208             int offset = (++it)->u.operand;
1209             printLocationAndOp(out, exec, location, it, "jgreater");
1210             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1211             break;
1212         }
1213         case op_jgreatereq: {
1214             int r0 = (++it)->u.operand;
1215             int r1 = (++it)->u.operand;
1216             int offset = (++it)->u.operand;
1217             printLocationAndOp(out, exec, location, it, "jgreatereq");
1218             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1219             break;
1220         }
1221         case op_jnless: {
1222             int r0 = (++it)->u.operand;
1223             int r1 = (++it)->u.operand;
1224             int offset = (++it)->u.operand;
1225             printLocationAndOp(out, exec, location, it, "jnless");
1226             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1227             break;
1228         }
1229         case op_jnlesseq: {
1230             int r0 = (++it)->u.operand;
1231             int r1 = (++it)->u.operand;
1232             int offset = (++it)->u.operand;
1233             printLocationAndOp(out, exec, location, it, "jnlesseq");
1234             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1235             break;
1236         }
1237         case op_jngreater: {
1238             int r0 = (++it)->u.operand;
1239             int r1 = (++it)->u.operand;
1240             int offset = (++it)->u.operand;
1241             printLocationAndOp(out, exec, location, it, "jngreater");
1242             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1243             break;
1244         }
1245         case op_jngreatereq: {
1246             int r0 = (++it)->u.operand;
1247             int r1 = (++it)->u.operand;
1248             int offset = (++it)->u.operand;
1249             printLocationAndOp(out, exec, location, it, "jngreatereq");
1250             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1251             break;
1252         }
1253         case op_loop_hint: {
1254             printLocationAndOp(out, exec, location, it, "loop_hint");
1255             break;
1256         }
1257         case op_switch_imm: {
1258             int tableIndex = (++it)->u.operand;
1259             int defaultTarget = (++it)->u.operand;
1260             int scrutineeRegister = (++it)->u.operand;
1261             printLocationAndOp(out, exec, location, it, "switch_imm");
1262             out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1263             break;
1264         }
1265         case op_switch_char: {
1266             int tableIndex = (++it)->u.operand;
1267             int defaultTarget = (++it)->u.operand;
1268             int scrutineeRegister = (++it)->u.operand;
1269             printLocationAndOp(out, exec, location, it, "switch_char");
1270             out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1271             break;
1272         }
1273         case op_switch_string: {
1274             int tableIndex = (++it)->u.operand;
1275             int defaultTarget = (++it)->u.operand;
1276             int scrutineeRegister = (++it)->u.operand;
1277             printLocationAndOp(out, exec, location, it, "switch_string");
1278             out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1279             break;
1280         }
1281         case op_new_func: {
1282             int r0 = (++it)->u.operand;
1283             int r1 = (++it)->u.operand;
1284             int f0 = (++it)->u.operand;
1285             printLocationAndOp(out, exec, location, it, "new_func");
1286             out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
1287             break;
1288         }
1289         case op_new_func_exp: {
1290             int r0 = (++it)->u.operand;
1291             int r1 = (++it)->u.operand;
1292             int f0 = (++it)->u.operand;
1293             printLocationAndOp(out, exec, location, it, "new_func_exp");
1294             out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
1295             break;
1296         }
1297         case op_call: {
1298             printCallOp(out, exec, location, it, "call", DumpCaches, hasPrintedProfiling, callLinkInfos);
1299             break;
1300         }
1301         case op_call_eval: {
1302             printCallOp(out, exec, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling, callLinkInfos);
1303             break;
1304         }
1305             
        case op_construct_varargs:
        case op_call_varargs: {
            // Shared dump for call_varargs / construct_varargs; the mnemonic
            // printed is picked from the decoded opcode below.
            int result = (++it)->u.operand;
            int callee = (++it)->u.operand;
            int thisValue = (++it)->u.operand;
            int arguments = (++it)->u.operand;
            int firstFreeRegister = (++it)->u.operand;
            int varArgOffset = (++it)->u.operand;
            // One further operand is skipped without being dumped (the value
            // profile is handled separately by dumpValueProfiling below).
            ++it;
            printLocationAndOp(out, exec, location, it, opcode == op_call_varargs ? "call_varargs" : "construct_varargs");
            out.printf("%s, %s, %s, %s, %d, %d", registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister, varArgOffset);
            dumpValueProfiling(out, it, hasPrintedProfiling);
            break;
        }
1320
1321         case op_ret: {
1322             int r0 = (++it)->u.operand;
1323             printLocationOpAndRegisterOperand(out, exec, location, it, "ret", r0);
1324             break;
1325         }
1326         case op_construct: {
1327             printCallOp(out, exec, location, it, "construct", DumpCaches, hasPrintedProfiling, callLinkInfos);
1328             break;
1329         }
1330         case op_strcat: {
1331             int r0 = (++it)->u.operand;
1332             int r1 = (++it)->u.operand;
1333             int count = (++it)->u.operand;
1334             printLocationAndOp(out, exec, location, it, "strcat");
1335             out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), count);
1336             break;
1337         }
1338         case op_to_primitive: {
1339             int r0 = (++it)->u.operand;
1340             int r1 = (++it)->u.operand;
1341             printLocationAndOp(out, exec, location, it, "to_primitive");
1342             out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
1343             break;
1344         }
1345         case op_get_enumerable_length: {
1346             int dst = it[1].u.operand;
1347             int base = it[2].u.operand;
1348             printLocationAndOp(out, exec, location, it, "op_get_enumerable_length");
1349             out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
1350             it += OPCODE_LENGTH(op_get_enumerable_length) - 1;
1351             break;
1352         }
1353         case op_has_indexed_property: {
1354             int dst = it[1].u.operand;
1355             int base = it[2].u.operand;
1356             int propertyName = it[3].u.operand;
1357             ArrayProfile* arrayProfile = it[4].u.arrayProfile;
1358             printLocationAndOp(out, exec, location, it, "op_has_indexed_property");
1359             out.printf("%s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), arrayProfile);
1360             it += OPCODE_LENGTH(op_has_indexed_property) - 1;
1361             break;
1362         }
1363         case op_has_structure_property: {
1364             int dst = it[1].u.operand;
1365             int base = it[2].u.operand;
1366             int propertyName = it[3].u.operand;
1367             int enumerator = it[4].u.operand;
1368             printLocationAndOp(out, exec, location, it, "op_has_structure_property");
1369             out.printf("%s, %s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(enumerator).data());
1370             it += OPCODE_LENGTH(op_has_structure_property) - 1;
1371             break;
1372         }
1373         case op_has_generic_property: {
1374             int dst = it[1].u.operand;
1375             int base = it[2].u.operand;
1376             int propertyName = it[3].u.operand;
1377             printLocationAndOp(out, exec, location, it, "op_has_generic_property");
1378             out.printf("%s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data());
1379             it += OPCODE_LENGTH(op_has_generic_property) - 1;
1380             break;
1381         }
1382         case op_get_direct_pname: {
1383             int dst = it[1].u.operand;
1384             int base = it[2].u.operand;
1385             int propertyName = it[3].u.operand;
1386             int index = it[4].u.operand;
1387             int enumerator = it[5].u.operand;
1388             ValueProfile* profile = it[6].u.profile;
1389             printLocationAndOp(out, exec, location, it, "op_get_direct_pname");
1390             out.printf("%s, %s, %s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(index).data(), registerName(enumerator).data(), profile);
1391             it += OPCODE_LENGTH(op_get_direct_pname) - 1;
1392             break;
1393
1394         }
1395         case op_get_property_enumerator: {
1396             int dst = it[1].u.operand;
1397             int base = it[2].u.operand;
1398             printLocationAndOp(out, exec, location, it, "op_get_property_enumerator");
1399             out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
1400             it += OPCODE_LENGTH(op_get_property_enumerator) - 1;
1401             break;
1402         }
1403         case op_enumerator_structure_pname: {
1404             int dst = it[1].u.operand;
1405             int enumerator = it[2].u.operand;
1406             int index = it[3].u.operand;
1407             printLocationAndOp(out, exec, location, it, "op_enumerator_structure_pname");
1408             out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data());
1409             it += OPCODE_LENGTH(op_enumerator_structure_pname) - 1;
1410             break;
1411         }
1412         case op_enumerator_generic_pname: {
1413             int dst = it[1].u.operand;
1414             int enumerator = it[2].u.operand;
1415             int index = it[3].u.operand;
1416             printLocationAndOp(out, exec, location, it, "op_enumerator_generic_pname");
1417             out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data());
1418             it += OPCODE_LENGTH(op_enumerator_generic_pname) - 1;
1419             break;
1420         }
1421         case op_to_index_string: {
1422             int dst = it[1].u.operand;
1423             int index = it[2].u.operand;
1424             printLocationAndOp(out, exec, location, it, "op_to_index_string");
1425             out.printf("%s, %s", registerName(dst).data(), registerName(index).data());
1426             it += OPCODE_LENGTH(op_to_index_string) - 1;
1427             break;
1428         }
1429         case op_push_with_scope: {
1430             int dst = (++it)->u.operand;
1431             int newScope = (++it)->u.operand;
1432             int currentScope = (++it)->u.operand;
1433             printLocationAndOp(out, exec, location, it, "push_with_scope");
1434             out.printf("%s, %s, %s", registerName(dst).data(), registerName(newScope).data(), registerName(currentScope).data());
1435             break;
1436         }
1437         case op_get_parent_scope: {
1438             int dst = (++it)->u.operand;
1439             int parentScope = (++it)->u.operand;
1440             printLocationAndOp(out, exec, location, it, "get_parent_scope");
1441             out.printf("%s, %s", registerName(dst).data(), registerName(parentScope).data());
1442             break;
1443         }
1444         case op_create_lexical_environment: {
1445             int dst = (++it)->u.operand;
1446             int scope = (++it)->u.operand;
1447             int symbolTable = (++it)->u.operand;
1448             int initialValue = (++it)->u.operand;
1449             printLocationAndOp(out, exec, location, it, "create_lexical_environment");
1450             out.printf("%s, %s, %s, %s", 
1451                 registerName(dst).data(), registerName(scope).data(), registerName(symbolTable).data(), registerName(initialValue).data());
1452             break;
1453         }
1454         case op_catch: {
1455             int r0 = (++it)->u.operand;
1456             int r1 = (++it)->u.operand;
1457             printLocationAndOp(out, exec, location, it, "catch");
1458             out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
1459             break;
1460         }
1461         case op_throw: {
1462             int r0 = (++it)->u.operand;
1463             printLocationOpAndRegisterOperand(out, exec, location, it, "throw", r0);
1464             break;
1465         }
1466         case op_throw_static_error: {
1467             int k0 = (++it)->u.operand;
1468             int k1 = (++it)->u.operand;
1469             printLocationAndOp(out, exec, location, it, "throw_static_error");
1470             out.printf("%s, %s", constantName(k0).data(), k1 ? "true" : "false");
1471             break;
1472         }
1473         case op_debug: {
1474             int debugHookID = (++it)->u.operand;
1475             int hasBreakpointFlag = (++it)->u.operand;
1476             printLocationAndOp(out, exec, location, it, "debug");
1477             out.printf("%s %d", debugHookName(debugHookID), hasBreakpointFlag);
1478             break;
1479         }
1480         case op_profile_will_call: {
1481             int function = (++it)->u.operand;
1482             printLocationOpAndRegisterOperand(out, exec, location, it, "profile_will_call", function);
1483             break;
1484         }
1485         case op_profile_did_call: {
1486             int function = (++it)->u.operand;
1487             printLocationOpAndRegisterOperand(out, exec, location, it, "profile_did_call", function);
1488             break;
1489         }
1490         case op_end: {
1491             int r0 = (++it)->u.operand;
1492             printLocationOpAndRegisterOperand(out, exec, location, it, "end", r0);
1493             break;
1494         }
1495         case op_resolve_scope: {
1496             int r0 = (++it)->u.operand;
1497             int scope = (++it)->u.operand;
1498             int id0 = (++it)->u.operand;
1499             ResolveModeAndType modeAndType = ResolveModeAndType((++it)->u.operand);
1500             int depth = (++it)->u.operand;
1501             printLocationAndOp(out, exec, location, it, "resolve_scope");
1502             out.printf("%s, %s, %s, %u<%s|%s>, %d", registerName(r0).data(), registerName(scope).data(), idName(id0, identifier(id0)).data(),
1503                 modeAndType.operand(), resolveModeName(modeAndType.mode()), resolveTypeName(modeAndType.type()),
1504                 depth);
1505             ++it;
1506             break;
1507         }
1508         case op_get_from_scope: {
1509             int r0 = (++it)->u.operand;
1510             int r1 = (++it)->u.operand;
1511             int id0 = (++it)->u.operand;
1512             ResolveModeAndType modeAndType = ResolveModeAndType((++it)->u.operand);
1513             ++it; // Structure
1514             int operand = (++it)->u.operand; // Operand
1515             printLocationAndOp(out, exec, location, it, "get_from_scope");
1516             out.print(registerName(r0), ", ", registerName(r1));
1517             if (static_cast<unsigned>(id0) == UINT_MAX)
1518                 out.print(", anonymous");
1519             else
1520                 out.print(", ", idName(id0, identifier(id0)));
1521             out.print(", ", modeAndType.operand(), "<", resolveModeName(modeAndType.mode()), "|", resolveTypeName(modeAndType.type()), ">, ", operand);
1522             dumpValueProfiling(out, it, hasPrintedProfiling);
1523             break;
1524         }
1525         case op_put_to_scope: {
1526             int r0 = (++it)->u.operand;
1527             int id0 = (++it)->u.operand;
1528             int r1 = (++it)->u.operand;
1529             ResolveModeAndType modeAndType = ResolveModeAndType((++it)->u.operand);
1530             ++it; // Structure
1531             int operand = (++it)->u.operand; // Operand
1532             printLocationAndOp(out, exec, location, it, "put_to_scope");
1533             out.print(registerName(r0));
1534             if (static_cast<unsigned>(id0) == UINT_MAX)
1535                 out.print(", anonymous");
1536             else
1537                 out.print(", ", idName(id0, identifier(id0)));
1538             out.print(", ", registerName(r1), ", ", modeAndType.operand(), "<", resolveModeName(modeAndType.mode()), "|", resolveTypeName(modeAndType.type()), ">, <structure>, ", operand);
1539             break;
1540         }
1541         case op_get_from_arguments: {
1542             int r0 = (++it)->u.operand;
1543             int r1 = (++it)->u.operand;
1544             int offset = (++it)->u.operand;
1545             printLocationAndOp(out, exec, location, it, "get_from_arguments");
1546             out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), offset);
1547             dumpValueProfiling(out, it, hasPrintedProfiling);
1548             break;
1549         }
1550         case op_put_to_arguments: {
1551             int r0 = (++it)->u.operand;
1552             int offset = (++it)->u.operand;
1553             int r1 = (++it)->u.operand;
1554             printLocationAndOp(out, exec, location, it, "put_to_arguments");
1555             out.printf("%s, %d, %s", registerName(r0).data(), offset, registerName(r1).data());
1556             break;
1557         }
1558         default:
1559             RELEASE_ASSERT_NOT_REACHED();
1560     }
1561
1562     dumpRareCaseProfile(out, "rare case: ", rareCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
1563     dumpRareCaseProfile(out, "special fast case: ", specialFastCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
1564     
1565 #if ENABLE(DFG_JIT)
1566     Vector<DFG::FrequentExitSite> exitSites = exitProfile().exitSitesFor(location);
1567     if (!exitSites.isEmpty()) {
1568         out.print(" !! frequent exits: ");
1569         CommaPrinter comma;
1570         for (unsigned i = 0; i < exitSites.size(); ++i)
1571             out.print(comma, exitSites[i].kind(), " ", exitSites[i].jitType());
1572     }
1573 #else // ENABLE(DFG_JIT)
1574     UNUSED_PARAM(location);
1575 #endif // ENABLE(DFG_JIT)
1576     out.print("\n");
1577 }
1578
1579 void CodeBlock::dumpBytecode(
1580     PrintStream& out, unsigned bytecodeOffset,
1581     const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
1582 {
1583     ExecState* exec = m_globalObject->globalExec();
1584     const Instruction* it = instructions().begin() + bytecodeOffset;
1585     dumpBytecode(out, exec, instructions().begin(), it, stubInfos, callLinkInfos);
1586 }
1587
// X-macro listing the directly-held vector-like members of CodeBlock.
// NOTE(review): no comments inside the macro bodies — a // comment on a
// backslash-continued line would splice the next line into the comment.
#define FOR_EACH_MEMBER_VECTOR(macro) \
    macro(instructions) \
    macro(callLinkInfos) \
    macro(linkedCallerList) \
    macro(identifiers) \
    macro(functionExpressions) \
    macro(constantRegisters)

// X-macro listing the vector-like members stored in CodeBlock's lazily
// allocated RareData (see createRareDataIfNecessary()).
#define FOR_EACH_MEMBER_VECTOR_RARE_DATA(macro) \
    macro(regexps) \
    macro(functions) \
    macro(exceptionHandlers) \
    macro(switchJumpTables) \
    macro(stringSwitchJumpTables) \
    macro(evalCodeCache) \
    macro(expressionInfo) \
    macro(lineInfo) \
    macro(callReturnIndexVector)
1606
1607 template<typename T>
1608 static size_t sizeInBytes(const Vector<T>& vector)
1609 {
1610     return vector.capacity() * sizeof(T);
1611 }
1612
1613 namespace {
1614
1615 class PutToScopeFireDetail : public FireDetail {
1616 public:
1617     PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
1618         : m_codeBlock(codeBlock)
1619         , m_ident(ident)
1620     {
1621     }
1622     
1623     virtual void dump(PrintStream& out) const override
1624     {
1625         out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
1626     }
1627     
1628 private:
1629     CodeBlock* m_codeBlock;
1630     const Identifier& m_ident;
1631 };
1632
1633 } // anonymous namespace
1634
// Constructs a CodeBlock as a copy of an already-parsed block. Linked state
// (instructions, constants, function executables, source info) is shared or
// copied from |other|, while profiling, optimization, and debugger state is
// reset so the new block warms up independently.
// NOTE(review): m_instructions is a RefCountedArray, so copying it shares
// the underlying instruction stream rather than duplicating it — confirm
// that is the intended semantics for all CopyParsedBlockTag callers.
CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
    : m_globalObject(other.m_globalObject)
    , m_heap(other.m_heap)
    , m_numCalleeRegisters(other.m_numCalleeRegisters)
    , m_numVars(other.m_numVars)
    , m_isConstructor(other.m_isConstructor)
    // Optimization/compilation state starts fresh rather than being copied.
    , m_shouldAlwaysBeInlined(true)
    , m_didFailFTLCompilation(false)
    , m_hasBeenCompiledWithFTL(false)
    , m_unlinkedCode(*other.m_vm, other.m_ownerExecutable.get(), other.m_unlinkedCode.get())
    // Debugger state also starts fresh.
    , m_hasDebuggerStatement(false)
    , m_steppingMode(SteppingModeDisabled)
    , m_numBreakpoints(0)
    , m_ownerExecutable(*other.m_vm, other.m_ownerExecutable.get(), other.m_ownerExecutable.get())
    , m_vm(other.m_vm)
    , m_instructions(other.m_instructions)
    , m_thisRegister(other.m_thisRegister)
    , m_scopeRegister(other.m_scopeRegister)
    , m_lexicalEnvironmentRegister(other.m_lexicalEnvironmentRegister)
    , m_isStrictMode(other.m_isStrictMode)
    , m_needsActivation(other.m_needsActivation)
    , m_mayBeExecuting(false)
    , m_source(other.m_source)
    , m_sourceOffset(other.m_sourceOffset)
    , m_firstLineColumnOffset(other.m_firstLineColumnOffset)
    , m_codeType(other.m_codeType)
    , m_constantRegisters(other.m_constantRegisters)
    , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
    , m_functionDecls(other.m_functionDecls)
    , m_functionExprs(other.m_functionExprs)
    // Counters reset: this copy gets its own OSR-exit/reoptimization history.
    , m_osrExitCounter(0)
    , m_optimizationDelayCounter(0)
    , m_reoptimizationRetryCounter(0)
    , m_hash(other.m_hash)
#if ENABLE(JIT)
    , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
#endif
{
    m_visitAggregateHasBeenCalled.store(false, std::memory_order_relaxed);

    // Construction must happen with GC deferred (we register with the heap
    // below before the object is fully wired up).
    ASSERT(m_heap->isDeferred());
    ASSERT(m_scopeRegister.isLocal());

    setNumParameters(other.numParameters());
    // Arm the tier-up counters so this block warms up like a new one.
    optimizeAfterWarmUp();
    jitAfterWarmUp();

    // Rare data (exception handlers, constant buffers, switch tables) is
    // copied only if the source block actually has any.
    if (other.m_rareData) {
        createRareDataIfNecessary();
        
        m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
        m_rareData->m_constantBuffers = other.m_rareData->m_constantBuffers;
        m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
        m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
    }
    
    // Register with the heap's code-block set and account for this block's
    // memory so GC pacing sees it.
    m_heap->m_codeBlocks.add(this);
    m_heap->reportExtraMemoryAllocated(sizeof(CodeBlock));
}
1694
1695 CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
1696     : m_globalObject(scope->globalObject()->vm(), ownerExecutable, scope->globalObject())
1697     , m_heap(&m_globalObject->vm().heap)
1698     , m_numCalleeRegisters(unlinkedCodeBlock->m_numCalleeRegisters)
1699     , m_numVars(unlinkedCodeBlock->m_numVars)
1700     , m_isConstructor(unlinkedCodeBlock->isConstructor())
1701     , m_shouldAlwaysBeInlined(true)
1702     , m_didFailFTLCompilation(false)
1703     , m_hasBeenCompiledWithFTL(false)
1704     , m_unlinkedCode(m_globalObject->vm(), ownerExecutable, unlinkedCodeBlock)
1705     , m_hasDebuggerStatement(false)
1706     , m_steppingMode(SteppingModeDisabled)
1707     , m_numBreakpoints(0)
1708     , m_ownerExecutable(m_globalObject->vm(), ownerExecutable, ownerExecutable)
1709     , m_vm(unlinkedCodeBlock->vm())
1710     , m_thisRegister(unlinkedCodeBlock->thisRegister())
1711     , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
1712     , m_lexicalEnvironmentRegister(unlinkedCodeBlock->activationRegister())
1713     , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
1714     , m_needsActivation(unlinkedCodeBlock->hasActivationRegister() && unlinkedCodeBlock->codeType() == FunctionCode)
1715     , m_mayBeExecuting(false)
1716     , m_source(sourceProvider)
1717     , m_sourceOffset(sourceOffset)
1718     , m_firstLineColumnOffset(firstLineColumnOffset)
1719     , m_codeType(unlinkedCodeBlock->codeType())
1720     , m_osrExitCounter(0)
1721     , m_optimizationDelayCounter(0)
1722     , m_reoptimizationRetryCounter(0)
1723 #if ENABLE(JIT)
1724     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
1725 #endif
1726 {
1727     m_visitAggregateHasBeenCalled.store(false, std::memory_order_relaxed);
1728
1729     ASSERT(m_heap->isDeferred());
1730     ASSERT(m_scopeRegister.isLocal());
1731
1732     ASSERT(m_source);
1733     setNumParameters(unlinkedCodeBlock->numParameters());
1734
1735     if (vm()->typeProfiler() || vm()->controlFlowProfiler())
1736         vm()->functionHasExecutedCache()->removeUnexecutedRange(m_ownerExecutable->sourceID(), m_ownerExecutable->typeProfilingStartOffset(), m_ownerExecutable->typeProfilingEndOffset());
1737
1738     setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation());
1739     if (unlinkedCodeBlock->usesGlobalObject())
1740         m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(*m_vm, ownerExecutable, m_globalObject.get());
1741
1742     for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
1743         LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
1744         if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
1745             m_constantRegisters[registerIndex].set(*m_vm, ownerExecutable, m_globalObject->jsCellForLinkTimeConstant(type));
1746     }
1747
1748     HashSet<int, WTF::IntHash<int>, WTF::UnsignedWithZeroKeyHashTraits<int>> clonedConstantSymbolTables;
1749     {
1750         HashSet<SymbolTable*> clonedSymbolTables;
1751         for (unsigned i = 0; i < m_constantRegisters.size(); i++) {
1752             if (m_constantRegisters[i].get().isEmpty())
1753                 continue;
1754             if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(m_constantRegisters[i].get())) {
1755                 RELEASE_ASSERT(clonedSymbolTables.add(symbolTable).isNewEntry);
1756                 if (m_vm->typeProfiler()) {
1757                     ConcurrentJITLocker locker(symbolTable->m_lock);
1758                     symbolTable->prepareForTypeProfiling(locker);
1759                 }
1760                 m_constantRegisters[i].set(*m_vm, ownerExecutable, symbolTable->cloneScopePart(*m_vm));
1761                 clonedConstantSymbolTables.add(i + FirstConstantRegisterIndex);
1762             }
1763         }
1764     }
1765
1766     m_functionDecls.resizeToFit(unlinkedCodeBlock->numberOfFunctionDecls());
1767     for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
1768         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
1769         if (vm()->typeProfiler() || vm()->controlFlowProfiler())
1770             vm()->functionHasExecutedCache()->insertUnexecutedRange(m_ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
1771         m_functionDecls[i].set(*m_vm, ownerExecutable, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
1772     }
1773
1774     m_functionExprs.resizeToFit(unlinkedCodeBlock->numberOfFunctionExprs());
1775     for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
1776         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
1777         if (vm()->typeProfiler() || vm()->controlFlowProfiler())
1778             vm()->functionHasExecutedCache()->insertUnexecutedRange(m_ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
1779         m_functionExprs[i].set(*m_vm, ownerExecutable, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
1780     }
1781
1782     if (unlinkedCodeBlock->hasRareData()) {
1783         createRareDataIfNecessary();
1784         if (size_t count = unlinkedCodeBlock->constantBufferCount()) {
1785             m_rareData->m_constantBuffers.grow(count);
1786             for (size_t i = 0; i < count; i++) {
1787                 const UnlinkedCodeBlock::ConstantBuffer& buffer = unlinkedCodeBlock->constantBuffer(i);
1788                 m_rareData->m_constantBuffers[i] = buffer;
1789             }
1790         }
1791         if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
1792             m_rareData->m_exceptionHandlers.resizeToFit(count);
1793             for (size_t i = 0; i < count; i++) {
1794                 const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
1795                 HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
1796 #if ENABLE(JIT)
1797                 handler.initialize(unlinkedHandler, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(op_catch))));
1798 #else
1799                 handler.initialize(unlinkedHandler);
1800 #endif
1801             }
1802         }
1803
1804         if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
1805             m_rareData->m_stringSwitchJumpTables.grow(count);
1806             for (size_t i = 0; i < count; i++) {
1807                 UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
1808                 UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
1809                 for (; ptr != end; ++ptr) {
1810                     OffsetLocation offset;
1811                     offset.branchOffset = ptr->value;
1812                     m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
1813                 }
1814             }
1815         }
1816
1817         if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
1818             m_rareData->m_switchJumpTables.grow(count);
1819             for (size_t i = 0; i < count; i++) {
1820                 UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
1821                 SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
1822                 destTable.branchOffsets = sourceTable.branchOffsets;
1823                 destTable.min = sourceTable.min;
1824             }
1825         }
1826     }
1827
1828     // Allocate metadata buffers for the bytecode
1829     if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
1830         m_llintCallLinkInfos.resizeToFit(size);
1831     if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
1832         m_arrayProfiles.grow(size);
1833     if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
1834         m_arrayAllocationProfiles.resizeToFit(size);
1835     if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
1836         m_valueProfiles.resizeToFit(size);
1837     if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
1838         m_objectAllocationProfiles.resizeToFit(size);
1839
1840     // Copy and translate the UnlinkedInstructions
1841     unsigned instructionCount = unlinkedCodeBlock->instructions().count();
1842     UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions());
1843
1844     Vector<Instruction, 0, UnsafeVectorOverflow> instructions(instructionCount);
1845
1846     for (unsigned i = 0; !instructionReader.atEnd(); ) {
1847         const UnlinkedInstruction* pc = instructionReader.next();
1848
1849         unsigned opLength = opcodeLength(pc[0].u.opcode);
1850
1851         instructions[i] = vm()->interpreter->getOpcode(pc[0].u.opcode);
1852         for (size_t j = 1; j < opLength; ++j) {
1853             if (sizeof(int32_t) != sizeof(intptr_t))
1854                 instructions[i + j].u.pointer = 0;
1855             instructions[i + j].u.operand = pc[j].u.operand;
1856         }
1857         switch (pc[0].u.opcode) {
1858         case op_has_indexed_property: {
1859             int arrayProfileIndex = pc[opLength - 1].u.operand;
1860             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1861
1862             instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
1863             break;
1864         }
1865         case op_call_varargs:
1866         case op_construct_varargs:
1867         case op_get_by_val: {
1868             int arrayProfileIndex = pc[opLength - 2].u.operand;
1869             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1870
1871             instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
1872             FALLTHROUGH;
1873         }
1874         case op_get_direct_pname:
1875         case op_get_by_id:
1876         case op_get_from_arguments: {
1877             ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
1878             ASSERT(profile->m_bytecodeOffset == -1);
1879             profile->m_bytecodeOffset = i;
1880             instructions[i + opLength - 1] = profile;
1881             break;
1882         }
1883         case op_put_by_val: {
1884             int arrayProfileIndex = pc[opLength - 1].u.operand;
1885             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1886             instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
1887             break;
1888         }
1889         case op_put_by_val_direct: {
1890             int arrayProfileIndex = pc[opLength - 1].u.operand;
1891             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1892             instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
1893             break;
1894         }
1895
1896         case op_new_array:
1897         case op_new_array_buffer:
1898         case op_new_array_with_size: {
1899             int arrayAllocationProfileIndex = pc[opLength - 1].u.operand;
1900             instructions[i + opLength - 1] = &m_arrayAllocationProfiles[arrayAllocationProfileIndex];
1901             break;
1902         }
1903         case op_new_object: {
1904             int objectAllocationProfileIndex = pc[opLength - 1].u.operand;
1905             ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex];
1906             int inferredInlineCapacity = pc[opLength - 2].u.operand;
1907
1908             instructions[i + opLength - 1] = objectAllocationProfile;
1909             objectAllocationProfile->initialize(*vm(),
1910                 m_ownerExecutable.get(), m_globalObject->objectPrototype(), inferredInlineCapacity);
1911             break;
1912         }
1913
1914         case op_call:
1915         case op_call_eval: {
1916             ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
1917             ASSERT(profile->m_bytecodeOffset == -1);
1918             profile->m_bytecodeOffset = i;
1919             instructions[i + opLength - 1] = profile;
1920             int arrayProfileIndex = pc[opLength - 2].u.operand;
1921             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1922             instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
1923             instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
1924             break;
1925         }
1926         case op_construct: {
1927             instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
1928             ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
1929             ASSERT(profile->m_bytecodeOffset == -1);
1930             profile->m_bytecodeOffset = i;
1931             instructions[i + opLength - 1] = profile;
1932             break;
1933         }
1934         case op_get_by_id_out_of_line:
1935         case op_get_array_length:
1936             CRASH();
1937
1938         case op_create_lexical_environment: {
1939             int symbolTableIndex = pc[3].u.operand;
1940             RELEASE_ASSERT(clonedConstantSymbolTables.contains(symbolTableIndex));
1941             break;
1942         }
1943
1944         case op_resolve_scope: {
1945             const Identifier& ident = identifier(pc[3].u.operand);
1946             ResolveType type = static_cast<ResolveType>(pc[4].u.operand);
1947             RELEASE_ASSERT(type != LocalClosureVar);
1948             int localScopeDepth = pc[5].u.operand;
1949
1950             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type);
1951             instructions[i + 4].u.operand = op.type;
1952             instructions[i + 5].u.operand = op.depth;
1953             if (op.lexicalEnvironment)
1954                 instructions[i + 6].u.symbolTable.set(*vm(), ownerExecutable, op.lexicalEnvironment->symbolTable());
1955             else
1956                 instructions[i + 6].u.pointer = nullptr;
1957             break;
1958         }
1959
1960         case op_get_from_scope: {
1961             ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
1962             ASSERT(profile->m_bytecodeOffset == -1);
1963             profile->m_bytecodeOffset = i;
1964             instructions[i + opLength - 1] = profile;
1965
1966             // get_from_scope dst, scope, id, ResolveModeAndType, Structure, Operand
1967
1968             int localScopeDepth = pc[5].u.operand;
1969             instructions[i + 5].u.pointer = nullptr;
1970
1971             ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
1972             if (modeAndType.type() == LocalClosureVar) {
1973                 instructions[i + 4] = ResolveModeAndType(modeAndType.mode(), ClosureVar).operand();
1974                 break;
1975             }
1976
1977             const Identifier& ident = identifier(pc[3].u.operand);
1978             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, modeAndType.type());
1979
1980             instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
1981             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks)
1982                 instructions[i + 5].u.watchpointSet = op.watchpointSet;
1983             else if (op.structure)
1984                 instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
1985             instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
1986             break;
1987         }
1988
1989         case op_put_to_scope: {
1990             // put_to_scope scope, id, value, ResolveModeAndType, Structure, Operand
1991             ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
1992             if (modeAndType.type() == LocalClosureVar) {
1993                 // Only do watching if the property we're putting to is not anonymous.
1994                 if (static_cast<unsigned>(pc[2].u.operand) != UINT_MAX) {
1995                     int symbolTableIndex = pc[5].u.operand;
1996                     RELEASE_ASSERT(clonedConstantSymbolTables.contains(symbolTableIndex));
1997                     SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
1998                     const Identifier& ident = identifier(pc[2].u.operand);
1999                     ConcurrentJITLocker locker(symbolTable->m_lock);
2000                     auto iter = symbolTable->find(locker, ident.impl());
2001                     RELEASE_ASSERT(iter != symbolTable->end(locker));
2002                     iter->value.prepareToWatch();
2003                     instructions[i + 5].u.watchpointSet = iter->value.watchpointSet();
2004                 } else
2005                     instructions[i + 5].u.watchpointSet = nullptr;
2006                 break;
2007             }
2008
2009             const Identifier& ident = identifier(pc[2].u.operand);
2010             int localScopeDepth = pc[5].u.operand;
2011             instructions[i + 5].u.pointer = nullptr;
2012             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Put, modeAndType.type());
2013
2014             instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
2015             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks)
2016                 instructions[i + 5].u.watchpointSet = op.watchpointSet;
2017             else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
2018                 if (op.watchpointSet)
2019                     op.watchpointSet->invalidate(PutToScopeFireDetail(this, ident));
2020             } else if (op.structure)
2021                 instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
2022             instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
2023
2024             break;
2025         }
2026
2027         case op_profile_type: {
2028             RELEASE_ASSERT(vm()->typeProfiler());
2029             // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType?
2030             size_t instructionOffset = i + opLength - 1;
2031             unsigned divotStart, divotEnd;
2032             GlobalVariableID globalVariableID = 0;
2033             RefPtr<TypeSet> globalTypeSet;
2034             bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
2035             VirtualRegister profileRegister(pc[1].u.operand);
2036             ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand);
2037             SymbolTable* symbolTable = nullptr;
2038
2039             switch (flag) {
2040             case ProfileTypeBytecodeClosureVar: {
2041                 const Identifier& ident = identifier(pc[4].u.operand);
2042                 int localScopeDepth = pc[2].u.operand;
2043                 ResolveType type = static_cast<ResolveType>(pc[5].u.operand);
2044                 // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
2045                 // we're abstractly "read"ing from a JSScope.
2046                 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type);
2047
2048                 if (op.type == ClosureVar)
2049                     symbolTable = op.lexicalEnvironment->symbolTable();
2050                 else if (op.type == GlobalVar)
2051                     symbolTable = m_globalObject.get()->symbolTable();
2052                 
2053                 if (symbolTable) {
2054                     ConcurrentJITLocker locker(symbolTable->m_lock);
2055                     // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
2056                     symbolTable->prepareForTypeProfiling(locker);
2057                     globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), *vm());
2058                     globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), *vm());
2059                 } else
2060                     globalVariableID = TypeProfilerNoGlobalIDExists;
2061
2062                 break;
2063             }
2064             case ProfileTypeBytecodeLocallyResolved: {
2065                 int symbolTableIndex = pc[2].u.operand;
2066                 RELEASE_ASSERT(clonedConstantSymbolTables.contains(symbolTableIndex));
2067                 SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
2068                 const Identifier& ident = identifier(pc[4].u.operand);
2069                 ConcurrentJITLocker locker(symbolTable->m_lock);
2070                 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
2071                 globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), *vm());
2072                 globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), *vm());
2073
2074                 break;
2075             }
2076             case ProfileTypeBytecodeDoesNotHaveGlobalID: 
2077             case ProfileTypeBytecodeFunctionArgument: {
2078                 globalVariableID = TypeProfilerNoGlobalIDExists;
2079                 break;
2080             }
2081             case ProfileTypeBytecodeFunctionReturnStatement: {
2082                 RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
2083                 globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
2084                 globalVariableID = TypeProfilerReturnStatement;
2085                 if (!shouldAnalyze) {
2086                     // Because a return statement can be added implicitly to return undefined at the end of a function,
2087                     // and these nodes don't emit expression ranges because they aren't in the actual source text of
2088                     // the user's program, give the type profiler some range to identify these return statements.
2089                     // Currently, the text offset that is used as identification is on the open brace of the function 
2090                     // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
2091                     divotStart = divotEnd = m_sourceOffset;
2092                     shouldAnalyze = true;
2093                 }
2094                 break;
2095             }
2096             }
2097
2098             std::pair<TypeLocation*, bool> locationPair = vm()->typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
2099                 m_ownerExecutable->sourceID(), divotStart, divotEnd, globalTypeSet, vm());
2100             TypeLocation* location = locationPair.first;
2101             bool isNewLocation = locationPair.second;
2102
2103             if (flag == ProfileTypeBytecodeFunctionReturnStatement)
2104                 location->m_divotForFunctionOffsetIfReturnStatement = m_sourceOffset;
2105
2106             if (shouldAnalyze && isNewLocation)
2107                 vm()->typeProfiler()->insertNewLocation(location);
2108
2109             instructions[i + 2].u.location = location;
2110             break;
2111         }
2112
2113         case op_debug: {
2114             if (pc[1].u.index == DidReachBreakpoint)
2115                 m_hasDebuggerStatement = true;
2116             break;
2117         }
2118
2119         default:
2120             break;
2121         }
2122         i += opLength;
2123     }
2124
2125     if (vm()->controlFlowProfiler())
2126         insertBasicBlockBoundariesForControlFlowProfiler(instructions);
2127
2128     m_instructions = WTF::RefCountedArray<Instruction>(instructions);
2129
2130     // Set optimization thresholds only after m_instructions is initialized, since these
2131     // rely on the instruction count (and are in theory permitted to also inspect the
2132     // instruction stream to more accurate assess the cost of tier-up).
2133     optimizeAfterWarmUp();
2134     jitAfterWarmUp();
2135
2136     // If the concurrent thread will want the code block's hash, then compute it here
2137     // synchronously.
2138     if (Options::alwaysComputeHash())
2139         hash();
2140
2141     if (Options::dumpGeneratedBytecodes())
2142         dumpBytecode();
2143     
2144     m_heap->m_codeBlocks.add(this);
2145     m_heap->reportExtraMemoryAllocated(sizeof(CodeBlock) + m_instructions.size() * sizeof(Instruction));
2146 }
2147
// Tears down this CodeBlock. The critical work is severing every incoming-call
// linked-list node that points at us, since callers' CallLinkInfo destructors
// would otherwise walk a dangling list (see the comment below).
CodeBlock::~CodeBlock()
{
    // Let the per-bytecode profiler forget about us before our storage goes away.
    if (m_vm->m_perBytecodeProfiler)
        m_vm->m_perBytecodeProfiler->notifyDestruction(this);
    
#if ENABLE(VERBOSE_VALUE_PROFILE)
    dumpValueProfiles();
#endif
    // Unlink every LLInt caller whose call cache points at us.
    while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
        m_incomingLLIntCalls.begin()->remove();
#if ENABLE(JIT)
    // We may be destroyed before any CodeBlocks that refer to us are destroyed.
    // Consider that two CodeBlocks become unreachable at the same time. There
    // is no guarantee about the order in which the CodeBlocks are destroyed.
    // So, if we don't remove incoming calls, and get destroyed before the
    // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
    // destructor will try to remove nodes from our (no longer valid) linked list.
    while (m_incomingCalls.begin() != m_incomingCalls.end())
        m_incomingCalls.begin()->remove();
    while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
        m_incomingPolymorphicCalls.begin()->remove();
    
    // Note that our outgoing calls will be removed from other CodeBlocks'
    // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
    // destructors.

    // Drop our reference to each stub info; they are reference-counted because
    // they can outlive us (e.g. while referenced from a RepatchBuffer).
    // NOTE(review): the "can outlive us" rationale is an assumption — confirm
    // against StructureStubInfo's ref/deref users.
    for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter)
        (*iter)->deref();
#endif // ENABLE(JIT)
}
2178
2179 void CodeBlock::setNumParameters(int newValue)
2180 {
2181     m_numParameters = newValue;
2182
2183     m_argumentValueProfiles.resizeToFit(newValue);
2184 }
2185
2186 void EvalCodeCache::visitAggregate(SlotVisitor& visitor)
2187 {
2188     EvalCacheMap::iterator end = m_cacheMap.end();
2189     for (EvalCacheMap::iterator ptr = m_cacheMap.begin(); ptr != end; ++ptr)
2190         visitor.append(&ptr->value);
2191 }
2192
2193 CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
2194 {
2195 #if ENABLE(FTL_JIT)
2196     if (jitType() != JITCode::DFGJIT)
2197         return 0;
2198     DFG::JITCode* jitCode = m_jitCode->dfg();
2199     return jitCode->osrEntryBlock.get();
2200 #else // ENABLE(FTL_JIT)
2201     return 0;
2202 #endif // ENABLE(FTL_JIT)
2203 }
2204
// GC marking entry point for this code block. Reports extra memory, visits
// unconditionally strong references, and — for optimizing-JIT code that is not
// obviously live — kicks off the weak-reference fixpoint that decides whether
// the block should be jettisoned (see propagateTransitions/determineLiveness).
void CodeBlock::visitAggregate(SlotVisitor& visitor)
{
#if ENABLE(PARALLEL_GC)
    // I may be asked to scan myself more than once, and it may even happen concurrently.
    // To this end, use an atomic operation to check (and set) if I've been called already.
    // Only one thread may proceed past this point - whichever one wins the atomic set race.
    bool setByMe = m_visitAggregateHasBeenCalled.compareExchangeStrong(false, true);
    if (!setByMe)
        return;
#endif // ENABLE(PARALLEL_GC)
    
    // Also scan our alternative (the lower-tier version of this code — see
    // baselineAlternative()) and any special OSR-entry block hanging off us.
    if (!!m_alternative)
        m_alternative->visitAggregate(visitor);
    
    if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
        otherBlock->visitAggregate(visitor);

    visitor.reportExtraMemoryVisited(ownerExecutable(), sizeof(CodeBlock));
    if (m_jitCode)
        visitor.reportExtraMemoryVisited(ownerExecutable(), m_jitCode->size());
    if (m_instructions.size()) {
        // Divide by refCount() because m_instructions points to something that is shared
        // by multiple CodeBlocks, and we only want to count it towards the heap size once.
        // Having each CodeBlock report only its proportional share of the size is one way
        // of accomplishing this.
        visitor.reportExtraMemoryVisited(ownerExecutable(), m_instructions.size() * sizeof(Instruction) / m_instructions.refCount());
    }

    visitor.append(&m_unlinkedCode);

    // There are three things that may use unconditional finalizers: lazy bytecode freeing,
    // inline cache clearing, and jettisoning. The probability of us wanting to do at
    // least one of those things is probably quite close to 1. So we add one no matter what
    // and when it runs, it figures out whether it has any work to do.
    visitor.addUnconditionalFinalizer(this);
    
    // Reset the transition-marking fixpoint state for this GC cycle.
    m_allTransitionsHaveBeenMarked = false;
    
    if (shouldImmediatelyAssumeLivenessDuringScan()) {
        // This code block is live, so scan all references strongly and return.
        stronglyVisitStrongReferences(visitor);
        stronglyVisitWeakReferences(visitor);
        propagateTransitions(visitor);
        return;
    }
    
    // There are two things that we use weak reference harvesters for: DFG fixpoint for
    // jettisoning, and trying to find structures that would be live based on some
    // inline cache. So it makes sense to register them regardless.
    visitor.addWeakReferenceHarvester(this);

#if ENABLE(DFG_JIT)
    // We get here if we're live in the sense that our owner executable is live,
    // but we're not yet live for sure in another sense: we may yet decide that this
    // code block should be jettisoned based on its outgoing weak references being
    // stale. Set a flag to indicate that we're still assuming that we're dead, and
    // perform one round of determining if we're live. The GC may determine, based on
    // either us marking additional objects, or by other objects being marked for
    // other reasons, that this iteration should run again; it will notify us of this
    // decision by calling harvestWeakReferences().
    
    m_jitCode->dfgCommon()->livenessHasBeenProved = false;
    
    propagateTransitions(visitor);
    determineLiveness(visitor);
#else // ENABLE(DFG_JIT)
    RELEASE_ASSERT_NOT_REACHED();
#endif // ENABLE(DFG_JIT)
}
2274
2275 bool CodeBlock::shouldImmediatelyAssumeLivenessDuringScan()
2276 {
2277 #if ENABLE(DFG_JIT)
2278     // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
2279     // their weak references go stale. So if a basline JIT CodeBlock gets
2280     // scanned, we can assume that this means that it's live.
2281     if (!JITCode::isOptimizingJIT(jitType()))
2282         return true;
2283
2284     // For simplicity, we don't attempt to jettison code blocks during GC if
2285     // they are executing. Instead we strongly mark their weak references to
2286     // allow them to continue to execute soundly.
2287     if (m_mayBeExecuting)
2288         return true;
2289
2290     if (Options::forceDFGCodeBlockLiveness())
2291         return true;
2292
2293     return false;
2294 #else
2295     return true;
2296 #endif
2297 }
2298
// Answers whether, at this point in the GC, this block is known to survive.
// Consulted by finalizeUnconditionally() to decide whether to jettison.
bool CodeBlock::isKnownToBeLiveDuringGC()
{
#if ENABLE(DFG_JIT)
    // This should return true for:
    // - Code blocks that behave like normal objects - i.e. if they are referenced then they
    //   are live.
    // - Code blocks that were running on the stack.
    // - Code blocks that survived the last GC if the current GC is an Eden GC. This is
    //   because either livenessHasBeenProved would have survived as true or m_mayBeExecuting
    //   would survive as true.
    // - Code blocks that don't have any dead weak references.
    
    return shouldImmediatelyAssumeLivenessDuringScan()
        || m_jitCode->dfgCommon()->livenessHasBeenProved;
#else
    return true;
#endif
}
2317
2318 #if ENABLE(DFG_JIT)
2319 static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
2320 {
2321     if (transition.m_codeOrigin && !Heap::isMarked(transition.m_codeOrigin.get()))
2322         return false;
2323     
2324     if (!Heap::isMarked(transition.m_from.get()))
2325         return false;
2326     
2327     return true;
2328 }
2329 #endif // ENABLE(DFG_JIT)
2330
// One iteration of the transition-marking fixpoint. For every cached structure
// transition this block holds (LLInt put_by_id caches, baseline/DFG stub
// infos, and DFG transition records), if the transition's preconditions (its
// source structure and, where present, its code origin) are already marked,
// mark the target structure. Once no transition remains blocked on an unmarked
// precondition, m_allTransitionsHaveBeenMarked lets later iterations bail out.
void CodeBlock::propagateTransitions(SlotVisitor& visitor)
{
    UNUSED_PARAM(visitor);

    if (m_allTransitionsHaveBeenMarked)
        return;

    bool allAreMarkedSoFar = true;
        
    Interpreter* interpreter = m_vm->interpreter;
    if (jitType() == JITCode::InterpreterThunk) {
        // LLInt: scan put_by_id transition caches embedded in the bytecode.
        // Operand 4 holds the source structure and operand 6 the target
        // (same layout as cleared in finalizeUnconditionally()).
        const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
        for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
            Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
            switch (interpreter->getOpcodeID(instruction[0].u.opcode)) {
            case op_put_by_id_transition_direct:
            case op_put_by_id_transition_normal:
            case op_put_by_id_transition_direct_out_of_line:
            case op_put_by_id_transition_normal_out_of_line: {
                if (Heap::isMarked(instruction[4].u.structure.get()))
                    visitor.append(&instruction[6].u.structure);
                else
                    allAreMarkedSoFar = false;
                break;
            }
            default:
                break;
            }
        }
    }

#if ENABLE(JIT)
    if (JITCode::isJIT(jitType())) {
        // JIT tiers: transitions live in put_by_id transition stubs and in
        // polymorphic put_by_id lists.
        for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
            StructureStubInfo& stubInfo = **iter;
            switch (stubInfo.accessType) {
            case access_put_by_id_transition_normal:
            case access_put_by_id_transition_direct: {
                JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
                if ((!origin || Heap::isMarked(origin))
                    && Heap::isMarked(stubInfo.u.putByIdTransition.previousStructure.get()))
                    visitor.append(&stubInfo.u.putByIdTransition.structure);
                else
                    allAreMarkedSoFar = false;
                break;
            }

            case access_put_by_id_list: {
                PolymorphicPutByIdList* list = stubInfo.u.putByIdList.list;
                JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
                if (origin && !Heap::isMarked(origin)) {
                    allAreMarkedSoFar = false;
                    break;
                }
                for (unsigned j = list->size(); j--;) {
                    PutByIdAccess& access = list->m_list[j];
                    if (!access.isTransition())
                        continue;
                    if (Heap::isMarked(access.oldStructure()))
                        visitor.append(&access.m_newStructure);
                    else
                        allAreMarkedSoFar = false;
                }
                break;
            }
            
            default:
                break;
            }
        }
    }
#endif // ENABLE(JIT)
    
#if ENABLE(DFG_JIT)
    if (JITCode::isOptimizingJIT(jitType())) {
        DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
        
        for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
            if (shouldMarkTransition(dfgCommon->transitions[i])) {
                // If the following three things are live, then the target of the
                // transition is also live:
                //
                // - This code block. We know it's live already because otherwise
                //   we wouldn't be scanning ourselves.
                //
                // - The code origin of the transition. Transitions may arise from
                //   code that was inlined. They are not relevant if the user's
                //   object that is required for the inlinee to run is no longer
                //   live.
                //
                // - The source of the transition. The transition checks if some
                //   heap location holds the source, and if so, stores the target.
                //   Hence the source must be live for the transition to be live.
                //
                // We also short-circuit the liveness if the structure is harmless
                // to mark (i.e. its global object and prototype are both already
                // live).
                
                visitor.append(&dfgCommon->transitions[i].m_to);
            } else
                allAreMarkedSoFar = false;
        }
    }
#endif // ENABLE(DFG_JIT)
    
    if (allAreMarkedSoFar)
        m_allTransitionsHaveBeenMarked = true;
}
2439
// DFG fixpoint step: if every weak reference (cells and structures) this code
// block depends on has been marked, prove liveness and strongly visit our
// strong references. If livenessHasBeenProved is still false at the end of GC,
// finalizeUnconditionally() will jettison us.
void CodeBlock::determineLiveness(SlotVisitor& visitor)
{
    UNUSED_PARAM(visitor);
    
    // Blocks that are immediately assumed live were already fully scanned
    // in visitAggregate(); nothing to prove.
    if (shouldImmediatelyAssumeLivenessDuringScan())
        return;
    
#if ENABLE(DFG_JIT)
    // Check if we have any remaining work to do.
    DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
    if (dfgCommon->livenessHasBeenProved)
        return;
    
    // Now check all of our weak references. If all of them are live, then we
    // have proved liveness and so we scan our strong references. If at end of
    // GC we still have not proved liveness, then this code block is toast.
    bool allAreLiveSoFar = true;
    for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
        if (!Heap::isMarked(dfgCommon->weakReferences[i].get())) {
            allAreLiveSoFar = false;
            break;
        }
    }
    if (allAreLiveSoFar) {
        for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
            if (!Heap::isMarked(dfgCommon->weakStructureReferences[i].get())) {
                allAreLiveSoFar = false;
                break;
            }
        }
    }
    
    // If some weak references are dead, then this fixpoint iteration was
    // unsuccessful.
    if (!allAreLiveSoFar)
        return;
    
    // All weak references are live. Record this information so we don't
    // come back here again, and scan the strong references.
    dfgCommon->livenessHasBeenProved = true;
    stronglyVisitStrongReferences(visitor);
#endif // ENABLE(DFG_JIT)
}
2483
// WeakReferenceHarvester hook (registered in visitAggregate()): re-run one
// iteration of the transition/liveness fixpoint whenever the GC's marking
// may have made progress.
void CodeBlock::visitWeakReferences(SlotVisitor& visitor)
{
    propagateTransitions(visitor);
    determineLiveness(visitor);
}
2489
// UnconditionalFinalizer hook (registered in visitAggregate()), run after GC
// marking completes. Three duties: (1) clear LLInt inline caches and call
// caches that reference dead cells, (2) jettison DFG code whose weak
// references died, and (3) reset JIT inline-cache stubs whose referents died.
void CodeBlock::finalizeUnconditionally()
{
    Interpreter* interpreter = m_vm->interpreter;
    if (JITCode::couldBeInterpreted(jitType())) {
        const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
        for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
            Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
            switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) {
            case op_get_by_id:
            case op_get_by_id_out_of_line:
            case op_put_by_id:
            case op_put_by_id_out_of_line:
                // Drop the cached structure (operand 4) if the GC did not mark it.
                if (!curInstruction[4].u.structure || Heap::isMarked(curInstruction[4].u.structure.get()))
                    break;
                if (Options::verboseOSR())
                    dataLogF("Clearing LLInt property access with structure %p.\n", curInstruction[4].u.structure.get());
                curInstruction[4].u.structure.clear();
                curInstruction[5].u.operand = 0;
                break;
            case op_put_by_id_transition_direct:
            case op_put_by_id_transition_normal:
            case op_put_by_id_transition_direct_out_of_line:
            case op_put_by_id_transition_normal_out_of_line:
                // A transition cache is only usable if the source structure,
                // target structure, and structure chain all survived the GC.
                if (Heap::isMarked(curInstruction[4].u.structure.get())
                    && Heap::isMarked(curInstruction[6].u.structure.get())
                    && Heap::isMarked(curInstruction[7].u.structureChain.get()))
                    break;
                if (Options::verboseOSR()) {
                    dataLogF("Clearing LLInt put transition with structures %p -> %p, chain %p.\n",
                            curInstruction[4].u.structure.get(),
                            curInstruction[6].u.structure.get(),
                            curInstruction[7].u.structureChain.get());
                }
                curInstruction[4].u.structure.clear();
                curInstruction[6].u.structure.clear();
                curInstruction[7].u.structureChain.clear();
                // Demote the instruction back to a plain put_by_id.
                curInstruction[0].u.opcode = interpreter->getOpcode(op_put_by_id);
                break;
            case op_get_array_length:
                // Caches no GC cells; nothing to clear.
                break;
            case op_to_this:
                if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
                    break;
                if (Options::verboseOSR())
                    dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
                curInstruction[2].u.structure.clear();
                // Remember that the GC (not actual polymorphism) cleared this cache.
                curInstruction[3].u.toThisStatus = merge(
                    curInstruction[3].u.toThisStatus, ToThisClearedByGC);
                break;
            case op_create_this: {
                auto& cacheWriteBarrier = curInstruction[4].u.jsCell;
                // The sentinel means "saw multiple callees"; leave it alone.
                if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
                    break;
                JSCell* cachedFunction = cacheWriteBarrier.get();
                if (Heap::isMarked(cachedFunction))
                    break;
                if (Options::verboseOSR())
                    dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
                cacheWriteBarrier.clear();
                break;
            }
            case op_resolve_scope: {
                // Right now this isn't strictly necessary. Any symbol tables that this will refer to
                // are for outer functions, and we refer to those functions strongly, and they refer
                // to the symbol table strongly. But it's nice to be on the safe side.
                WriteBarrierBase<SymbolTable>& symbolTable = curInstruction[6].u.symbolTable;
                if (!symbolTable || Heap::isMarked(symbolTable.get()))
                    break;
                if (Options::verboseOSR())
                    dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
                symbolTable.clear();
                break;
            }
            case op_get_from_scope:
            case op_put_to_scope: {
                ResolveModeAndType modeAndType =
                    ResolveModeAndType(curInstruction[4].u.operand);
                // These access kinds don't cache a structure; nothing to clear.
                if (modeAndType.type() == GlobalVar || modeAndType.type() == GlobalVarWithVarInjectionChecks || modeAndType.type() == LocalClosureVar)
                    continue;
                WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
                if (!structure || Heap::isMarked(structure.get()))
                    break;
                if (Options::verboseOSR())
                    dataLogF("Clearing scope access with structure %p.\n", structure.get());
                structure.clear();
                break;
            }
            default:
                OpcodeID opcodeID = interpreter->getOpcodeID(curInstruction[0].u.opcode);
                ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
            }
        }

        // Unlink LLInt call caches whose callee died, and forget dead
        // last-seen callees.
        for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
            if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
                if (Options::verboseOSR())
                    dataLog("Clearing LLInt call from ", *this, "\n");
                m_llintCallLinkInfos[i].unlink();
            }
            if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
                m_llintCallLinkInfos[i].lastSeenCallee.clear();
        }
    }

#if ENABLE(DFG_JIT)
    // If liveness was never proven (some weak reference died), jettison this
    // optimized code rather than letting it run with stale assumptions.
    if (!isKnownToBeLiveDuringGC()) {
        if (Options::verboseOSR())
            dataLog(*this, " has dead weak references, jettisoning during GC.\n");

        if (DFG::shouldShowDisassembly()) {
            dataLog(*this, " will be jettisoned because of the following dead references:\n");
            DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
            for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
                DFG::WeakReferenceTransition& transition = dfgCommon->transitions[i];
                JSCell* origin = transition.m_codeOrigin.get();
                JSCell* from = transition.m_from.get();
                JSCell* to = transition.m_to.get();
                if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
                    continue;
                dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
            }
            for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
                JSCell* weak = dfgCommon->weakReferences[i].get();
                if (Heap::isMarked(weak))
                    continue;
                dataLog("    Weak reference ", RawPointer(weak), ".\n");
            }
        }
        
        jettison(Profiler::JettisonDueToWeakReference);
        return;
    }
#endif // ENABLE(DFG_JIT)

#if ENABLE(JIT)
    // Handle inline caches: stubs that still reference dead cells after
    // visiting their weak references get reset to the slow path.
    if (!!jitCode()) {
        RepatchBuffer repatchBuffer(this);
        
        for (auto iter = callLinkInfosBegin(); !!iter; ++iter)
            (*iter)->visitWeak(repatchBuffer);

        for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
            StructureStubInfo& stubInfo = **iter;
            
            if (stubInfo.visitWeakReferences(repatchBuffer))
                continue;
            
            resetStubDuringGCInternal(repatchBuffer, stubInfo);
        }
    }
#endif
}
2644
// Builds a map from code origin to structure stub info for this block's
// inline caches. The lock witness parameter asserts the caller holds m_lock.
void CodeBlock::getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result)
{
#if ENABLE(JIT)
    toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
#else
    UNUSED_PARAM(result);
#endif
}
2653
// Convenience overload that acquires m_lock itself.
void CodeBlock::getStubInfoMap(StubInfoMap& result)
{
    ConcurrentJITLocker locker(m_lock);
    getStubInfoMap(locker, result);
}
2659
// Builds a map from code origin to call link info. The lock witness
// parameter asserts the caller holds m_lock.
void CodeBlock::getCallLinkInfoMap(const ConcurrentJITLocker&, CallLinkInfoMap& result)
{
#if ENABLE(JIT)
    toHashMap(m_callLinkInfos, getCallLinkInfoCodeOrigin, result);
#else
    UNUSED_PARAM(result);
#endif
}
2668
// Convenience overload that acquires m_lock itself.
void CodeBlock::getCallLinkInfoMap(CallLinkInfoMap& result)
{
    ConcurrentJITLocker locker(m_lock);
    getCallLinkInfoMap(locker, result);
}
2674
2675 void CodeBlock::getByValInfoMap(const ConcurrentJITLocker&, ByValInfoMap& result)
2676 {
2677 #if ENABLE(JIT)
2678     for (auto* byValInfo : m_byValInfos)
2679         result.add(CodeOrigin(byValInfo->bytecodeIndex), byValInfo);
2680 #else
2681     UNUSED_PARAM(result);
2682 #endif
2683 }
2684
// Convenience overload that acquires m_lock itself.
void CodeBlock::getByValInfoMap(ByValInfoMap& result)
{
    ConcurrentJITLocker locker(m_lock);
    getByValInfoMap(locker, result);
}
2690
2691 #if ENABLE(JIT)
// Allocates a new structure stub info. Takes m_lock so readers (e.g.
// getStubInfoMap(), which takes the same lock) see a consistent Bag.
StructureStubInfo* CodeBlock::addStubInfo()
{
    ConcurrentJITLocker locker(m_lock);
    return m_stubInfos.add();
}
2697
2698 StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
2699 {
2700     for (StructureStubInfo* stubInfo : m_stubInfos) {
2701         if (stubInfo->codeOrigin == codeOrigin)
2702             return stubInfo;
2703     }
2704     return nullptr;
2705 }
2706
// Allocates a new by-val access info under m_lock, mirroring addStubInfo().
ByValInfo* CodeBlock::addByValInfo()
{
    ConcurrentJITLocker locker(m_lock);
    return m_byValInfos.add();
}
2712
// Allocates a new call link info under m_lock, mirroring addStubInfo().
CallLinkInfo* CodeBlock::addCallLinkInfo()
{
    ConcurrentJITLocker locker(m_lock);
    return m_callLinkInfos.add();
}
2718
// Public entry point for clearing a single inline cache back to its slow
// path. No-ops if the stub was never set up.
void CodeBlock::resetStub(StructureStubInfo& stubInfo)
{
    if (stubInfo.accessType == access_unset)
        return;
    
    ConcurrentJITLocker locker(m_lock);
    
    RepatchBuffer repatchBuffer(this);
    resetStubInternal(repatchBuffer, stubInfo);
}
2729
// Repatches the machine code for the given inline cache back to its generic
// form (dispatching on whether it is a get_by_id, put_by_id, or "in" cache)
// and then clears the stub's metadata.
void CodeBlock::resetStubInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
{
    AccessType accessType = static_cast<AccessType>(stubInfo.accessType);
    
    if (Options::verboseOSR()) {
        // This can be called from GC destructor calls, so we don't try to do a full dump
        // of the CodeBlock.
        dataLog("Clearing structure cache (kind ", static_cast<int>(stubInfo.accessType), ") in ", RawPointer(this), ".\n");
    }
    
    // Stubs only exist in JIT'ed code; the LLInt caches are cleared elsewhere
    // (see finalizeUnconditionally()).
    RELEASE_ASSERT(JITCode::isJIT(jitType()));
    
    if (isGetByIdAccess(accessType))
        resetGetByID(repatchBuffer, stubInfo);
    else if (isPutByIdAccess(accessType))
        resetPutByID(repatchBuffer, stubInfo);
    else {
        RELEASE_ASSERT(isInAccess(accessType));
        resetIn(repatchBuffer, stubInfo);
    }
    
    stubInfo.reset();
}
2753
// GC variant of resetStubInternal(): additionally flags the stub as having
// been reset by the GC rather than by ordinary repatching.
void CodeBlock::resetStubDuringGCInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
{
    resetStubInternal(repatchBuffer, stubInfo);
    stubInfo.resetByGC = true;
}
2759
2760 CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
2761 {
2762     for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
2763         if ((*iter)->codeOrigin() == CodeOrigin(index))
2764             return *iter;
2765     }
2766     return nullptr;
2767 }
2768 #endif
2769
2770 void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
2771 {
2772     visitor.append(&m_globalObject);
2773     visitor.append(&m_ownerExecutable);
2774     visitor.append(&m_unlinkedCode);
2775     if (m_rareData)
2776         m_rareData->m_evalCodeCache.visitAggregate(visitor);
2777     visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
2778     for (size_t i = 0; i < m_functionExprs.size(); ++i)
2779         visitor.append(&m_functionExprs[i]);
2780     for (size_t i = 0; i < m_functionDecls.size(); ++i)
2781         visitor.append(&m_functionDecls[i]);
2782     for (unsigned i = 0; i < m_objectAllocationProfiles.size(); ++i)
2783         m_objectAllocationProfiles[i].visitAggregate(visitor);
2784
2785 #if ENABLE(DFG_JIT)
2786     if (JITCode::isOptimizingJIT(jitType())) {
2787         // FIXME: This is an antipattern for two reasons. References introduced by the DFG
2788         // that aren't in the original CodeBlock being compiled should be weakly referenced.
2789         // Inline call frames aren't in the original CodeBlock, so they qualify as weak. Also,
2790         // those weak references should already be tracked in the DFG as weak FrozenValues. So,
2791         // there is probably no need for this. We already have assertions that this should be
2792         // unnecessary.
2793         // https://bugs.webkit.org/show_bug.cgi?id=146613
2794         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2795         if (dfgCommon->inlineCallFrames.get())
2796             dfgCommon->inlineCallFrames->visitAggregate(visitor);
2797     }
2798 #endif
2799
2800     updateAllPredictions();
2801 }
2802
2803 void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor)
2804 {
2805     UNUSED_PARAM(visitor);
2806
2807 #if ENABLE(DFG_JIT)
2808     if (!JITCode::isOptimizingJIT(jitType()))
2809         return;
2810     
2811     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2812
2813     for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
2814         if (!!dfgCommon->transitions[i].m_codeOrigin)
2815             visitor.append(&dfgCommon->transitions[i].m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
2816         visitor.append(&dfgCommon->transitions[i].m_from);
2817         visitor.append(&dfgCommon->transitions[i].m_to);
2818     }
2819     
2820     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i)
2821         visitor.append(&dfgCommon->weakReferences[i]);
2822
2823     for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i)
2824         visitor.append(&dfgCommon->weakStructureReferences[i]);
2825 #endif    
2826 }
2827
2828 CodeBlock* CodeBlock::baselineAlternative()
2829 {
2830 #if ENABLE(JIT)
2831     CodeBlock* result = this;
2832     while (result->alternative())
2833         result = result->alternative();
2834     RELEASE_ASSERT(result);
2835     RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
2836     return result;
2837 #else
2838     return this;
2839 #endif
2840 }
2841
2842 CodeBlock* CodeBlock::baselineVersion()
2843 {
2844 #if ENABLE(JIT)
2845     if (JITCode::isBaselineCode(jitType()))
2846         return this;
2847     CodeBlock* result = replacement();
2848     if (!result) {
2849         // This can happen if we're creating the original CodeBlock for an executable.
2850         // Assume that we're the baseline CodeBlock.
2851         RELEASE_ASSERT(jitType() == JITCode::None);
2852         return this;
2853     }
2854     result = result->baselineAlternative();
2855     return result;
2856 #else
2857     return this;
2858 #endif
2859 }
2860
2861 #if ENABLE(JIT)
2862 bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
2863 {
2864     return JITCode::isHigherTier(replacement()->jitType(), typeToReplace);
2865 }
2866
2867 bool CodeBlock::hasOptimizedReplacement()
2868 {
2869     return hasOptimizedReplacement(jitType());
2870 }
2871 #endif
2872
2873 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler)
2874 {
2875     RELEASE_ASSERT(bytecodeOffset < instructions().size());
2876
2877     if (!m_rareData)
2878         return 0;
2879     
2880     Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
2881     for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
2882         HandlerInfo& handler = exceptionHandlers[i];
2883         if ((requiredHandler == RequiredHandler::CatchHandler) && !handler.isCatchHandler())
2884             continue;
2885
2886         // Handlers are ordered innermost first, so the first handler we encounter
2887         // that contains the source address is the correct handler to use.
2888         if (handler.start <= bytecodeOffset && handler.end > bytecodeOffset)
2889             return &handler;
2890     }
2891
2892     return 0;
2893 }
2894
2895 unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
2896 {
2897     RELEASE_ASSERT(bytecodeOffset < instructions().size());
2898     return m_ownerExecutable->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
2899 }
2900
2901 unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
2902 {
2903     int divot;
2904     int startOffset;
2905     int endOffset;
2906     unsigned line;
2907     unsigned column;
2908     expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
2909     return column;
2910 }
2911
void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column)
{
    // Fetch the expression range relative to the unlinked source, then translate
    // it into this code block's absolute source coordinates.
    m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
    divot += m_sourceOffset;
    // NOTE: this must read the *relative* line (i.e. before the firstLine()
    // adjustment below): only line 0 of the unlinked code gets the first-line
    // column offset; other lines presumably get a 1-based-column adjustment —
    // confirm against the unlinked code's column convention.
    column += line ? 1 : firstLineColumnOffset();
    line += m_ownerExecutable->firstLine();
}
2919
2920 bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
2921 {
2922     Interpreter* interpreter = vm()->interpreter;
2923     const Instruction* begin = instructions().begin();
2924     const Instruction* end = instructions().end();
2925     for (const Instruction* it = begin; it != end;) {
2926         OpcodeID opcodeID = interpreter->getOpcodeID(it->u.opcode);
2927         if (opcodeID == op_debug) {
2928             unsigned bytecodeOffset = it - begin;
2929             int unused;
2930             unsigned opDebugLine;
2931             unsigned opDebugColumn;
2932             expressionRangeForBytecodeOffset(bytecodeOffset, unused, unused, unused, opDebugLine, opDebugColumn);
2933             if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
2934                 return true;
2935         }
2936         it += opcodeLengths[opcodeID];
2937     }
2938     return false;
2939 }
2940
2941 void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
2942 {
2943     m_rareCaseProfiles.shrinkToFit();
2944     m_specialFastCaseProfiles.shrinkToFit();
2945     
2946     if (shrinkMode == EarlyShrink) {
2947         m_constantRegisters.shrinkToFit();
2948         m_constantsSourceCodeRepresentation.shrinkToFit();
2949         
2950         if (m_rareData) {
2951             m_rareData->m_switchJumpTables.shrinkToFit();
2952             m_rareData->m_stringSwitchJumpTables.shrinkToFit();
2953         }
2954     } // else don't shrink these, because we would have already pointed pointers into these tables.
2955 }
2956
2957 #if ENABLE(JIT)
2958 void CodeBlock::unlinkCalls()
2959 {
2960     if (!!m_alternative)
2961         m_alternative->unlinkCalls();
2962     for (size_t i = 0; i < m_llintCallLinkInfos.size(); ++i) {
2963         if (m_llintCallLinkInfos[i].isLinked())
2964             m_llintCallLinkInfos[i].unlink();
2965     }
2966     if (m_callLinkInfos.isEmpty())
2967         return;
2968     if (!m_vm->canUseJIT())
2969         return;
2970     RepatchBuffer repatchBuffer(this);
2971     for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
2972         CallLinkInfo& info = **iter;
2973         if (!info.isLinked())
2974             continue;
2975         info.unlink(repatchBuffer);
2976     }
2977 }
2978
void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
{
    // Record a JIT call site that now points at this code block, so it can be
    // unlinked later by unlinkIncomingCalls(). Also updates the SABI heuristic.
    noticeIncomingCall(callerFrame);
    m_incomingCalls.push(incoming);
}
2984
void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
{
    // Same as linkIncomingCall(), but for polymorphic call stub nodes.
    noticeIncomingCall(callerFrame);
    m_incomingPolymorphicCalls.push(incoming);
}
2990 #endif // ENABLE(JIT)
2991
2992 void CodeBlock::unlinkIncomingCalls()
2993 {
2994     while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
2995         m_incomingLLIntCalls.begin()->unlink();
2996 #if ENABLE(JIT)
2997     if (m_incomingCalls.isEmpty() && m_incomingPolymorphicCalls.isEmpty())
2998         return;
2999     RepatchBuffer repatchBuffer(this);
3000     while (m_incomingCalls.begin() != m_incomingCalls.end())
3001         m_incomingCalls.begin()->unlink(repatchBuffer);
3002     while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
3003         m_incomingPolymorphicCalls.begin()->unlink(repatchBuffer);
3004 #endif // ENABLE(JIT)
3005 }
3006
void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
{
    // Record an LLInt call site that now points at this code block, so it can
    // be unlinked later by unlinkIncomingCalls().
    noticeIncomingCall(callerFrame);
    m_incomingLLIntCalls.push(incoming);
}
3012
3013 void CodeBlock::clearEvalCache()
3014 {
3015     if (!!m_alternative)
3016         m_alternative->clearEvalCache();
3017     if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
3018         otherBlock->clearEvalCache();
3019     if (!m_rareData)
3020         return;
3021     m_rareData->m_evalCodeCache.clear();
3022 }
3023
3024 void CodeBlock::install()
3025 {
3026     ownerExecutable()->installCode(this);
3027 }
3028
3029 PassRefPtr<CodeBlock> CodeBlock::newReplacement()
3030 {
3031     return ownerExecutable()->newReplacementCodeBlockFor(specializationKind());
3032 }
3033
3034 #if ENABLE(JIT)
3035 CodeBlock* ProgramCodeBlock::replacement()
3036 {
3037     return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
3038 }
3039
3040 CodeBlock* EvalCodeBlock::replacement()
3041 {
3042     return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
3043 }
3044
3045 CodeBlock* FunctionCodeBlock::replacement()
3046 {
3047     return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
3048 }
3049
DFG::CapabilityLevel ProgramCodeBlock::capabilityLevelInternal()
{
    // Delegate to the DFG's program-level capability analysis.
    return DFG::programCapabilityLevel(this);
}
3054
DFG::CapabilityLevel EvalCodeBlock::capabilityLevelInternal()
{
    // Delegate to the DFG's eval-level capability analysis.
    return DFG::evalCapabilityLevel(this);
}
3059
3060 DFG::CapabilityLevel FunctionCodeBlock::capabilityLevelInternal()
3061 {
3062     if (m_isConstructor)
3063         return DFG::functionForConstructCapabilityLevel(this);
3064     return DFG::functionForCallCapabilityLevel(this);
3065 }
3066 #endif
3067
void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
{
    // Throw away this CodeBlock's optimized machine code: invalidate it,
    // optionally count a reoptimization for backoff purposes, and reinstall the
    // baseline alternative as the executable's entrypoint.
    RELEASE_ASSERT(reason != Profiler::NotJettisoned);
    
#if ENABLE(DFG_JIT)
    if (DFG::shouldShowDisassembly()) {
        dataLog("Jettisoning ", *this);
        if (mode == CountReoptimization)
            dataLog(" and counting reoptimization");
        dataLog(" due to ", reason);
        if (detail)
            dataLog(", ", *detail);
        dataLog(".\n");
    }
    
    // Keep GC at bay while we invalidate and rewire machine code.
    DeferGCForAWhile deferGC(*m_heap);
    RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
    
    // Let the profiler know why this compilation got thrown away.
    if (Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get())
        compilation->setJettisonReason(reason, detail);
    
    // We want to accomplish two things here:
    // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
    //    we should OSR exit at the top of the next bytecode instruction after the return.
    // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
    
    // This accomplishes the OSR-exit-on-return part, and does its own book-keeping about
    // whether the invalidation has already happened.
    if (!jitCode()->dfgCommon()->invalidate()) {
        // Nothing to do since we've already been invalidated. That means that we cannot be
        // the optimized replacement.
        RELEASE_ASSERT(this != replacement());
        return;
    }
    
    if (DFG::shouldShowDisassembly())
        dataLog("    Did invalidate ", *this, "\n");
    
    // Count the reoptimization if that's what the user wanted.
    if (mode == CountReoptimization) {
        // FIXME: Maybe this should call alternative().
        // https://bugs.webkit.org/show_bug.cgi?id=123677
        baselineAlternative()->countReoptimization();
        if (DFG::shouldShowDisassembly())
            dataLog("    Did count reoptimization for ", *this, "\n");
    }
    
    // Now take care of the entrypoint.
    if (this != replacement()) {
        // This means that we were never the entrypoint. This can happen for OSR entry code
        // blocks.
        return;
    }
    alternative()->optimizeAfterWarmUp();
    tallyFrequentExitSites();
    alternative()->install();
    if (DFG::shouldShowDisassembly())
        dataLog("    Did install baseline version of ", *this, "\n");
#else // ENABLE(DFG_JIT)
    UNUSED_PARAM(mode);
    UNUSED_PARAM(detail);
    UNREACHABLE_FOR_PLATFORM();
#endif // ENABLE(DFG_JIT)
}
3132
3133 JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
3134 {
3135     if (!codeOrigin.inlineCallFrame)
3136         return globalObject();
3137     return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->eitherCodeBlock()->globalObject();
3138 }
3139
3140 class RecursionCheckFunctor {
3141 public:
3142     RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
3143         : m_startCallFrame(startCallFrame)
3144         , m_codeBlock(codeBlock)
3145         , m_depthToCheck(depthToCheck)
3146         , m_foundStartCallFrame(false)
3147         , m_didRecurse(false)
3148     { }
3149
3150     StackVisitor::Status operator()(StackVisitor& visitor)
3151     {
3152         CallFrame* currentCallFrame = visitor->callFrame();
3153
3154         if (currentCallFrame == m_startCallFrame)
3155             m_foundStartCallFrame = true;
3156
3157         if (m_foundStartCallFrame) {
3158             if (visitor->callFrame()->codeBlock() == m_codeBlock) {
3159                 m_didRecurse = true;
3160                 return StackVisitor::Done;
3161             }
3162
3163             if (!m_depthToCheck--)
3164                 return StackVisitor::Done;
3165         }
3166
3167         return StackVisitor::Continue;
3168     }
3169
3170     bool didRecurse() const { return m_didRecurse; }
3171
3172 private:
3173     CallFrame* m_startCallFrame;
3174     CodeBlock* m_codeBlock;
3175     unsigned m_depthToCheck;
3176     bool m_foundStartCallFrame;
3177     bool m_didRecurse;
3178 };
3179
3180 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
3181 {
3182     CodeBlock* callerCodeBlock = callerFrame->codeBlock();
3183     
3184     if (Options::verboseCallLink())
3185         dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
3186     
3187 #if ENABLE(DFG_JIT)
3188     if (!m_shouldAlwaysBeInlined)
3189         return;
3190     
3191     if (!callerCodeBlock) {
3192         m_shouldAlwaysBeInlined = false;
3193         if (Options::verboseCallLink())
3194             dataLog("    Clearing SABI because caller is native.\n");
3195         return;
3196     }
3197
3198     if (!hasBaselineJITProfiling())
3199         return;
3200
3201     if (!DFG::mightInlineFunction(this))
3202         return;
3203
3204     if (!canInline(m_capabilityLevelState))
3205         return;
3206     
3207     if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
3208         m_shouldAlwaysBeInlined = false;
3209         if (Options::verboseCallLink())
3210             dataLog("    Clearing SABI because caller is too large.\n");
3211         return;
3212     }
3213
3214     if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
3215         // If the caller is still in the interpreter, then we can't expect inlining to
3216         // happen anytime soon. Assume it's profitable to optimize it separately. This
3217         // ensures that a function is SABI only if it is called no more frequently than
3218         // any of its callers.
3219         m_shouldAlwaysBeInlined = false;
3220         if (Options::verboseCallLink())
3221             dataLog("    Clearing SABI because caller is in LLInt.\n");
3222         return;
3223     }
3224     
3225     if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
3226         m_shouldAlwaysBeInlined = false;
3227         if (Options::verboseCallLink())
3228             dataLog("    Clearing SABI bcause caller was already optimized.\n");
3229         return;
3230     }
3231     
3232     if (callerCodeBlock->codeType() != FunctionCode) {
3233         // If the caller is either eval or global code, assume that that won't be
3234         // optimized anytime soon. For eval code this is particularly true since we
3235         // delay eval optimization by a *lot*.
3236         m_shouldAlwaysBeInlined = false;
3237         if (Options::verboseCallLink())
3238             dataLog("    Clearing SABI because caller is not a function.\n");
3239         return;
3240     }
3241
3242     // Recursive calls won't be inlined.
3243     RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
3244     vm()->topCallFrame->iterate(functor);
3245
3246     if (functor.didRecurse()) {
3247         if (Options::verboseCallLink())
3248             dataLog("    Clearing SABI because recursion was detected.\n");
3249         m_shouldAlwaysBeInlined = false;
3250         return;
3251     }
3252     
3253     if (callerCodeBlock->m_capabilityLevelState == DFG::CapabilityLevelNotSet) {
3254         dataLog("In call from ", *callerCodeBlock, " ", callerFrame->codeOrigin(), " to ", *this, ": caller's DFG capability level is not set.\n");
3255         CRASH();
3256     }
3257     
3258     if (canCompile(callerCodeBlock->m_capabilityLevelState))
3259         return;
3260     
3261     if (Options::verboseCallLink())
3262         dataLog("    Clearing SABI because the caller is not a DFG candidate.\n");
3263     
3264     m_shouldAlwaysBeInlined = false;
3265 #endif
3266 }
3267
unsigned CodeBlock::reoptimizationRetryCounter() const
{
    // Number of times this block has been jettisoned with reoptimization
    // counted (see countReoptimization()); drives exponential threshold backoff.
#if ENABLE(JIT)
    ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
    return m_reoptimizationRetryCounter;
#else
    return 0;
#endif // ENABLE(JIT)
}
3277
3278 #if ENABLE(JIT)
3279 void CodeBlock::countReoptimization()
3280 {
3281     m_reoptimizationRetryCounter++;
3282     if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
3283         m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
3284 }
3285
3286 unsigned CodeBlock::numberOfDFGCompiles()
3287 {
3288     ASSERT(JITCode::isBaselineCode(jitType()));
3289     if (Options::testTheFTL()) {
3290         if (m_didFailFTLCompilation)
3291             return 1000000;
3292         return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
3293     }
3294     return (JITCode::isOptimizingJIT(replacement()->jitType()) ? 1 : 0) + m_reoptimizationRetryCounter;
3295 }
3296
3297 int32_t CodeBlock::codeTypeThresholdMultiplier() const
3298 {
3299     if (codeType() == EvalCode)
3300         return Options::evalThresholdMultiplier();
3301     
3302     return 1;
3303 }
3304
double CodeBlock::optimizationThresholdScalingFactor()
{
    // Returns a multiplier (roughly 0.9 .. 6.0) applied to optimization
    // thresholds, growing sub-linearly with the block's instruction count.
    //
    // This expression arises from doing a least-squares fit of
    //
    // F[x_] =: a * Sqrt[x + b] + Abs[c * x] + d
    //
    // against the data points:
    //
    //    x       F[x_]
    //    10       0.9          (smallest reasonable code block)
    //   200       1.0          (typical small-ish code block)
    //   320       1.2          (something I saw in 3d-cube that I wanted to optimize)
    //  1268       5.0          (something I saw in 3d-cube that I didn't want to optimize)
    //  4000       5.5          (random large size, used to cause the function to converge to a shallow curve of some sort)
    // 10000       6.0          (similar to above)
    //
    // I achieve the minimization using the following Mathematica code:
    //
    // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
    //
    // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
    //
    // solution = 
    //     Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
    //         {a, b, c, d}][[2]]
    //
    // And the code below (to initialize a, b, c, d) is generated by:
    //
    // Print["const double " <> ToString[#[[1]]] <> " = " <>
    //     If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
    //
    // We've long known the following to be true:
    // - Small code blocks are cheap to optimize and so we should do it sooner rather
    //   than later.
    // - Large code blocks are expensive to optimize and so we should postpone doing so,
    //   and sometimes have a large enough threshold that we never optimize them.
    // - The difference in cost is not totally linear because (a) just invoking the
    //   DFG incurs some base cost and (b) for large code blocks there is enough slop
    //   in the correlation between instruction count and the actual compilation cost
    //   that for those large blocks, the instruction count should not have a strong
    //   influence on our threshold.
    //
    // I knew the goals but I didn't know how to achieve them; so I picked an interesting
    // example where the heuristics were right (code block in 3d-cube with instruction
    // count 320, which got compiled early as it should have been) and one where they were
    // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
    // to compile and didn't run often enough to warrant compilation in my opinion), and
    // then threw in additional data points that represented my own guess of what our
    // heuristics should do for some round-numbered examples.
    //
    // The expression to which I decided to fit the data arose because I started with an
    // affine function, and then did two things: put the linear part in an Abs to ensure
    // that the fit didn't end up choosing a negative value of c (which would result in
    // the function turning over and going negative for large x) and I threw in a Sqrt
    // term because Sqrt represents my intuition that the function should be more sensitive
    // to small changes in small values of x, but less sensitive when x gets large.
    
    // Note that the current fit essentially eliminates the linear portion of the
    // expression (c == 0.0).
    const double a = 0.061504;
    const double b = 1.02406;
    const double c = 0.0;
    const double d = 0.825914;
    
    double instructionCount = this->instructionCount();
    
    ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
    
    double result = d + a * sqrt(instructionCount + b) + c * instructionCount;
    
    result *= codeTypeThresholdMultiplier();
    
    if (Options::verboseOSR()) {
        dataLog(
            *this, ": instruction count is ", instructionCount,
            ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
            "\n");
    }
    return result;
}
3385
3386 static int32_t clipThreshold(double threshold)
3387 {
3388     if (threshold < 1.0)
3389         return 1;
3390     
3391     if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
3392         return std::numeric_limits<int32_t>::max();
3393     
3394     return static_cast<int32_t>(threshold);
3395 }
3396
3397 int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
3398 {
3399     return clipThreshold(
3400         static_cast<double>(desiredThreshold) *
3401         optimizationThresholdScalingFactor() *
3402         (1 << reoptimizationRetryCounter()));
3403 }
3404
bool CodeBlock::checkIfOptimizationThresholdReached()
{
#if ENABLE(DFG_JIT)
    // If a concurrent DFG compile of this block has already finished, take the
    // slow path on the next invocation so the finished code can be installed.
    if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
        if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
            == DFG::Worklist::Compiled) {
            optimizeNextInvocation();
            return true;
        }
    }
#endif
    
    // Otherwise defer to the execute counter's threshold bookkeeping.
    return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
}
3419
void CodeBlock::optimizeNextInvocation()
{
    if (Options::verboseOSR())
        dataLog(*this, ": Optimizing next invocation.\n");
    // A zero threshold makes the counter trip on the very next execution.
    m_jitExecuteCounter.setNewThreshold(0, this);
}
3426
void CodeBlock::dontOptimizeAnytimeSoon()
{
    if (Options::verboseOSR())
        dataLog(*this, ": Not optimizing anytime soon.\n");
    // Push the counter's threshold out indefinitely (e.g. after a failed compile).
    m_jitExecuteCounter.deferIndefinitely();
}
3433
3434 void CodeBlock::optimizeAfterWarmUp()
3435 {
3436     if (Options::verboseOSR())
3437         dataLog(*this, ": Optimizing after warm-up.\n");
3438 #if ENABLE(DFG_JIT)
3439     m_jitExecuteCounter.setNewThreshold(
3440         adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
3441 #endif
3442 }
3443
3444 void CodeBlock::optimizeAfterLongWarmUp()
3445 {
3446     if (Options::verboseOSR())
3447         dataLog(*this, ": Optimizing after long warm-up.\n");
3448 #if ENABLE(DFG_JIT)
3449     m_jitExecuteCounter.setNewThreshold(
3450         adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
3451 #endif
3452 }
3453
3454 void CodeBlock::optimizeSoon()
3455 {
3456     if (Options::verboseOSR())
3457         dataLog(*this, ": Optimizing soon.\n");
3458 #if ENABLE(DFG_JIT)
3459     m_jitExecuteCounter.setNewThreshold(
3460         adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
3461 #endif
3462 }
3463
void CodeBlock::forceOptimizationSlowPathConcurrently()
{
    if (Options::verboseOSR())
        dataLog(*this, ": Forcing slow path concurrently.\n");
    // Racy by design: nudges the counter from another thread so the mutator
    // takes the optimization slow path soon (no hard guarantee).
    m_jitExecuteCounter.forceSlowPathConcurrently();
}
3470
3471 #if ENABLE(DFG_JIT)
3472 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
3473 {
3474     JITCode::JITType type = jitType();
3475     if (type != JITCode::BaselineJIT) {
3476         dataLog(*this, ": expected to have baseline code but have ", type, "\n");
3477         RELEASE_ASSERT_NOT_REACHED();
3478     }
3479     
3480     CodeBlock* theReplacement = replacement();
3481     if ((result == CompilationSuccessful) != (theReplacement != this)) {
3482         dataLog(*this, ": we have result = ", result, " but ");
3483         if (theReplacement == this)
3484             dataLog("we are our own replacement.\n");
3485         else
3486             dataLog("our replacement is ", pointerDump(theReplacement), "\n");
3487         RELEASE_ASSERT_NOT_REACHED();
3488     }
3489     
3490     switch (result) {
3491     case CompilationSuccessful:
3492         RELEASE_ASSERT(JITCode::isOptimizingJIT(replacement()->jitType()));
3493         optimizeNextInvocation();
3494         return;
3495     case CompilationFailed:
3496         dontOptimizeAnytimeSoon();
3497         return;
3498     case CompilationDeferred:
3499         // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
3500         // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
3501         // necessarily guarantee anything. So, we make sure that even if that
3502         // function ends up being a no-op, we still eventually retry and realize
3503         // that we have optimized code ready.
3504         optimizeAfterWarmUp();
3505         return;
3506     case CompilationInvalidated:
3507         // Retry with exponential backoff.
3508         countReoptimization();
3509         optimizeAfterWarmUp();
3510         return;
3511     }
3512     
3513     dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
3514     RELEASE_ASSERT_NOT_REACHED();
3515 }
3516
3517 #endif
3518     
3519 uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
3520 {
3521     ASSERT(JITCode::isOptimizingJIT(jitType()));
3522     // Compute this the lame way so we don't saturate. This is called infrequently
3523     // enough that this loop won't hurt us.
3524     unsigned result = desiredThreshold;
3525     for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
3526         unsigned newResult = result << 1;
3527         if (newResult < result)
3528             return std::numeric_limits<uint32_t>::max();
3529         result = newResult;
3530     }
3531     return result;
3532 }
3533
3534 uint32_t CodeBlock::exitCountThresholdForReoptimization()
3535 {
3536     return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
3537 }
3538
3539 uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
3540 {
3541     return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
3542 }
3543
3544 bool CodeBlock::shouldReoptimizeNow()
3545 {
3546     return osrExitCounter() >= exitCountThresholdForReoptimization();
3547 }
3548
3549 bool CodeBlock::shouldReoptimizeFromLoopNow()
3550 {
3551     return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
3552 }
3553 #endif
3554
3555 ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
3556 {
3557     for (unsigned i = 0; i < m_arrayProfiles.size(); ++i) {
3558         if (m_arrayProfiles[i].bytecodeOffset() == bytecodeOffset)
3559             return &m_arrayProfiles[i];
3560     }
3561     return 0;
3562 }
3563
3564 ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
3565 {
3566     ArrayProfile* result = getArrayProfile(bytecodeOffset);
3567     if (result)
3568         return result;
3569     return addArrayProfile(bytecodeOffset);
3570 }
3571
void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
{
    // Recompute the predicted type of every value profile while gathering
    // statistics about how "full" the profiling data is (consumed by
    // shouldOptimizeNow()).
    ConcurrentJITLocker locker(m_lock);
    
    numberOfLiveNonArgumentValueProfiles = 0;
    numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
    for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
        ValueProfile* profile = getFromAllValueProfiles(i);
        unsigned numSamples = profile->totalNumberOfSamples();
        if (numSamples > ValueProfile::numberOfBuckets)
            numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
        numberOfSamplesInProfiles += numSamples;
        // Profiles with a negative bytecode offset are excluded from the
        // non-argument liveness count — presumably argument profiles; confirm
        // against ValueProfile's offset convention.
        if (profile->m_bytecodeOffset < 0) {
            profile->computeUpdatedPrediction(locker);
            continue;
        }
        if (profile->numberOfSamples() || profile->m_prediction != SpecNone)
            numberOfLiveNonArgumentValueProfiles++;
        profile->computeUpdatedPrediction(locker);
    }
    
#if ENABLE(DFG_JIT)
    m_lazyOperandValueProfiles.computeUpdatedPredictions(locker);
#endif
}
3597
3598 void CodeBlock::updateAllValueProfilePredictions()
3599 {
3600     unsigned ignoredValue1, ignoredValue2;
3601     updateAllPredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
3602 }
3603
3604 void CodeBlock::updateAllArrayPredictions()
3605 {
3606     ConcurrentJITLocker locker(m_lock);
3607     
3608     for (unsigned i = m_arrayProfiles.size(); i--;)
3609         m_arrayProfiles[i].computeUpdatedPrediction(locker, this);
3610     
3611     // Don't count these either, for similar reasons.
3612     for (unsigned i = m_arrayAllocationProfiles.size(); i--;)
3613         m_arrayAllocationProfiles[i].updateIndexingType();
3614 }
3615
void CodeBlock::updateAllPredictions()
{
    // Refresh both value-profile and array-profile predictions.
    updateAllValueProfilePredictions();
    updateAllArrayPredictions();
}
3621
bool CodeBlock::shouldOptimizeNow()
{
    // Decide whether the profiling data is "full" enough that an optimizing
    // compile is worthwhile right now; if not, schedule another warm-up period
    // (up to a bounded number of delays).
    if (Options::verboseOSR())
        dataLog("Considering optimizing ", *this, "...\n");

    // Once we've delayed the maximum number of times, optimize unconditionally.
    if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
        return true;
    
    updateAllArrayPredictions();
    
    unsigned numberOfLiveNonArgumentValueProfiles;
    unsigned numberOfSamplesInProfiles;
    updateAllPredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);

    if (Options::verboseOSR()) {
        dataLogF(
            "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
            (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles(),
            numberOfLiveNonArgumentValueProfiles, numberOfValueProfiles(),
            (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles(),
            numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfValueProfiles());
    }

    // Optimize only when both the liveness and fullness rates meet the
    // configured bars and we've waited through the minimum number of delays.
    if ((!numberOfValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles() >= Options::desiredProfileLivenessRate())
        && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
        && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
        return true;
    
    // Not ready yet: count the delay and wait out another warm-up period.
    ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
    m_optimizationDelayCounter++;
    optimizeAfterWarmUp();
    return false;
}
3655
3656 #if ENABLE(DFG_JIT)
3657 void CodeBlock::tallyFrequentExitSites()
3658 {
3659     ASSERT(JITCode::isOptimizingJIT(jitType()));
3660     ASSERT(alternative()->jitType() == JITCode::BaselineJIT);
3661     
3662     CodeBlock* profiledBlock = alternative();
3663     
3664     switch (jitType()) {
3665     case JITCode::DFGJIT: {
3666         DFG::JITCode* jitCode = m_jitCode->dfg();
3667         for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
3668             DFG::OSRExit& exit = jitCode->osrExit[i];
3669             exit.considerAddingAsFrequentExitSite(profiledBlock);
3670         }
3671         break;
3672     }
3673
3674 #if ENABLE(FTL_JIT)
3675     case JITCode::FTLJIT: {
3676         // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
3677         // vector contains a totally different type, that just so happens to behave like
3678         // DFG::JITCode::osrExit.
3679         FTL::JITCode* jitCode = m_jitCode->ftl();
3680         for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
3681             FTL::OSRExit& exit = jitCode->osrExit[i];
3682             exit.considerAddingAsFrequentExitSite(profiledBlock);
3683         }
3684         break;
3685     }
3686 #endif
3687         
3688     default:
3689         RELEASE_ASSERT_NOT_REACHED();
3690         break;
3691     }
3692 }
3693 #endif // ENABLE(DFG_JIT)
3694
3695 #if ENABLE(VERBOSE_VALUE_PROFILE)
3696 void CodeBlock::dumpValueProfiles()
3697 {
3698     dataLog("ValueProfile for ", *this, ":\n");
3699     for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
3700         ValueProfile* profile = getFromAllValueProfiles(i);
3701         if (profile->m_bytecodeOffset < 0) {
3702             ASSERT(profile->m_bytecodeOffset == -1);
3703             dataLogF("   arg = %u: ", i);
3704         } else
3705             dataLogF("   bc = %d: ", profile->m_bytecodeOffset);
3706         if (!profile->numberOfSamples() && profile->m_prediction == SpecNone) {
3707             dataLogF("<empty>\n");
3708             continue;
3709         }
3710         profile->dump(WTF::dataFile());
3711         dataLogF("\n");
3712     }
3713     dataLog("RareCaseProfile for ", *this, ":\n");
3714     for (unsigned i = 0; i < numberOfRareCaseProfiles(); ++i) {
3715         RareCaseProfile* profile = rareCaseProfile(i);
3716         dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
3717     }
3718     dataLog("SpecialFastCaseProfile for ", *this, ":\n");
3719     for (unsigned i = 0; i < numberOfSpecialFastCaseProfiles(); ++i) {
3720         RareCaseProfile* profile = specialFastCaseProfile(i);
3721         dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
3722     }
3723 }
3724 #endif // ENABLE(VERBOSE_VALUE_PROFILE)
3725
3726 unsigned CodeBlock::frameRegisterCount()
3727 {
3728     switch (jitType()) {
3729     case JITCode::InterpreterThunk:
3730         return LLInt::frameRegisterCountFor(this);
3731
3732 #if ENABLE(JIT)
3733     case JITCode::BaselineJIT:
3734         return JIT::frameRegisterCountFor(this);
3735 #endif // ENABLE(JIT)
3736
3737 #if ENABLE(DFG_JIT)
3738     case JITCode::DFGJIT: