4d0c07341ddf98329af5b24b58ba92678ec4c9db
[WebKit-https.git] / Source / JavaScriptCore / bytecode / CodeBlock.cpp
1 /*
2  * Copyright (C) 2008-2010, 2012-2015 Apple Inc. All rights reserved.
3  * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1.  Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  * 2.  Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
15  *     its contributors may be used to endorse or promote products derived
16  *     from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29
30 #include "config.h"
31 #include "CodeBlock.h"
32
33 #include "BasicBlockLocation.h"
34 #include "BytecodeGenerator.h"
35 #include "BytecodeUseDef.h"
36 #include "CallLinkStatus.h"
37 #include "DFGCapabilities.h"
38 #include "DFGCommon.h"
39 #include "DFGDriver.h"
40 #include "DFGJITCode.h"
41 #include "DFGWorklist.h"
42 #include "Debugger.h"
43 #include "FunctionExecutableDump.h"
44 #include "Interpreter.h"
45 #include "JIT.h"
46 #include "JITStubs.h"
47 #include "JSCJSValue.h"
48 #include "JSFunction.h"
49 #include "JSLexicalEnvironment.h"
50 #include "JSNameScope.h"
51 #include "LLIntEntrypoint.h"
52 #include "LowLevelInterpreter.h"
53 #include "JSCInlines.h"
54 #include "PolymorphicGetByIdList.h"
55 #include "PolymorphicPutByIdList.h"
56 #include "ProfilerDatabase.h"
57 #include "ReduceWhitespace.h"
58 #include "Repatch.h"
59 #include "RepatchBuffer.h"
60 #include "SlotVisitorInlines.h"
61 #include "StackVisitor.h"
62 #include "TypeLocationCache.h"
63 #include "TypeProfiler.h"
64 #include "UnlinkedInstructionStream.h"
65 #include <wtf/BagToHashMap.h>
66 #include <wtf/CommaPrinter.h>
67 #include <wtf/StringExtras.h>
68 #include <wtf/StringPrintStream.h>
69 #include <wtf/text/UniquedStringImpl.h>
70
71 #if ENABLE(DFG_JIT)
72 #include "DFGOperations.h"
73 #endif
74
75 #if ENABLE(FTL_JIT)
76 #include "FTLJITCode.h"
77 #endif
78
79 namespace JSC {
80
81 CString CodeBlock::inferredName() const
82 {
83     switch (codeType()) {
84     case GlobalCode:
85         return "<global>";
86     case EvalCode:
87         return "<eval>";
88     case FunctionCode:
89         return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
90     default:
91         CRASH();
92         return CString("", 0);
93     }
94 }
95
96 bool CodeBlock::hasHash() const
97 {
98     return !!m_hash;
99 }
100
101 bool CodeBlock::isSafeToComputeHash() const
102 {
103     return !isCompilationThread();
104 }
105
106 CodeBlockHash CodeBlock::hash() const
107 {
108     if (!m_hash) {
109         RELEASE_ASSERT(isSafeToComputeHash());
110         m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind());
111     }
112     return m_hash;
113 }
114
// Returns the source text of this code block in a form suitable for tools.
// For non-function code the executable's source range is used directly; for
// functions, the range is reconstructed from the unlinked executable's offsets.
CString CodeBlock::sourceCodeForTools() const
{
    if (codeType() != FunctionCode)
        return ownerExecutable()->source().toUTF8();
    
    SourceProvider* provider = source();
    FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
    UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
    // The unlinked offsets are relative to the unlinked executable's own start;
    // shift them by the difference between the linked and unlinked start
    // offsets so they index into this provider's source string.
    unsigned unlinkedStartOffset = unlinked->startOffset();
    unsigned linkedStartOffset = executable->source().startOffset();
    int delta = linkedStartOffset - unlinkedStartOffset;
    unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
    unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
    // Prepend "function " so the dumped text reads like a declaration.
    return toCString(
        "function ",
        provider->source().impl()->utf8ForRange(rangeStart, rangeEnd - rangeStart));
}
132
133 CString CodeBlock::sourceCodeOnOneLine() const
134 {
135     return reduceWhitespace(sourceCodeForTools());
136 }
137
138 CString CodeBlock::hashAsStringIfPossible() const
139 {
140     if (hasHash() || isSafeToComputeHash())
141         return toCString(hash());
142     return "<no-hash>";
143 }
144
// Dumps a one-line identity summary of this code block, pretending it is at
// the given JIT tier: name#hash:[this->alternative->owner, tier, kind, size,
// flags]. The flag annotations guarded by BaselineJIT only apply there.
void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
{
    out.print(inferredName(), "#", hashAsStringIfPossible());
    out.print(":[", RawPointer(this), "->");
    // Show the alternative (lower-tier) code block, if one exists.
    if (!!m_alternative)
        out.print(RawPointer(m_alternative.get()), "->");
    out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());

    if (codeType() == FunctionCode)
        out.print(specializationKind());
    out.print(", ", instructionCount());
    if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
        out.print(" (ShouldAlwaysBeInlined)");
    if (ownerExecutable()->neverInline())
        out.print(" (NeverInline)");
    if (ownerExecutable()->didTryToEnterInLoop())
        out.print(" (DidTryToEnterInLoop)");
    if (ownerExecutable()->isStrictMode())
        out.print(" (StrictMode)");
    if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
        out.print(" (FTLFail)");
    if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
        out.print(" (HadFTLReplacement)");
    out.print("]");
}
170
171 void CodeBlock::dump(PrintStream& out) const
172 {
173     dumpAssumingJITType(out, jitType());
174 }
175
176 static CString idName(int id0, const Identifier& ident)
177 {
178     return toCString(ident.impl(), "(@id", id0, ")");
179 }
180
181 CString CodeBlock::registerName(int r) const
182 {
183     if (isConstantRegisterIndex(r))
184         return constantName(r);
185
186     return toCString(VirtualRegister(r));
187 }
188
189 CString CodeBlock::constantName(int index) const
190 {
191     JSValue value = getConstant(index);
192     return toCString(value, "(", VirtualRegister(index), ")");
193 }
194
195 static CString regexpToSourceString(RegExp* regExp)
196 {
197     char postfix[5] = { '/', 0, 0, 0, 0 };
198     int index = 1;
199     if (regExp->global())
200         postfix[index++] = 'g';
201     if (regExp->ignoreCase())
202         postfix[index++] = 'i';
203     if (regExp->multiline())
204         postfix[index] = 'm';
205
206     return toCString("/", regExp->pattern().impl(), postfix);
207 }
208
209 static CString regexpName(int re, RegExp* regexp)
210 {
211     return toCString(regexpToSourceString(regexp), "(@re", re, ")");
212 }
213
214 NEVER_INLINE static const char* debugHookName(int debugHookID)
215 {
216     switch (static_cast<DebugHookID>(debugHookID)) {
217         case DidEnterCallFrame:
218             return "didEnterCallFrame";
219         case WillLeaveCallFrame:
220             return "willLeaveCallFrame";
221         case WillExecuteStatement:
222             return "willExecuteStatement";
223         case WillExecuteProgram:
224             return "willExecuteProgram";
225         case DidExecuteProgram:
226             return "didExecuteProgram";
227         case DidReachBreakpoint:
228             return "didReachBreakpoint";
229     }
230
231     RELEASE_ASSERT_NOT_REACHED();
232     return "";
233 }
234
235 void CodeBlock::printUnaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
236 {
237     int r0 = (++it)->u.operand;
238     int r1 = (++it)->u.operand;
239
240     printLocationAndOp(out, exec, location, it, op);
241     out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
242 }
243
244 void CodeBlock::printBinaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
245 {
246     int r0 = (++it)->u.operand;
247     int r1 = (++it)->u.operand;
248     int r2 = (++it)->u.operand;
249     printLocationAndOp(out, exec, location, it, op);
250     out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
251 }
252
253 void CodeBlock::printConditionalJump(PrintStream& out, ExecState* exec, const Instruction*, const Instruction*& it, int location, const char* op)
254 {
255     int r0 = (++it)->u.operand;
256     int offset = (++it)->u.operand;
257     printLocationAndOp(out, exec, location, it, op);
258     out.printf("%s, %d(->%d)", registerName(r0).data(), offset, location + offset);
259 }
260
// Dumps one of the get_by_id family of instructions, choosing the mnemonic
// from the actual opcode, then printing dst, base, and the property name.
void CodeBlock::printGetByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it)
{
    const char* op;
    switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
    case op_get_by_id:
        op = "get_by_id";
        break;
    case op_get_by_id_out_of_line:
        op = "get_by_id_out_of_line";
        break;
    case op_get_array_length:
        op = "array_length";
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        // Quiet "possibly uninitialized" diagnostics on compilers that analyze
        // past the release assert.
        op = 0;
#endif
    }
    int r0 = (++it)->u.operand;
    int r1 = (++it)->u.operand;
    int id0 = (++it)->u.operand;
    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
    it += 4; // Increment up to the value profiler.
}
287
288 static void dumpStructure(PrintStream& out, const char* name, Structure* structure, const Identifier& ident)
289 {
290     if (!structure)
291         return;
292     
293     out.printf("%s = %p", name, structure);
294     
295     PropertyOffset offset = structure->getConcurrently(ident.impl());
296     if (offset != invalidOffset)
297         out.printf(" (offset = %d)", offset);
298 }
299
300 static void dumpChain(PrintStream& out, StructureChain* chain, const Identifier& ident)
301 {
302     out.printf("chain = %p: [", chain);
303     bool first = true;
304     for (WriteBarrier<Structure>* currentStructure = chain->head();
305          *currentStructure;
306          ++currentStructure) {
307         if (first)
308             first = false;
309         else
310             out.printf(", ");
311         dumpStructure(out, "struct", currentStructure->get(), ident);
312     }
313     out.printf("]");
314 }
315
316 void CodeBlock::printGetByIdCacheStatus(PrintStream& out, ExecState* exec, int location, const StubInfoMap& map)
317 {
318     Instruction* instruction = instructions().begin() + location;
319
320     const Identifier& ident = identifier(instruction[3].u.operand);
321     
322     UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.
323     
324     if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_array_length)
325         out.printf(" llint(array_length)");
326     else if (Structure* structure = instruction[4].u.structure.get()) {
327         out.printf(" llint(");
328         dumpStructure(out, "struct", structure, ident);
329         out.printf(")");
330     }
331
332 #if ENABLE(JIT)
333     if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
334         StructureStubInfo& stubInfo = *stubPtr;
335         if (stubInfo.resetByGC)
336             out.print(" (Reset By GC)");
337         
338         if (stubInfo.seen) {
339             out.printf(" jit(");
340             
341             Structure* baseStructure = 0;
342             Structure* prototypeStructure = 0;
343             StructureChain* chain = 0;
344             PolymorphicGetByIdList* list = 0;
345             
346             switch (stubInfo.accessType) {
347             case access_get_by_id_self:
348                 out.printf("self");
349                 baseStructure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
350                 break;
351             case access_get_by_id_list:
352                 out.printf("list");
353                 list = stubInfo.u.getByIdList.list;
354                 break;
355             case access_unset:
356                 out.printf("unset");
357                 break;
358             default:
359                 RELEASE_ASSERT_NOT_REACHED();
360                 break;
361             }
362             
363             if (baseStructure) {
364                 out.printf(", ");
365                 dumpStructure(out, "struct", baseStructure, ident);
366             }
367             
368             if (prototypeStructure) {
369                 out.printf(", ");
370                 dumpStructure(out, "prototypeStruct", baseStructure, ident);
371             }
372             
373             if (chain) {
374                 out.printf(", ");
375                 dumpChain(out, chain, ident);
376             }
377             
378             if (list) {
379                 out.printf(", list = %p: [", list);
380                 for (unsigned i = 0; i < list->size(); ++i) {
381                     if (i)
382                         out.printf(", ");
383                     out.printf("(");
384                     dumpStructure(out, "base", list->at(i).structure(), ident);
385                     if (list->at(i).chain()) {
386                         out.printf(", ");
387                         dumpChain(out, list->at(i).chain(), ident);
388                     }
389                     out.printf(")");
390                 }
391                 out.printf("]");
392             }
393             out.printf(")");
394         }
395     }
396 #else
397     UNUSED_PARAM(map);
398 #endif
399 }
400
// Dumps whatever inline-cache state the LLInt and/or JIT have accumulated for
// the put_by_id-style instruction at |location|.
void CodeBlock::printPutByIdCacheStatus(PrintStream& out, ExecState* exec, int location, const StubInfoMap& map)
{
    Instruction* instruction = instructions().begin() + location;

    const Identifier& ident = identifier(instruction[2].u.operand);
    
    UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.
    
    // LLInt state: either a cached replace structure, or a cached transition
    // (previous structure, next structure, and optional prototype chain).
    if (Structure* structure = instruction[4].u.structure.get()) {
        switch (exec->interpreter()->getOpcodeID(instruction[0].u.opcode)) {
        case op_put_by_id:
        case op_put_by_id_out_of_line:
            out.print(" llint(");
            dumpStructure(out, "struct", structure, ident);
            out.print(")");
            break;
            
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line:
            out.print(" llint(");
            dumpStructure(out, "prev", structure, ident);
            out.print(", ");
            dumpStructure(out, "next", instruction[6].u.structure.get(), ident);
            if (StructureChain* chain = instruction[7].u.structureChain.get()) {
                out.print(", ");
                dumpChain(out, chain, ident);
            }
            out.print(")");
            break;
            
        default:
            out.print(" llint(unknown)");
            break;
        }
    }

#if ENABLE(JIT)
    // JIT state: the stub may be a simple replace, a transition, or a
    // polymorphic list of accesses.
    if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
        StructureStubInfo& stubInfo = *stubPtr;
        if (stubInfo.resetByGC)
            out.print(" (Reset By GC)");
        
        if (stubInfo.seen) {
            out.printf(" jit(");
            
            switch (stubInfo.accessType) {
            case access_put_by_id_replace:
                out.print("replace, ");
                dumpStructure(out, "struct", stubInfo.u.putByIdReplace.baseObjectStructure.get(), ident);
                break;
            case access_put_by_id_transition_normal:
            case access_put_by_id_transition_direct:
                out.print("transition, ");
                dumpStructure(out, "prev", stubInfo.u.putByIdTransition.previousStructure.get(), ident);
                out.print(", ");
                dumpStructure(out, "next", stubInfo.u.putByIdTransition.structure.get(), ident);
                if (StructureChain* chain = stubInfo.u.putByIdTransition.chain.get()) {
                    out.print(", ");
                    dumpChain(out, chain, ident);
                }
                break;
            case access_put_by_id_list: {
                // Polymorphic case: dump each access in the list with its kind.
                out.printf("list = [");
                PolymorphicPutByIdList* list = stubInfo.u.putByIdList.list;
                CommaPrinter comma;
                for (unsigned i = 0; i < list->size(); ++i) {
                    out.print(comma, "(");
                    const PutByIdAccess& access = list->at(i);
                    
                    if (access.isReplace()) {
                        out.print("replace, ");
                        dumpStructure(out, "struct", access.oldStructure(), ident);
                    } else if (access.isSetter()) {
                        out.print("setter, ");
                        dumpStructure(out, "struct", access.oldStructure(), ident);
                    } else if (access.isCustom()) {
                        out.print("custom, ");
                        dumpStructure(out, "struct", access.oldStructure(), ident);
                    } else if (access.isTransition()) {
                        out.print("transition, ");
                        dumpStructure(out, "prev", access.oldStructure(), ident);
                        out.print(", ");
                        dumpStructure(out, "next", access.newStructure(), ident);
                        if (access.chain()) {
                            out.print(", ");
                            dumpChain(out, access.chain(), ident);
                        }
                    } else
                        out.print("unknown");
                    
                    out.print(")");
                }
                out.print("]");
                break;
            }
            case access_unset:
                out.printf("unset");
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }
            out.printf(")");
        }
    }
#else
    UNUSED_PARAM(map);
#endif
}
512
// Dumps a call-family instruction: dst, callee, argument count, and register
// offset, optionally followed by LLInt/JIT call-link cache state, then the
// array and value profiles that trail the instruction.
void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap& map)
{
    int dst = (++it)->u.operand;
    int func = (++it)->u.operand;
    int argCount = (++it)->u.operand;
    int registerOffset = (++it)->u.operand;
    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s, %s, %d, %d", registerName(dst).data(), registerName(func).data(), argCount, registerOffset);
    if (cacheDumpMode == DumpCaches) {
        // LLInt call-link cache: the last callee seen at this call site.
        LLIntCallLinkInfo* callLinkInfo = it[1].u.callLinkInfo;
        if (callLinkInfo->lastSeenCallee) {
            out.printf(
                " llint(%p, exec %p)",
                callLinkInfo->lastSeenCallee.get(),
                callLinkInfo->lastSeenCallee->executable());
        }
#if ENABLE(JIT)
        // JIT call-link cache for the same call site, if present in the map.
        if (CallLinkInfo* info = map.get(CodeOrigin(location))) {
            JSFunction* target = info->lastSeenCallee.get();
            if (target)
                out.printf(" jit(%p, exec %p)", target, target->executable());
        }
        
        if (jitType() != JITCode::FTLJIT)
            out.print(" status(", CallLinkStatus::computeFor(this, location, map), ")");
#else
        UNUSED_PARAM(map);
#endif
    }
    // Step over the remaining metadata words before the trailing profiles.
    ++it;
    ++it;
    dumpArrayProfiling(out, it, hasPrintedProfiling);
    dumpValueProfiling(out, it, hasPrintedProfiling);
}
547
548 void CodeBlock::printPutByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
549 {
550     int r0 = (++it)->u.operand;
551     int id0 = (++it)->u.operand;
552     int r1 = (++it)->u.operand;
553     printLocationAndOp(out, exec, location, it, op);
554     out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data());
555     it += 5;
556 }
557
558 void CodeBlock::dumpSource()
559 {
560     dumpSource(WTF::dataFile());
561 }
562
563 void CodeBlock::dumpSource(PrintStream& out)
564 {
565     ScriptExecutable* executable = ownerExecutable();
566     if (executable->isFunctionExecutable()) {
567         FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
568         String source = functionExecutable->source().provider()->getRange(
569             functionExecutable->parametersStartOffset(),
570             functionExecutable->typeProfilingEndOffset() + 1); // Type profiling end offset is the character before the '}'.
571         
572         out.print("function ", inferredName(), source);
573         return;
574     }
575     out.print(executable->source().toString());
576 }
577
578 void CodeBlock::dumpBytecode()
579 {
580     dumpBytecode(WTF::dataFile());
581 }
582
// Dumps the whole code block: a summary header, every instruction with its
// cache state, and the auxiliary tables (identifiers, constants, regexps,
// exception handlers, and switch jump tables).
void CodeBlock::dumpBytecode(PrintStream& out)
{
    // We only use the ExecState* for things that don't actually lead to JS execution,
    // like converting a JSString to a String. Hence the globalExec is appropriate.
    ExecState* exec = m_globalObject->globalExec();
    
    // Count opcodes (not instruction words) by stepping over each opcode's length.
    size_t instructionCount = 0;

    for (size_t i = 0; i < instructions().size(); i += opcodeLengths[exec->interpreter()->getOpcodeID(instructions()[i].u.opcode)])
        ++instructionCount;

    out.print(*this);
    out.printf(
        ": %lu m_instructions; %lu bytes; %d parameter(s); %d callee register(s); %d variable(s)",
        static_cast<unsigned long>(instructions().size()),
        static_cast<unsigned long>(instructions().size() * sizeof(Instruction)),
        m_numParameters, m_numCalleeRegisters, m_numVars);
    if (needsActivation() && codeType() == FunctionCode)
        out.printf("; lexical environment in r%d", activationRegister().offset());
    out.printf("\n");
    
    // Gather JIT inline-cache and call-link maps so each dumped instruction
    // can be annotated with its cache state.
    StubInfoMap stubInfos;
    CallLinkInfoMap callLinkInfos;
    getStubInfoMap(stubInfos);
    getCallLinkInfoMap(callLinkInfos);
    
    // The per-instruction dumper advances |it| past each opcode's operands.
    const Instruction* begin = instructions().begin();
    const Instruction* end = instructions().end();
    for (const Instruction* it = begin; it != end; ++it)
        dumpBytecode(out, exec, begin, it, stubInfos, callLinkInfos);
    
    if (numberOfIdentifiers()) {
        out.printf("\nIdentifiers:\n");
        size_t i = 0;
        do {
            out.printf("  id%u = %s\n", static_cast<unsigned>(i), identifier(i).string().utf8().data());
            ++i;
        } while (i != numberOfIdentifiers());
    }

    if (!m_constantRegisters.isEmpty()) {
        out.printf("\nConstants:\n");
        size_t i = 0;
        do {
            // Note how each constant was spelled in the source, when known.
            const char* sourceCodeRepresentationDescription = nullptr;
            switch (m_constantsSourceCodeRepresentation[i]) {
            case SourceCodeRepresentation::Double:
                sourceCodeRepresentationDescription = ": in source as double";
                break;
            case SourceCodeRepresentation::Integer:
                sourceCodeRepresentationDescription = ": in source as integer";
                break;
            case SourceCodeRepresentation::Other:
                sourceCodeRepresentationDescription = "";
                break;
            }
            out.printf("   k%u = %s%s\n", static_cast<unsigned>(i), toCString(m_constantRegisters[i].get()).data(), sourceCodeRepresentationDescription);
            ++i;
        } while (i < m_constantRegisters.size());
    }

    if (size_t count = m_unlinkedCode->numberOfRegExps()) {
        out.printf("\nm_regexps:\n");
        size_t i = 0;
        do {
            out.printf("  re%u = %s\n", static_cast<unsigned>(i), regexpToSourceString(m_unlinkedCode->regexp(i)).data());
            ++i;
        } while (i < count);
    }

    if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
        out.printf("\nException Handlers:\n");
        unsigned i = 0;
        do {
            HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
            out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] depth: [%4d] } %s\n",
                i + 1, handler.start, handler.end, handler.target, handler.scopeDepth, handler.typeName());
            ++i;
        } while (i < m_rareData->m_exceptionHandlers.size());
    }
    
    if (m_rareData && !m_rareData->m_switchJumpTables.isEmpty()) {
        out.printf("Switch Jump Tables:\n");
        unsigned i = 0;
        do {
            out.printf("  %1d = {\n", i);
            int entry = 0;
            Vector<int32_t>::const_iterator end = m_rareData->m_switchJumpTables[i].branchOffsets.end();
            for (Vector<int32_t>::const_iterator iter = m_rareData->m_switchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) {
                // A zero offset means this entry has no branch target; skip it.
                if (!*iter)
                    continue;
                out.printf("\t\t%4d => %04d\n", entry + m_rareData->m_switchJumpTables[i].min, *iter);
            }
            out.printf("      }\n");
            ++i;
        } while (i < m_rareData->m_switchJumpTables.size());
    }
    
    if (m_rareData && !m_rareData->m_stringSwitchJumpTables.isEmpty()) {
        out.printf("\nString Switch Jump Tables:\n");
        unsigned i = 0;
        do {
            out.printf("  %1d = {\n", i);
            StringJumpTable::StringOffsetTable::const_iterator end = m_rareData->m_stringSwitchJumpTables[i].offsetTable.end();
            for (StringJumpTable::StringOffsetTable::const_iterator iter = m_rareData->m_stringSwitchJumpTables[i].offsetTable.begin(); iter != end; ++iter)
                out.printf("\t\t\"%s\" => %04d\n", iter->key->utf8().data(), iter->value.branchOffset);
            out.printf("      }\n");
            ++i;
        } while (i < m_rareData->m_stringSwitchJumpTables.size());
    }

    out.printf("\n");
}
696
697 void CodeBlock::beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling)
698 {
699     if (hasPrintedProfiling) {
700         out.print("; ");
701         return;
702     }
703     
704     out.print("    ");
705     hasPrintedProfiling = true;
706 }
707
708 void CodeBlock::dumpValueProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
709 {
710     ConcurrentJITLocker locker(m_lock);
711     
712     ++it;
713     CString description = it->u.profile->briefDescription(locker);
714     if (!description.length())
715         return;
716     beginDumpProfiling(out, hasPrintedProfiling);
717     out.print(description);
718 }
719
720 void CodeBlock::dumpArrayProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
721 {
722     ConcurrentJITLocker locker(m_lock);
723     
724     ++it;
725     if (!it->u.arrayProfile)
726         return;
727     CString description = it->u.arrayProfile->briefDescription(locker, this);
728     if (!description.length())
729         return;
730     beginDumpProfiling(out, hasPrintedProfiling);
731     out.print(description);
732 }
733
734 void CodeBlock::dumpRareCaseProfile(PrintStream& out, const char* name, RareCaseProfile* profile, bool& hasPrintedProfiling)
735 {
736     if (!profile || !profile->m_counter)
737         return;
738
739     beginDumpProfiling(out, hasPrintedProfiling);
740     out.print(name, profile->m_counter);
741 }
742
743 void CodeBlock::printLocationAndOp(PrintStream& out, ExecState*, int location, const Instruction*&, const char* op)
744 {
745     out.printf("[%4d] %-17s ", location, op);
746 }
747
748 void CodeBlock::printLocationOpAndRegisterOperand(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, int operand)
749 {
750     printLocationAndOp(out, exec, location, it, op);
751     out.printf("%s", registerName(operand).data());
752 }
753
754 void CodeBlock::dumpBytecode(
755     PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it,
756     const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
757 {
758     int location = it - begin;
759     bool hasPrintedProfiling = false;
760     OpcodeID opcode = exec->interpreter()->getOpcodeID(it->u.opcode);
761     switch (opcode) {
762         case op_enter: {
763             printLocationAndOp(out, exec, location, it, "enter");
764             break;
765         }
766         case op_create_lexical_environment: {
767             int r0 = (++it)->u.operand;
768             int r1 = (++it)->u.operand;
769             printLocationAndOp(out, exec, location, it, "create_lexical_environment");
770             out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
771             break;
772         }
773         case op_get_scope: {
774             int r0 = (++it)->u.operand;
775             printLocationOpAndRegisterOperand(out, exec, location, it, "get_scope", r0);
776             break;
777         }
778         case op_create_direct_arguments: {
779             int r0 = (++it)->u.operand;
780             printLocationAndOp(out, exec, location, it, "create_direct_arguments");
781             out.printf("%s", registerName(r0).data());
782             break;
783         }
784         case op_create_scoped_arguments: {
785             int r0 = (++it)->u.operand;
786             int r1 = (++it)->u.operand;
787             printLocationAndOp(out, exec, location, it, "create_scoped_arguments");
788             out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
789             break;
790         }
791         case op_create_out_of_band_arguments: {
792             int r0 = (++it)->u.operand;
793             printLocationAndOp(out, exec, location, it, "create_out_of_band_arguments");
794             out.printf("%s", registerName(r0).data());
795             break;
796         }
797         case op_create_this: {
798             int r0 = (++it)->u.operand;
799             int r1 = (++it)->u.operand;
800             unsigned inferredInlineCapacity = (++it)->u.operand;
801             unsigned cachedFunction = (++it)->u.operand;
802             printLocationAndOp(out, exec, location, it, "create_this");
803             out.printf("%s, %s, %u, %u", registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity, cachedFunction);
804             break;
805         }
        case op_to_this: {
            int r0 = (++it)->u.operand;
            printLocationOpAndRegisterOperand(out, exec, location, it, "to_this", r0);
            // Next operand is the inline cache slot: the Structure this site has
            // seen for |this|. Only dumped once the cache has been populated.
            Structure* structure = (++it)->u.structure.get();
            if (structure)
                out.print(", cache(struct = ", RawPointer(structure), ")");
            // Final operand records the profiled kinds of |this| seen here.
            out.print(", ", (++it)->u.toThisStatus);
            break;
        }
        case op_check_tdz: {
            int r0 = (++it)->u.operand;
            // NOTE(review): the dumped label keeps the "op_" prefix, unlike most
            // sibling cases ("enter", "mov", ...) — confirm whether intentional.
            printLocationOpAndRegisterOperand(out, exec, location, it, "op_check_tdz", r0);
            break;
        }
820         case op_new_object: {
821             int r0 = (++it)->u.operand;
822             unsigned inferredInlineCapacity = (++it)->u.operand;
823             printLocationAndOp(out, exec, location, it, "new_object");
824             out.printf("%s, %u", registerName(r0).data(), inferredInlineCapacity);
825             ++it; // Skip object allocation profile.
826             break;
827         }
828         case op_new_array: {
829             int dst = (++it)->u.operand;
830             int argv = (++it)->u.operand;
831             int argc = (++it)->u.operand;
832             printLocationAndOp(out, exec, location, it, "new_array");
833             out.printf("%s, %s, %d", registerName(dst).data(), registerName(argv).data(), argc);
834             ++it; // Skip array allocation profile.
835             break;
836         }
837         case op_new_array_with_size: {
838             int dst = (++it)->u.operand;
839             int length = (++it)->u.operand;
840             printLocationAndOp(out, exec, location, it, "new_array_with_size");
841             out.printf("%s, %s", registerName(dst).data(), registerName(length).data());
842             ++it; // Skip array allocation profile.
843             break;
844         }
845         case op_new_array_buffer: {
846             int dst = (++it)->u.operand;
847             int argv = (++it)->u.operand;
848             int argc = (++it)->u.operand;
849             printLocationAndOp(out, exec, location, it, "new_array_buffer");
850             out.printf("%s, %d, %d", registerName(dst).data(), argv, argc);
851             ++it; // Skip array allocation profile.
852             break;
853         }
854         case op_new_regexp: {
855             int r0 = (++it)->u.operand;
856             int re0 = (++it)->u.operand;
857             printLocationAndOp(out, exec, location, it, "new_regexp");
858             out.printf("%s, ", registerName(r0).data());
859             if (r0 >=0 && r0 < (int)m_unlinkedCode->numberOfRegExps())
860                 out.printf("%s", regexpName(re0, regexp(re0)).data());
861             else
862                 out.printf("bad_regexp(%d)", re0);
863             break;
864         }
865         case op_mov: {
866             int r0 = (++it)->u.operand;
867             int r1 = (++it)->u.operand;
868             printLocationAndOp(out, exec, location, it, "mov");
869             out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
870             break;
871         }
        case op_profile_type: {
            int r0 = (++it)->u.operand;
            // Skip the four remaining metadata operands; only the profiled
            // register is printed for this opcode.
            ++it;
            ++it;
            ++it;
            ++it;
            printLocationAndOp(out, exec, location, it, "op_profile_type");
            out.printf("%s", registerName(r0).data());
            break;
        }
882         case op_profile_control_flow: {
883             BasicBlockLocation* basicBlockLocation = (++it)->u.basicBlockLocation;
884             printLocationAndOp(out, exec, location, it, "profile_control_flow");
885             out.printf("[%d, %d]", basicBlockLocation->startOffset(), basicBlockLocation->endOffset());
886             break;
887         }
888         case op_not: {
889             printUnaryOp(out, exec, location, it, "not");
890             break;
891         }
892         case op_eq: {
893             printBinaryOp(out, exec, location, it, "eq");
894             break;
895         }
896         case op_eq_null: {
897             printUnaryOp(out, exec, location, it, "eq_null");
898             break;
899         }
900         case op_neq: {
901             printBinaryOp(out, exec, location, it, "neq");
902             break;
903         }
904         case op_neq_null: {
905             printUnaryOp(out, exec, location, it, "neq_null");
906             break;
907         }
908         case op_stricteq: {
909             printBinaryOp(out, exec, location, it, "stricteq");
910             break;
911         }
912         case op_nstricteq: {
913             printBinaryOp(out, exec, location, it, "nstricteq");
914             break;
915         }
916         case op_less: {
917             printBinaryOp(out, exec, location, it, "less");
918             break;
919         }
920         case op_lesseq: {
921             printBinaryOp(out, exec, location, it, "lesseq");
922             break;
923         }
924         case op_greater: {
925             printBinaryOp(out, exec, location, it, "greater");
926             break;
927         }
928         case op_greatereq: {
929             printBinaryOp(out, exec, location, it, "greatereq");
930             break;
931         }
932         case op_inc: {
933             int r0 = (++it)->u.operand;
934             printLocationOpAndRegisterOperand(out, exec, location, it, "inc", r0);
935             break;
936         }
937         case op_dec: {
938             int r0 = (++it)->u.operand;
939             printLocationOpAndRegisterOperand(out, exec, location, it, "dec", r0);
940             break;
941         }
942         case op_to_number: {
943             printUnaryOp(out, exec, location, it, "to_number");
944             break;
945         }
946         case op_to_string: {
947             printUnaryOp(out, exec, location, it, "to_string");
948             break;
949         }
950         case op_negate: {
951             printUnaryOp(out, exec, location, it, "negate");
952             break;
953         }
954         case op_add: {
955             printBinaryOp(out, exec, location, it, "add");
956             ++it;
957             break;
958         }
959         case op_mul: {
960             printBinaryOp(out, exec, location, it, "mul");
961             ++it;
962             break;
963         }
964         case op_div: {
965             printBinaryOp(out, exec, location, it, "div");
966             ++it;
967             break;
968         }
969         case op_mod: {
970             printBinaryOp(out, exec, location, it, "mod");
971             break;
972         }
973         case op_sub: {
974             printBinaryOp(out, exec, location, it, "sub");
975             ++it;
976             break;
977         }
978         case op_lshift: {
979             printBinaryOp(out, exec, location, it, "lshift");
980             break;            
981         }
982         case op_rshift: {
983             printBinaryOp(out, exec, location, it, "rshift");
984             break;
985         }
986         case op_urshift: {
987             printBinaryOp(out, exec, location, it, "urshift");
988             break;
989         }
990         case op_bitand: {
991             printBinaryOp(out, exec, location, it, "bitand");
992             ++it;
993             break;
994         }
995         case op_bitxor: {
996             printBinaryOp(out, exec, location, it, "bitxor");
997             ++it;
998             break;
999         }
1000         case op_bitor: {
1001             printBinaryOp(out, exec, location, it, "bitor");
1002             ++it;
1003             break;
1004         }
1005         case op_check_has_instance: {
1006             int r0 = (++it)->u.operand;
1007             int r1 = (++it)->u.operand;
1008             int r2 = (++it)->u.operand;
1009             int offset = (++it)->u.operand;
1010             printLocationAndOp(out, exec, location, it, "check_has_instance");
1011             out.printf("%s, %s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), offset, location + offset);
1012             break;
1013         }
1014         case op_instanceof: {
1015             int r0 = (++it)->u.operand;
1016             int r1 = (++it)->u.operand;
1017             int r2 = (++it)->u.operand;
1018             printLocationAndOp(out, exec, location, it, "instanceof");
1019             out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1020             break;
1021         }
1022         case op_unsigned: {
1023             printUnaryOp(out, exec, location, it, "unsigned");
1024             break;
1025         }
1026         case op_typeof: {
1027             printUnaryOp(out, exec, location, it, "typeof");
1028             break;
1029         }
1030         case op_is_undefined: {
1031             printUnaryOp(out, exec, location, it, "is_undefined");
1032             break;
1033         }
1034         case op_is_boolean: {
1035             printUnaryOp(out, exec, location, it, "is_boolean");
1036             break;
1037         }
1038         case op_is_number: {
1039             printUnaryOp(out, exec, location, it, "is_number");
1040             break;
1041         }
1042         case op_is_string: {
1043             printUnaryOp(out, exec, location, it, "is_string");
1044             break;
1045         }
1046         case op_is_object: {
1047             printUnaryOp(out, exec, location, it, "is_object");
1048             break;
1049         }
1050         case op_is_object_or_null: {
1051             printUnaryOp(out, exec, location, it, "is_object_or_null");
1052             break;
1053         }
1054         case op_is_function: {
1055             printUnaryOp(out, exec, location, it, "is_function");
1056             break;
1057         }
1058         case op_in: {
1059             printBinaryOp(out, exec, location, it, "in");
1060             break;
1061         }
        case op_init_global_const_nop: {
            printLocationAndOp(out, exec, location, it, "init_global_const_nop");
            // The nop form still occupies the full instruction width: advance
            // past its four (unused) operand slots without printing them.
            it++;
            it++;
            it++;
            it++;
            break;
        }
1070         case op_init_global_const: {
1071             WriteBarrier<Unknown>* variablePointer = (++it)->u.variablePointer;
1072             int r0 = (++it)->u.operand;
1073             printLocationAndOp(out, exec, location, it, "init_global_const");
1074             out.printf("g%d(%p), %s", m_globalObject->findVariableIndex(variablePointer).offset(), variablePointer, registerName(r0).data());
1075             it++;
1076             it++;
1077             break;
1078         }
1079         case op_get_by_id:
1080         case op_get_by_id_out_of_line:
1081         case op_get_array_length: {
1082             printGetByIdOp(out, exec, location, it);
1083             printGetByIdCacheStatus(out, exec, location, stubInfos);
1084             dumpValueProfiling(out, it, hasPrintedProfiling);
1085             break;
1086         }
1087         case op_put_by_id: {
1088             printPutByIdOp(out, exec, location, it, "put_by_id");
1089             printPutByIdCacheStatus(out, exec, location, stubInfos);
1090             break;
1091         }
1092         case op_put_by_id_out_of_line: {
1093             printPutByIdOp(out, exec, location, it, "put_by_id_out_of_line");
1094             printPutByIdCacheStatus(out, exec, location, stubInfos);
1095             break;
1096         }
1097         case op_put_by_id_transition_direct: {
1098             printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct");
1099             printPutByIdCacheStatus(out, exec, location, stubInfos);
1100             break;
1101         }
1102         case op_put_by_id_transition_direct_out_of_line: {
1103             printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct_out_of_line");
1104             printPutByIdCacheStatus(out, exec, location, stubInfos);
1105             break;
1106         }
1107         case op_put_by_id_transition_normal: {
1108             printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal");
1109             printPutByIdCacheStatus(out, exec, location, stubInfos);
1110             break;
1111         }
1112         case op_put_by_id_transition_normal_out_of_line: {
1113             printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal_out_of_line");
1114             printPutByIdCacheStatus(out, exec, location, stubInfos);
1115             break;
1116         }
1117         case op_put_getter_by_id: {
1118             int r0 = (++it)->u.operand;
1119             int id0 = (++it)->u.operand;
1120             int r1 = (++it)->u.operand;
1121             printLocationAndOp(out, exec, location, it, "put_getter_by_id");
1122             out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data());
1123             break;
1124         }
1125         case op_put_setter_by_id: {
1126             int r0 = (++it)->u.operand;
1127             int id0 = (++it)->u.operand;
1128             int r1 = (++it)->u.operand;
1129             printLocationAndOp(out, exec, location, it, "put_setter_by_id");
1130             out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data());
1131             break;
1132         }
1133         case op_put_getter_setter: {
1134             int r0 = (++it)->u.operand;
1135             int id0 = (++it)->u.operand;
1136             int r1 = (++it)->u.operand;
1137             int r2 = (++it)->u.operand;
1138             printLocationAndOp(out, exec, location, it, "put_getter_setter");
1139             out.printf("%s, %s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data(), registerName(r2).data());
1140             break;
1141         }
1142         case op_del_by_id: {
1143             int r0 = (++it)->u.operand;
1144             int r1 = (++it)->u.operand;
1145             int id0 = (++it)->u.operand;
1146             printLocationAndOp(out, exec, location, it, "del_by_id");
1147             out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
1148             break;
1149         }
1150         case op_get_by_val: {
1151             int r0 = (++it)->u.operand;
1152             int r1 = (++it)->u.operand;
1153             int r2 = (++it)->u.operand;
1154             printLocationAndOp(out, exec, location, it, "get_by_val");
1155             out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1156             dumpArrayProfiling(out, it, hasPrintedProfiling);
1157             dumpValueProfiling(out, it, hasPrintedProfiling);
1158             break;
1159         }
1160         case op_put_by_val: {
1161             int r0 = (++it)->u.operand;
1162             int r1 = (++it)->u.operand;
1163             int r2 = (++it)->u.operand;
1164             printLocationAndOp(out, exec, location, it, "put_by_val");
1165             out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1166             dumpArrayProfiling(out, it, hasPrintedProfiling);
1167             break;
1168         }
1169         case op_put_by_val_direct: {
1170             int r0 = (++it)->u.operand;
1171             int r1 = (++it)->u.operand;
1172             int r2 = (++it)->u.operand;
1173             printLocationAndOp(out, exec, location, it, "put_by_val_direct");
1174             out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1175             dumpArrayProfiling(out, it, hasPrintedProfiling);
1176             break;
1177         }
1178         case op_del_by_val: {
1179             int r0 = (++it)->u.operand;
1180             int r1 = (++it)->u.operand;
1181             int r2 = (++it)->u.operand;
1182             printLocationAndOp(out, exec, location, it, "del_by_val");
1183             out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1184             break;
1185         }
1186         case op_put_by_index: {
1187             int r0 = (++it)->u.operand;
1188             unsigned n0 = (++it)->u.operand;
1189             int r1 = (++it)->u.operand;
1190             printLocationAndOp(out, exec, location, it, "put_by_index");
1191             out.printf("%s, %u, %s", registerName(r0).data(), n0, registerName(r1).data());
1192             break;
1193         }
1194         case op_jmp: {
1195             int offset = (++it)->u.operand;
1196             printLocationAndOp(out, exec, location, it, "jmp");
1197             out.printf("%d(->%d)", offset, location + offset);
1198             break;
1199         }
1200         case op_jtrue: {
1201             printConditionalJump(out, exec, begin, it, location, "jtrue");
1202             break;
1203         }
1204         case op_jfalse: {
1205             printConditionalJump(out, exec, begin, it, location, "jfalse");
1206             break;
1207         }
1208         case op_jeq_null: {
1209             printConditionalJump(out, exec, begin, it, location, "jeq_null");
1210             break;
1211         }
1212         case op_jneq_null: {
1213             printConditionalJump(out, exec, begin, it, location, "jneq_null");
1214             break;
1215         }
1216         case op_jneq_ptr: {
1217             int r0 = (++it)->u.operand;
1218             Special::Pointer pointer = (++it)->u.specialPointer;
1219             int offset = (++it)->u.operand;
1220             printLocationAndOp(out, exec, location, it, "jneq_ptr");
1221             out.printf("%s, %d (%p), %d(->%d)", registerName(r0).data(), pointer, m_globalObject->actualPointerFor(pointer), offset, location + offset);
1222             break;
1223         }
1224         case op_jless: {
1225             int r0 = (++it)->u.operand;
1226             int r1 = (++it)->u.operand;
1227             int offset = (++it)->u.operand;
1228             printLocationAndOp(out, exec, location, it, "jless");
1229             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1230             break;
1231         }
1232         case op_jlesseq: {
1233             int r0 = (++it)->u.operand;
1234             int r1 = (++it)->u.operand;
1235             int offset = (++it)->u.operand;
1236             printLocationAndOp(out, exec, location, it, "jlesseq");
1237             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1238             break;
1239         }
1240         case op_jgreater: {
1241             int r0 = (++it)->u.operand;
1242             int r1 = (++it)->u.operand;
1243             int offset = (++it)->u.operand;
1244             printLocationAndOp(out, exec, location, it, "jgreater");
1245             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1246             break;
1247         }
1248         case op_jgreatereq: {
1249             int r0 = (++it)->u.operand;
1250             int r1 = (++it)->u.operand;
1251             int offset = (++it)->u.operand;
1252             printLocationAndOp(out, exec, location, it, "jgreatereq");
1253             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1254             break;
1255         }
1256         case op_jnless: {
1257             int r0 = (++it)->u.operand;
1258             int r1 = (++it)->u.operand;
1259             int offset = (++it)->u.operand;
1260             printLocationAndOp(out, exec, location, it, "jnless");
1261             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1262             break;
1263         }
1264         case op_jnlesseq: {
1265             int r0 = (++it)->u.operand;
1266             int r1 = (++it)->u.operand;
1267             int offset = (++it)->u.operand;
1268             printLocationAndOp(out, exec, location, it, "jnlesseq");
1269             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1270             break;
1271         }
1272         case op_jngreater: {
1273             int r0 = (++it)->u.operand;
1274             int r1 = (++it)->u.operand;
1275             int offset = (++it)->u.operand;
1276             printLocationAndOp(out, exec, location, it, "jngreater");
1277             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1278             break;
1279         }
1280         case op_jngreatereq: {
1281             int r0 = (++it)->u.operand;
1282             int r1 = (++it)->u.operand;
1283             int offset = (++it)->u.operand;
1284             printLocationAndOp(out, exec, location, it, "jngreatereq");
1285             out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1286             break;
1287         }
1288         case op_loop_hint: {
1289             printLocationAndOp(out, exec, location, it, "loop_hint");
1290             break;
1291         }
1292         case op_switch_imm: {
1293             int tableIndex = (++it)->u.operand;
1294             int defaultTarget = (++it)->u.operand;
1295             int scrutineeRegister = (++it)->u.operand;
1296             printLocationAndOp(out, exec, location, it, "switch_imm");
1297             out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1298             break;
1299         }
1300         case op_switch_char: {
1301             int tableIndex = (++it)->u.operand;
1302             int defaultTarget = (++it)->u.operand;
1303             int scrutineeRegister = (++it)->u.operand;
1304             printLocationAndOp(out, exec, location, it, "switch_char");
1305             out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1306             break;
1307         }
1308         case op_switch_string: {
1309             int tableIndex = (++it)->u.operand;
1310             int defaultTarget = (++it)->u.operand;
1311             int scrutineeRegister = (++it)->u.operand;
1312             printLocationAndOp(out, exec, location, it, "switch_string");
1313             out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1314             break;
1315         }
1316         case op_new_func: {
1317             int r0 = (++it)->u.operand;
1318             int r1 = (++it)->u.operand;
1319             int f0 = (++it)->u.operand;
1320             printLocationAndOp(out, exec, location, it, "new_func");
1321             out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
1322             break;
1323         }
1324         case op_new_func_exp: {
1325             int r0 = (++it)->u.operand;
1326             int r1 = (++it)->u.operand;
1327             int f0 = (++it)->u.operand;
1328             printLocationAndOp(out, exec, location, it, "new_func_exp");
1329             out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0);
1330             break;
1331         }
1332         case op_call: {
1333             printCallOp(out, exec, location, it, "call", DumpCaches, hasPrintedProfiling, callLinkInfos);
1334             break;
1335         }
1336         case op_call_eval: {
1337             printCallOp(out, exec, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling, callLinkInfos);
1338             break;
1339         }
1340             
        case op_construct_varargs:
        case op_call_varargs: {
            // Shared handler: the two opcodes have identical operand layouts and
            // differ only in the label printed below.
            int result = (++it)->u.operand;
            int callee = (++it)->u.operand;
            int thisValue = (++it)->u.operand;
            int arguments = (++it)->u.operand;
            int firstFreeRegister = (++it)->u.operand;
            int varArgOffset = (++it)->u.operand;
            // Skip one operand slot that is not printed — presumably the array
            // profile; confirm against the opcode's definition.
            ++it;
            printLocationAndOp(out, exec, location, it, opcode == op_call_varargs ? "call_varargs" : "construct_varargs");
            out.printf("%s, %s, %s, %s, %d, %d", registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister, varArgOffset);
            dumpValueProfiling(out, it, hasPrintedProfiling);
            break;
        }
1355
1356         case op_ret: {
1357             int r0 = (++it)->u.operand;
1358             printLocationOpAndRegisterOperand(out, exec, location, it, "ret", r0);
1359             break;
1360         }
1361         case op_construct: {
1362             printCallOp(out, exec, location, it, "construct", DumpCaches, hasPrintedProfiling, callLinkInfos);
1363             break;
1364         }
1365         case op_strcat: {
1366             int r0 = (++it)->u.operand;
1367             int r1 = (++it)->u.operand;
1368             int count = (++it)->u.operand;
1369             printLocationAndOp(out, exec, location, it, "strcat");
1370             out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), count);
1371             break;
1372         }
1373         case op_to_primitive: {
1374             int r0 = (++it)->u.operand;
1375             int r1 = (++it)->u.operand;
1376             printLocationAndOp(out, exec, location, it, "to_primitive");
1377             out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
1378             break;
1379         }
1380         case op_get_enumerable_length: {
1381             int dst = it[1].u.operand;
1382             int base = it[2].u.operand;
1383             printLocationAndOp(out, exec, location, it, "op_get_enumerable_length");
1384             out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
1385             it += OPCODE_LENGTH(op_get_enumerable_length) - 1;
1386             break;
1387         }
1388         case op_has_indexed_property: {
1389             int dst = it[1].u.operand;
1390             int base = it[2].u.operand;
1391             int propertyName = it[3].u.operand;
1392             ArrayProfile* arrayProfile = it[4].u.arrayProfile;
1393             printLocationAndOp(out, exec, location, it, "op_has_indexed_property");
1394             out.printf("%s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), arrayProfile);
1395             it += OPCODE_LENGTH(op_has_indexed_property) - 1;
1396             break;
1397         }
1398         case op_has_structure_property: {
1399             int dst = it[1].u.operand;
1400             int base = it[2].u.operand;
1401             int propertyName = it[3].u.operand;
1402             int enumerator = it[4].u.operand;
1403             printLocationAndOp(out, exec, location, it, "op_has_structure_property");
1404             out.printf("%s, %s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(enumerator).data());
1405             it += OPCODE_LENGTH(op_has_structure_property) - 1;
1406             break;
1407         }
1408         case op_has_generic_property: {
1409             int dst = it[1].u.operand;
1410             int base = it[2].u.operand;
1411             int propertyName = it[3].u.operand;
1412             printLocationAndOp(out, exec, location, it, "op_has_generic_property");
1413             out.printf("%s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data());
1414             it += OPCODE_LENGTH(op_has_generic_property) - 1;
1415             break;
1416         }
1417         case op_get_direct_pname: {
1418             int dst = it[1].u.operand;
1419             int base = it[2].u.operand;
1420             int propertyName = it[3].u.operand;
1421             int index = it[4].u.operand;
1422             int enumerator = it[5].u.operand;
1423             ValueProfile* profile = it[6].u.profile;
1424             printLocationAndOp(out, exec, location, it, "op_get_direct_pname");
1425             out.printf("%s, %s, %s, %s, %s, %p", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(index).data(), registerName(enumerator).data(), profile);
1426             it += OPCODE_LENGTH(op_get_direct_pname) - 1;
1427             break;
1428
1429         }
1430         case op_get_property_enumerator: {
1431             int dst = it[1].u.operand;
1432             int base = it[2].u.operand;
1433             printLocationAndOp(out, exec, location, it, "op_get_property_enumerator");
1434             out.printf("%s, %s", registerName(dst).data(), registerName(base).data());
1435             it += OPCODE_LENGTH(op_get_property_enumerator) - 1;
1436             break;
1437         }
1438         case op_enumerator_structure_pname: {
1439             int dst = it[1].u.operand;
1440             int enumerator = it[2].u.operand;
1441             int index = it[3].u.operand;
1442             printLocationAndOp(out, exec, location, it, "op_enumerator_structure_pname");
1443             out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data());
1444             it += OPCODE_LENGTH(op_enumerator_structure_pname) - 1;
1445             break;
1446         }
1447         case op_enumerator_generic_pname: {
1448             int dst = it[1].u.operand;
1449             int enumerator = it[2].u.operand;
1450             int index = it[3].u.operand;
1451             printLocationAndOp(out, exec, location, it, "op_enumerator_generic_pname");
1452             out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data());
1453             it += OPCODE_LENGTH(op_enumerator_generic_pname) - 1;
1454             break;
1455         }
1456         case op_to_index_string: {
1457             int dst = it[1].u.operand;
1458             int index = it[2].u.operand;
1459             printLocationAndOp(out, exec, location, it, "op_to_index_string");
1460             out.printf("%s, %s", registerName(dst).data(), registerName(index).data());
1461             it += OPCODE_LENGTH(op_to_index_string) - 1;
1462             break;
1463         }
1464         case op_push_with_scope: {
1465             int dst = (++it)->u.operand;
1466             int newScope = (++it)->u.operand;
1467             printLocationAndOp(out, exec, location, it, "push_with_scope");
1468             out.printf("%s, %s", registerName(dst).data(), registerName(newScope).data());
1469             break;
1470         }
1471         case op_pop_scope: {
1472             int r0 = (++it)->u.operand;
1473             printLocationOpAndRegisterOperand(out, exec, location, it, "pop_scope", r0);
1474             break;
1475         }
1476         case op_push_name_scope: {
1477             int dst = (++it)->u.operand;
1478             int r1 = (++it)->u.operand;
1479             int k0 = (++it)->u.operand;
1480             JSNameScope::Type scopeType = (JSNameScope::Type)(++it)->u.operand;
1481             printLocationAndOp(out, exec, location, it, "push_name_scope");
1482             out.printf("%s, %s, %s, %s", registerName(dst).data(), registerName(r1).data(), constantName(k0).data(), (scopeType == JSNameScope::FunctionNameScope) ? "functionScope" : ((scopeType == JSNameScope::CatchScope) ? "catchScope" : "unknownScopeType"));
1483             break;
1484         }
1485         case op_catch: {
1486             int r0 = (++it)->u.operand;
1487             printLocationOpAndRegisterOperand(out, exec, location, it, "catch", r0);
1488             break;
1489         }
1490         case op_throw: {
1491             int r0 = (++it)->u.operand;
1492             printLocationOpAndRegisterOperand(out, exec, location, it, "throw", r0);
1493             break;
1494         }
1495         case op_throw_static_error: {
1496             int k0 = (++it)->u.operand;
1497             int k1 = (++it)->u.operand;
1498             printLocationAndOp(out, exec, location, it, "throw_static_error");
1499             out.printf("%s, %s", constantName(k0).data(), k1 ? "true" : "false");
1500             break;
1501         }
1502         case op_debug: {
1503             int debugHookID = (++it)->u.operand;
1504             int hasBreakpointFlag = (++it)->u.operand;
1505             printLocationAndOp(out, exec, location, it, "debug");
1506             out.printf("%s %d", debugHookName(debugHookID), hasBreakpointFlag);
1507             break;
1508         }
1509         case op_profile_will_call: {
1510             int function = (++it)->u.operand;
1511             printLocationOpAndRegisterOperand(out, exec, location, it, "profile_will_call", function);
1512             break;
1513         }
1514         case op_profile_did_call: {
1515             int function = (++it)->u.operand;
1516             printLocationOpAndRegisterOperand(out, exec, location, it, "profile_did_call", function);
1517             break;
1518         }
1519         case op_end: {
1520             int r0 = (++it)->u.operand;
1521             printLocationOpAndRegisterOperand(out, exec, location, it, "end", r0);
1522             break;
1523         }
1524         case op_resolve_scope: {
1525             int r0 = (++it)->u.operand;
1526             int scope = (++it)->u.operand;
1527             int id0 = (++it)->u.operand;
1528             ResolveModeAndType modeAndType = ResolveModeAndType((++it)->u.operand);
1529             int depth = (++it)->u.operand;
1530             printLocationAndOp(out, exec, location, it, "resolve_scope");
1531             out.printf("%s, %s, %s, %u<%s|%s>, %d", registerName(r0).data(), registerName(scope).data(), idName(id0, identifier(id0)).data(),
1532                 modeAndType.operand(), resolveModeName(modeAndType.mode()), resolveTypeName(modeAndType.type()),
1533                 depth);
1534             ++it;
1535             break;
1536         }
1537         case op_get_from_scope: {
1538             int r0 = (++it)->u.operand;
1539             int r1 = (++it)->u.operand;
1540             int id0 = (++it)->u.operand;
1541             ResolveModeAndType modeAndType = ResolveModeAndType((++it)->u.operand);
1542             ++it; // Structure
1543             int operand = (++it)->u.operand; // Operand
1544             printLocationAndOp(out, exec, location, it, "get_from_scope");
1545             out.print(registerName(r0), ", ", registerName(r1));
1546             if (static_cast<unsigned>(id0) == UINT_MAX)
1547                 out.print(", anonymous");
1548             else
1549                 out.print(", ", idName(id0, identifier(id0)));
1550             out.print(", ", modeAndType.operand(), "<", resolveModeName(modeAndType.mode()), "|", resolveTypeName(modeAndType.type()), ">, ", operand);
1551             dumpValueProfiling(out, it, hasPrintedProfiling);
1552             break;
1553         }
1554         case op_put_to_scope: {
1555             int r0 = (++it)->u.operand;
1556             int id0 = (++it)->u.operand;
1557             int r1 = (++it)->u.operand;
1558             ResolveModeAndType modeAndType = ResolveModeAndType((++it)->u.operand);
1559             ++it; // Structure
1560             int operand = (++it)->u.operand; // Operand
1561             printLocationAndOp(out, exec, location, it, "put_to_scope");
1562             out.print(registerName(r0));
1563             if (static_cast<unsigned>(id0) == UINT_MAX)
1564                 out.print(", anonymous");
1565             else
1566                 out.print(", ", idName(id0, identifier(id0)));
1567             out.print(", ", registerName(r1), ", ", modeAndType.operand(), "<", resolveModeName(modeAndType.mode()), "|", resolveTypeName(modeAndType.type()), ">, <structure>, ", operand);
1568             break;
1569         }
1570         case op_get_from_arguments: {
1571             int r0 = (++it)->u.operand;
1572             int r1 = (++it)->u.operand;
1573             int offset = (++it)->u.operand;
1574             printLocationAndOp(out, exec, location, it, "get_from_arguments");
1575             out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), offset);
1576             dumpValueProfiling(out, it, hasPrintedProfiling);
1577             break;
1578         }
1579         case op_put_to_arguments: {
1580             int r0 = (++it)->u.operand;
1581             int offset = (++it)->u.operand;
1582             int r1 = (++it)->u.operand;
1583             printLocationAndOp(out, exec, location, it, "put_to_arguments");
1584             out.printf("%s, %d, %s", registerName(r0).data(), offset, registerName(r1).data());
1585             break;
1586         }
1587         default:
1588             RELEASE_ASSERT_NOT_REACHED();
1589     }
1590
1591     dumpRareCaseProfile(out, "rare case: ", rareCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
1592     dumpRareCaseProfile(out, "special fast case: ", specialFastCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
1593     
1594 #if ENABLE(DFG_JIT)
1595     Vector<DFG::FrequentExitSite> exitSites = exitProfile().exitSitesFor(location);
1596     if (!exitSites.isEmpty()) {
1597         out.print(" !! frequent exits: ");
1598         CommaPrinter comma;
1599         for (unsigned i = 0; i < exitSites.size(); ++i)
1600             out.print(comma, exitSites[i].kind(), " ", exitSites[i].jitType());
1601     }
1602 #else // ENABLE(DFG_JIT)
1603     UNUSED_PARAM(location);
1604 #endif // ENABLE(DFG_JIT)
1605     out.print("\n");
1606 }
1607
1608 void CodeBlock::dumpBytecode(
1609     PrintStream& out, unsigned bytecodeOffset,
1610     const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
1611 {
1612     ExecState* exec = m_globalObject->globalExec();
1613     const Instruction* it = instructions().begin() + bytecodeOffset;
1614     dumpBytecode(out, exec, instructions().begin(), it, stubInfos, callLinkInfos);
1615 }
1616
// X-macro lists enumerating CodeBlock's member vectors (and the rare-data
// vectors), so per-member operations can be generated mechanically by
// passing a macro name.
// NOTE(review): no uses of either macro are visible in this portion of the
// file -- confirm they are still needed before relying on them.
#define FOR_EACH_MEMBER_VECTOR(macro) \
    macro(instructions) \
    macro(callLinkInfos) \
    macro(linkedCallerList) \
    macro(identifiers) \
    macro(functionExpressions) \
    macro(constantRegisters)

#define FOR_EACH_MEMBER_VECTOR_RARE_DATA(macro) \
    macro(regexps) \
    macro(functions) \
    macro(exceptionHandlers) \
    macro(switchJumpTables) \
    macro(stringSwitchJumpTables) \
    macro(evalCodeCache) \
    macro(expressionInfo) \
    macro(lineInfo) \
    macro(callReturnIndexVector)
1635
1636 template<typename T>
1637 static size_t sizeInBytes(const Vector<T>& vector)
1638 {
1639     return vector.capacity() * sizeof(T);
1640 }
1641
1642 namespace {
1643
1644 class PutToScopeFireDetail : public FireDetail {
1645 public:
1646     PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
1647         : m_codeBlock(codeBlock)
1648         , m_ident(ident)
1649     {
1650     }
1651     
1652     virtual void dump(PrintStream& out) const override
1653     {
1654         out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
1655     }
1656     
1657 private:
1658     CodeBlock* m_codeBlock;
1659     const Identifier& m_ident;
1660 };
1661
1662 } // anonymous namespace
1663
// Copy constructor for duplicating an already-parsed CodeBlock. Shared,
// already-linked state (instructions, constants, function decls/exprs,
// source info) is copied from |other|, while per-block dynamic state --
// FTL/debugger flags, breakpoint count, OSR-exit and optimization counters
// -- is reset so the copy starts fresh.
CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
    : m_globalObject(other.m_globalObject)
    , m_heap(other.m_heap)
    , m_numCalleeRegisters(other.m_numCalleeRegisters)
    , m_numVars(other.m_numVars)
    , m_isConstructor(other.m_isConstructor)
    // Deliberately not copied from |other|: the copy is assumed inlineable
    // until proven otherwise, and carries no FTL history.
    , m_shouldAlwaysBeInlined(true)
    , m_didFailFTLCompilation(false)
    , m_hasBeenCompiledWithFTL(false)
    // Write-barriered fields are re-set against other's VM with
    // other.m_ownerExecutable as the owning cell.
    , m_unlinkedCode(*other.m_vm, other.m_ownerExecutable.get(), other.m_unlinkedCode.get())
    // Debugger state starts cleared on the copy.
    , m_hasDebuggerStatement(false)
    , m_steppingMode(SteppingModeDisabled)
    , m_numBreakpoints(0)
    , m_ownerExecutable(*other.m_vm, other.m_ownerExecutable.get(), other.m_ownerExecutable.get())
    , m_vm(other.m_vm)
    , m_instructions(other.m_instructions)
    , m_thisRegister(other.m_thisRegister)
    , m_scopeRegister(other.m_scopeRegister)
    , m_lexicalEnvironmentRegister(other.m_lexicalEnvironmentRegister)
    , m_isStrictMode(other.m_isStrictMode)
    , m_needsActivation(other.m_needsActivation)
    , m_mayBeExecuting(false)
    , m_source(other.m_source)
    , m_sourceOffset(other.m_sourceOffset)
    , m_firstLineColumnOffset(other.m_firstLineColumnOffset)
    , m_codeType(other.m_codeType)
    , m_constantRegisters(other.m_constantRegisters)
    , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
    , m_functionDecls(other.m_functionDecls)
    , m_functionExprs(other.m_functionExprs)
    // Tiering/optimization counters start at zero for the copy.
    , m_osrExitCounter(0)
    , m_optimizationDelayCounter(0)
    , m_reoptimizationRetryCounter(0)
    // The bytecode hash is identical to other's, so it is copied.
    , m_hash(other.m_hash)
#if ENABLE(JIT)
    , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
#endif
{
    m_visitAggregateHasBeenCalled.store(false, std::memory_order_relaxed);

    // GC must be deferred while the block is in this half-constructed state.
    ASSERT(m_heap->isDeferred());
    ASSERT(m_scopeRegister.isLocal());

    // Share other's symbol table (no clone here, unlike the linking
    // constructor below, which clones the scope part for FunctionCode).
    if (SymbolTable* symbolTable = other.symbolTable())
        m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable);
    
    setNumParameters(other.numParameters());
    // Put the copy on the standard warm-up schedule (per these helpers'
    // names -- thresholds are not inherited from |other|).
    optimizeAfterWarmUp();
    jitAfterWarmUp();

    if (other.m_rareData) {
        createRareDataIfNecessary();
        
        // Copy the rare-data tables that linking produced.
        m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
        m_rareData->m_constantBuffers = other.m_rareData->m_constantBuffers;
        m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
        m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
    }
    
    // Register with the heap so the GC can find and account for this block.
    m_heap->m_codeBlocks.add(this);
    m_heap->reportExtraMemoryAllocated(sizeof(CodeBlock));
}
1726
1727 CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
1728     : m_globalObject(scope->globalObject()->vm(), ownerExecutable, scope->globalObject())
1729     , m_heap(&m_globalObject->vm().heap)
1730     , m_numCalleeRegisters(unlinkedCodeBlock->m_numCalleeRegisters)
1731     , m_numVars(unlinkedCodeBlock->m_numVars)
1732     , m_isConstructor(unlinkedCodeBlock->isConstructor())
1733     , m_shouldAlwaysBeInlined(true)
1734     , m_didFailFTLCompilation(false)
1735     , m_hasBeenCompiledWithFTL(false)
1736     , m_unlinkedCode(m_globalObject->vm(), ownerExecutable, unlinkedCodeBlock)
1737     , m_hasDebuggerStatement(false)
1738     , m_steppingMode(SteppingModeDisabled)
1739     , m_numBreakpoints(0)
1740     , m_ownerExecutable(m_globalObject->vm(), ownerExecutable, ownerExecutable)
1741     , m_vm(unlinkedCodeBlock->vm())
1742     , m_thisRegister(unlinkedCodeBlock->thisRegister())
1743     , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
1744     , m_lexicalEnvironmentRegister(unlinkedCodeBlock->activationRegister())
1745     , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
1746     , m_needsActivation(unlinkedCodeBlock->hasActivationRegister() && unlinkedCodeBlock->codeType() == FunctionCode)
1747     , m_mayBeExecuting(false)
1748     , m_source(sourceProvider)
1749     , m_sourceOffset(sourceOffset)
1750     , m_firstLineColumnOffset(firstLineColumnOffset)
1751     , m_codeType(unlinkedCodeBlock->codeType())
1752     , m_osrExitCounter(0)
1753     , m_optimizationDelayCounter(0)
1754     , m_reoptimizationRetryCounter(0)
1755 #if ENABLE(JIT)
1756     , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
1757 #endif
1758 {
1759     m_visitAggregateHasBeenCalled.store(false, std::memory_order_relaxed);
1760
1761     ASSERT(m_heap->isDeferred());
1762     ASSERT(m_scopeRegister.isLocal());
1763
1764     bool didCloneSymbolTable = false;
1765     
1766     if (SymbolTable* symbolTable = unlinkedCodeBlock->symbolTable()) {
1767         if (m_vm->typeProfiler()) {
1768             ConcurrentJITLocker locker(symbolTable->m_lock);
1769             symbolTable->prepareForTypeProfiling(locker);
1770         }
1771
1772         if (codeType() == FunctionCode && symbolTable->scopeSize()) {
1773             m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable->cloneScopePart(*m_vm));
1774             didCloneSymbolTable = true;
1775         } else
1776             m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable);
1777     }
1778     
1779     ASSERT(m_source);
1780     setNumParameters(unlinkedCodeBlock->numParameters());
1781
1782     if (vm()->typeProfiler() || vm()->controlFlowProfiler())
1783         vm()->functionHasExecutedCache()->removeUnexecutedRange(m_ownerExecutable->sourceID(), m_ownerExecutable->typeProfilingStartOffset(), m_ownerExecutable->typeProfilingEndOffset());
1784
1785     setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation());
1786     if (unlinkedCodeBlock->usesGlobalObject())
1787         m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(*m_vm, ownerExecutable, m_globalObject.get());
1788
1789     for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
1790         LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
1791         if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
1792             m_constantRegisters[registerIndex].set(*m_vm, ownerExecutable, m_globalObject->jsCellForLinkTimeConstant(type));
1793     }
1794
1795     m_functionDecls.resizeToFit(unlinkedCodeBlock->numberOfFunctionDecls());
1796     for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
1797         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
1798         if (vm()->typeProfiler() || vm()->controlFlowProfiler())
1799             vm()->functionHasExecutedCache()->insertUnexecutedRange(m_ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
1800         m_functionDecls[i].set(*m_vm, ownerExecutable, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
1801     }
1802
1803     m_functionExprs.resizeToFit(unlinkedCodeBlock->numberOfFunctionExprs());
1804     for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
1805         UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
1806         if (vm()->typeProfiler() || vm()->controlFlowProfiler())
1807             vm()->functionHasExecutedCache()->insertUnexecutedRange(m_ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
1808         m_functionExprs[i].set(*m_vm, ownerExecutable, unlinkedExecutable->link(*m_vm, ownerExecutable->source()));
1809     }
1810
1811     if (unlinkedCodeBlock->hasRareData()) {
1812         createRareDataIfNecessary();
1813         if (size_t count = unlinkedCodeBlock->constantBufferCount()) {
1814             m_rareData->m_constantBuffers.grow(count);
1815             for (size_t i = 0; i < count; i++) {
1816                 const UnlinkedCodeBlock::ConstantBuffer& buffer = unlinkedCodeBlock->constantBuffer(i);
1817                 m_rareData->m_constantBuffers[i] = buffer;
1818             }
1819         }
1820         if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
1821             m_rareData->m_exceptionHandlers.resizeToFit(count);
1822             size_t nonLocalScopeDepth = scope->depth();
1823             for (size_t i = 0; i < count; i++) {
1824                 const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
1825                 HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
1826 #if ENABLE(JIT)
1827                 handler.initialize(unlinkedHandler, nonLocalScopeDepth,
1828                     CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(op_catch))));
1829 #else
1830                 handler.initialize(unlinkedHandler, nonLocalScopeDepth);
1831 #endif
1832             }
1833         }
1834
1835         if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
1836             m_rareData->m_stringSwitchJumpTables.grow(count);
1837             for (size_t i = 0; i < count; i++) {
1838                 UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
1839                 UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
1840                 for (; ptr != end; ++ptr) {
1841                     OffsetLocation offset;
1842                     offset.branchOffset = ptr->value;
1843                     m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
1844                 }
1845             }
1846         }
1847
1848         if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
1849             m_rareData->m_switchJumpTables.grow(count);
1850             for (size_t i = 0; i < count; i++) {
1851                 UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
1852                 SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
1853                 destTable.branchOffsets = sourceTable.branchOffsets;
1854                 destTable.min = sourceTable.min;
1855             }
1856         }
1857     }
1858
1859     // Allocate metadata buffers for the bytecode
1860     if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
1861         m_llintCallLinkInfos.resizeToFit(size);
1862     if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
1863         m_arrayProfiles.grow(size);
1864     if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
1865         m_arrayAllocationProfiles.resizeToFit(size);
1866     if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
1867         m_valueProfiles.resizeToFit(size);
1868     if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
1869         m_objectAllocationProfiles.resizeToFit(size);
1870
1871     // Copy and translate the UnlinkedInstructions
1872     unsigned instructionCount = unlinkedCodeBlock->instructions().count();
1873     UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions());
1874
1875     Vector<Instruction, 0, UnsafeVectorOverflow> instructions(instructionCount);
1876     for (unsigned i = 0; !instructionReader.atEnd(); ) {
1877         const UnlinkedInstruction* pc = instructionReader.next();
1878
1879         unsigned opLength = opcodeLength(pc[0].u.opcode);
1880
1881         instructions[i] = vm()->interpreter->getOpcode(pc[0].u.opcode);
1882         for (size_t j = 1; j < opLength; ++j) {
1883             if (sizeof(int32_t) != sizeof(intptr_t))
1884                 instructions[i + j].u.pointer = 0;
1885             instructions[i + j].u.operand = pc[j].u.operand;
1886         }
1887         switch (pc[0].u.opcode) {
1888         case op_has_indexed_property: {
1889             int arrayProfileIndex = pc[opLength - 1].u.operand;
1890             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1891
1892             instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
1893             break;
1894         }
1895         case op_call_varargs:
1896         case op_construct_varargs:
1897         case op_get_by_val: {
1898             int arrayProfileIndex = pc[opLength - 2].u.operand;
1899             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1900
1901             instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
1902             FALLTHROUGH;
1903         }
1904         case op_get_direct_pname:
1905         case op_get_by_id:
1906         case op_get_from_arguments: {
1907             ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
1908             ASSERT(profile->m_bytecodeOffset == -1);
1909             profile->m_bytecodeOffset = i;
1910             instructions[i + opLength - 1] = profile;
1911             break;
1912         }
1913         case op_put_by_val: {
1914             int arrayProfileIndex = pc[opLength - 1].u.operand;
1915             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1916             instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
1917             break;
1918         }
1919         case op_put_by_val_direct: {
1920             int arrayProfileIndex = pc[opLength - 1].u.operand;
1921             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1922             instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
1923             break;
1924         }
1925
1926         case op_new_array:
1927         case op_new_array_buffer:
1928         case op_new_array_with_size: {
1929             int arrayAllocationProfileIndex = pc[opLength - 1].u.operand;
1930             instructions[i + opLength - 1] = &m_arrayAllocationProfiles[arrayAllocationProfileIndex];
1931             break;
1932         }
1933         case op_new_object: {
1934             int objectAllocationProfileIndex = pc[opLength - 1].u.operand;
1935             ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex];
1936             int inferredInlineCapacity = pc[opLength - 2].u.operand;
1937
1938             instructions[i + opLength - 1] = objectAllocationProfile;
1939             objectAllocationProfile->initialize(*vm(),
1940                 m_ownerExecutable.get(), m_globalObject->objectPrototype(), inferredInlineCapacity);
1941             break;
1942         }
1943
1944         case op_call:
1945         case op_call_eval: {
1946             ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
1947             ASSERT(profile->m_bytecodeOffset == -1);
1948             profile->m_bytecodeOffset = i;
1949             instructions[i + opLength - 1] = profile;
1950             int arrayProfileIndex = pc[opLength - 2].u.operand;
1951             m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1952             instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
1953             instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
1954             break;
1955         }
1956         case op_construct: {
1957             instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
1958             ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
1959             ASSERT(profile->m_bytecodeOffset == -1);
1960             profile->m_bytecodeOffset = i;
1961             instructions[i + opLength - 1] = profile;
1962             break;
1963         }
1964         case op_get_by_id_out_of_line:
1965         case op_get_array_length:
1966             CRASH();
1967
1968         case op_init_global_const_nop: {
1969             ASSERT(codeType() == GlobalCode);
1970             Identifier ident = identifier(pc[4].u.operand);
1971             SymbolTableEntry entry = m_globalObject->symbolTable()->get(ident.impl());
1972             if (entry.isNull())
1973                 break;
1974
1975             instructions[i + 0] = vm()->interpreter->getOpcode(op_init_global_const);
1976             instructions[i + 1] = &m_globalObject->variableAt(entry.varOffset().scopeOffset());
1977             break;
1978         }
1979
1980         case op_resolve_scope: {
1981             const Identifier& ident = identifier(pc[3].u.operand);
1982             ResolveType type = static_cast<ResolveType>(pc[4].u.operand);
1983             RELEASE_ASSERT(type != LocalClosureVar);
1984
1985             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), needsActivation(), scope, ident, Get, type);
1986             instructions[i + 4].u.operand = op.type;
1987             instructions[i + 5].u.operand = op.depth;
1988             if (op.lexicalEnvironment)
1989                 instructions[i + 6].u.symbolTable.set(*vm(), ownerExecutable, op.lexicalEnvironment->symbolTable());
1990             break;
1991         }
1992
1993         case op_get_from_scope: {
1994             ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
1995             ASSERT(profile->m_bytecodeOffset == -1);
1996             profile->m_bytecodeOffset = i;
1997             instructions[i + opLength - 1] = profile;
1998
1999             // get_from_scope dst, scope, id, ResolveModeAndType, Structure, Operand
2000
2001             ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
2002             if (modeAndType.type() == LocalClosureVar) {
2003                 instructions[i + 4] = ResolveModeAndType(modeAndType.mode(), ClosureVar).operand();
2004                 break;
2005             }
2006
2007             const Identifier& ident = identifier(pc[3].u.operand);
2008
2009             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), needsActivation(), scope, ident, Get, modeAndType.type());
2010
2011             instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
2012             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks)
2013                 instructions[i + 5].u.watchpointSet = op.watchpointSet;
2014             else if (op.structure)
2015                 instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
2016             instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
2017             break;
2018         }
2019
2020         case op_put_to_scope: {
2021             // put_to_scope scope, id, value, ResolveModeAndType, Structure, Operand
2022             ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
2023             if (modeAndType.type() == LocalClosureVar) {
2024                 // Only do watching if the property we're putting to is not anonymous.
2025                 if (static_cast<unsigned>(pc[2].u.operand) != UINT_MAX) {
2026                     RELEASE_ASSERT(didCloneSymbolTable);
2027                     const Identifier& ident = identifier(pc[2].u.operand);
2028                     ConcurrentJITLocker locker(m_symbolTable->m_lock);
2029                     SymbolTable::Map::iterator iter = m_symbolTable->find(locker, ident.impl());
2030                     ASSERT(iter != m_symbolTable->end(locker));
2031                     iter->value.prepareToWatch();
2032                     instructions[i + 5].u.watchpointSet = iter->value.watchpointSet();
2033                 } else
2034                     instructions[i + 5].u.watchpointSet = nullptr;
2035                 break;
2036             }
2037
2038             const Identifier& ident = identifier(pc[2].u.operand);
2039
2040             ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), needsActivation(), scope, ident, Put, modeAndType.type());
2041
2042             instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
2043             if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks)
2044                 instructions[i + 5].u.watchpointSet = op.watchpointSet;
2045             else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
2046                 if (op.watchpointSet)
2047                     op.watchpointSet->invalidate(PutToScopeFireDetail(this, ident));
2048             } else if (op.structure)
2049                 instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
2050             instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
2051
2052             break;
2053         }
2054
2055         case op_profile_type: {
2056             RELEASE_ASSERT(vm()->typeProfiler());
2057             // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType?
2058             size_t instructionOffset = i + opLength - 1;
2059             unsigned divotStart, divotEnd;
2060             GlobalVariableID globalVariableID = 0;
2061             RefPtr<TypeSet> globalTypeSet;
2062             bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
2063             VirtualRegister profileRegister(pc[1].u.operand);
2064             ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand);
2065             SymbolTable* symbolTable = nullptr;
2066
2067             switch (flag) {
2068             case ProfileTypeBytecodePutToScope:
2069             case ProfileTypeBytecodeGetFromScope: {
2070                 const Identifier& ident = identifier(pc[4].u.operand);
2071                 ResolveType type = static_cast<ResolveType>(pc[5].u.operand);
2072                 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), needsActivation(), scope, ident, (flag == ProfileTypeBytecodeGetFromScope ? Get : Put), type);
2073
2074                 // FIXME: handle other values for op.type here, and also consider what to do when we can't statically determine the globalID
2075                 // https://bugs.webkit.org/show_bug.cgi?id=135184
2076                 if (op.type == ClosureVar)
2077                     symbolTable = op.lexicalEnvironment->symbolTable();
2078                 else if (op.type == GlobalVar)
2079                     symbolTable = m_globalObject.get()->symbolTable();
2080                 
2081                 if (symbolTable) {
2082                     ConcurrentJITLocker locker(symbolTable->m_lock);
2083                     // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
2084                     symbolTable->prepareForTypeProfiling(locker);
2085                     globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), *vm());
2086                     globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), *vm());
2087                 } else
2088                     globalVariableID = TypeProfilerNoGlobalIDExists;
2089
2090                 break;
2091             }
2092             case ProfileTypeBytecodePutToLocalScope:
2093             case ProfileTypeBytecodeGetFromLocalScope: {
2094                 const Identifier& ident = identifier(pc[4].u.operand);
2095                 symbolTable = m_symbolTable.get();
2096                 ConcurrentJITLocker locker(symbolTable->m_lock);
2097                 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
2098                 symbolTable->prepareForTypeProfiling(locker);
2099                 globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), *vm());
2100                 globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), *vm());
2101
2102                 break;
2103             }
2104
2105             case ProfileTypeBytecodeHasGlobalID: {
2106                 symbolTable = m_symbolTable.get();
2107                 ConcurrentJITLocker locker(symbolTable->m_lock);
2108                 globalVariableID = symbolTable->uniqueIDForOffset(locker, VarOffset(profileRegister), *vm());
2109                 globalTypeSet = symbolTable->globalTypeSetForOffset(locker, VarOffset(profileRegister), *vm());
2110                 break;
2111             }
2112             case ProfileTypeBytecodeDoesNotHaveGlobalID: 
2113             case ProfileTypeBytecodeFunctionArgument: {
2114                 globalVariableID = TypeProfilerNoGlobalIDExists;
2115                 break;
2116             }
2117             case ProfileTypeBytecodeFunctionReturnStatement: {
2118                 RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
2119                 globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
2120                 globalVariableID = TypeProfilerReturnStatement;
2121                 if (!shouldAnalyze) {
2122                     // Because a return statement can be added implicitly to return undefined at the end of a function,
2123                     // and these nodes don't emit expression ranges because they aren't in the actual source text of
2124                     // the user's program, give the type profiler some range to identify these return statements.
2125                     // Currently, the text offset that is used as identification is on the open brace of the function 
2126                     // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
2127                     divotStart = divotEnd = m_sourceOffset;
2128                     shouldAnalyze = true;
2129                 }
2130                 break;
2131             }
2132             }
2133
2134             std::pair<TypeLocation*, bool> locationPair = vm()->typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
2135                 m_ownerExecutable->sourceID(), divotStart, divotEnd, globalTypeSet, vm());
2136             TypeLocation* location = locationPair.first;
2137             bool isNewLocation = locationPair.second;
2138
2139             if (flag == ProfileTypeBytecodeFunctionReturnStatement)
2140                 location->m_divotForFunctionOffsetIfReturnStatement = m_sourceOffset;
2141
2142             if (shouldAnalyze && isNewLocation)
2143                 vm()->typeProfiler()->insertNewLocation(location);
2144
2145             instructions[i + 2].u.location = location;
2146             break;
2147         }
2148
2149         case op_debug: {
2150             if (pc[1].u.index == DidReachBreakpoint)
2151                 m_hasDebuggerStatement = true;
2152             break;
2153         }
2154
2155         default:
2156             break;
2157         }
2158         i += opLength;
2159     }
2160
2161     if (vm()->controlFlowProfiler())
2162         insertBasicBlockBoundariesForControlFlowProfiler(instructions);
2163
2164     m_instructions = WTF::RefCountedArray<Instruction>(instructions);
2165
2166     // Set optimization thresholds only after m_instructions is initialized, since these
2167     // rely on the instruction count (and are in theory permitted to also inspect the
2168     // instruction stream to more accurate assess the cost of tier-up).
2169     optimizeAfterWarmUp();
2170     jitAfterWarmUp();
2171
2172     // If the concurrent thread will want the code block's hash, then compute it here
2173     // synchronously.
2174     if (Options::alwaysComputeHash())
2175         hash();
2176
2177     if (Options::dumpGeneratedBytecodes())
2178         dumpBytecode();
2179     
2180     m_heap->m_codeBlocks.add(this);
2181     m_heap->reportExtraMemoryAllocated(sizeof(CodeBlock) + m_instructions.size() * sizeof(Instruction));
2182 }
2183
CodeBlock::~CodeBlock()
{
    // Let the per-bytecode profiler know this block is going away before we
    // tear anything down.
    if (m_vm->m_perBytecodeProfiler)
        m_vm->m_perBytecodeProfiler->notifyDestruction(this);
    
#if ENABLE(VERBOSE_VALUE_PROFILE)
    dumpValueProfiles();
#endif
    // Unlink all incoming LLInt callers so their nodes don't remain on our
    // (about to be freed) intrusive linked list.
    while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
        m_incomingLLIntCalls.begin()->remove();
#if ENABLE(JIT)
    // We may be destroyed before any CodeBlocks that refer to us are destroyed.
    // Consider that two CodeBlocks become unreachable at the same time. There
    // is no guarantee about the order in which the CodeBlocks are destroyed.
    // So, if we don't remove incoming calls, and get destroyed before the
    // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
    // destructor will try to remove nodes from our (no longer valid) linked list.
    while (m_incomingCalls.begin() != m_incomingCalls.end())
        m_incomingCalls.begin()->remove();
    while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
        m_incomingPolymorphicCalls.begin()->remove();
    
    // Note that our outgoing calls will be removed from other CodeBlocks'
    // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
    // destructors.

    // Drop our reference on every structure stub info (they are ref-counted).
    for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter)
        (*iter)->deref();
#endif // ENABLE(JIT)
}
2214
2215 void CodeBlock::setNumParameters(int newValue)
2216 {
2217     m_numParameters = newValue;
2218
2219     m_argumentValueProfiles.resizeToFit(newValue);
2220 }
2221
2222 void EvalCodeCache::visitAggregate(SlotVisitor& visitor)
2223 {
2224     EvalCacheMap::iterator end = m_cacheMap.end();
2225     for (EvalCacheMap::iterator ptr = m_cacheMap.begin(); ptr != end; ++ptr)
2226         visitor.append(&ptr->value);
2227 }
2228
2229 CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
2230 {
2231 #if ENABLE(FTL_JIT)
2232     if (jitType() != JITCode::DFGJIT)
2233         return 0;
2234     DFG::JITCode* jitCode = m_jitCode->dfg();
2235     return jitCode->osrEntryBlock.get();
2236 #else // ENABLE(FTL_JIT)
2237     return 0;
2238 #endif // ENABLE(FTL_JIT)
2239 }
2240
// GC entry point for scanning this code block's outgoing references. Either
// scans everything strongly (when liveness can be assumed immediately) or
// registers for the weak-reference fixpoint that may end in jettisoning.
void CodeBlock::visitAggregate(SlotVisitor& visitor)
{
#if ENABLE(PARALLEL_GC)
    // I may be asked to scan myself more than once, and it may even happen concurrently.
    // To this end, use an atomic operation to check (and set) if I've been called already.
    // Only one thread may proceed past this point - whichever one wins the atomic set race.
    bool setByMe = m_visitAggregateHasBeenCalled.compareExchangeStrong(false, true);
    if (!setByMe)
        return;
#endif // ENABLE(PARALLEL_GC)
    
    // Also scan the alternative code block and the FTL OSR entry block, if any.
    if (!!m_alternative)
        m_alternative->visitAggregate(visitor);
    
    if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
        otherBlock->visitAggregate(visitor);

    // Account our extra (non-cell) memory against the owner executable.
    visitor.reportExtraMemoryVisited(ownerExecutable(), sizeof(CodeBlock));
    if (m_jitCode)
        visitor.reportExtraMemoryVisited(ownerExecutable(), m_jitCode->size());
    if (m_instructions.size()) {
        // Divide by refCount() because m_instructions points to something that is shared
        // by multiple CodeBlocks, and we only want to count it towards the heap size once.
        // Having each CodeBlock report only its proportional share of the size is one way
        // of accomplishing this.
        visitor.reportExtraMemoryVisited(ownerExecutable(), m_instructions.size() * sizeof(Instruction) / m_instructions.refCount());
    }

    visitor.append(&m_unlinkedCode);

    // There are three things that may use unconditional finalizers: lazy bytecode freeing,
    // inline cache clearing, and jettisoning. The probability of us wanting to do at
    // least one of those things is probably quite close to 1. So we add one no matter what
    // and when it runs, it figures out whether it has any work to do.
    visitor.addUnconditionalFinalizer(this);
    
    // Reset the transition-marking fixpoint state for this GC cycle.
    m_allTransitionsHaveBeenMarked = false;
    
    if (shouldImmediatelyAssumeLivenessDuringScan()) {
        // This code block is live, so scan all references strongly and return.
        stronglyVisitStrongReferences(visitor);
        stronglyVisitWeakReferences(visitor);
        propagateTransitions(visitor);
        return;
    }
    
    // There are two things that we use weak reference harvesters for: DFG fixpoint for
    // jettisoning, and trying to find structures that would be live based on some
    // inline cache. So it makes sense to register them regardless.
    visitor.addWeakReferenceHarvester(this);

#if ENABLE(DFG_JIT)
    // We get here if we're live in the sense that our owner executable is live,
    // but we're not yet live for sure in another sense: we may yet decide that this
    // code block should be jettisoned based on its outgoing weak references being
    // stale. Set a flag to indicate that we're still assuming that we're dead, and
    // perform one round of determining if we're live. The GC may determine, based on
    // either us marking additional objects, or by other objects being marked for
    // other reasons, that this iteration should run again; it will notify us of this
    // decision by calling harvestWeakReferences().
    
    m_jitCode->dfgCommon()->livenessHasBeenProved = false;
    
    propagateTransitions(visitor);
    determineLiveness(visitor);
#else // ENABLE(DFG_JIT)
    RELEASE_ASSERT_NOT_REACHED();
#endif // ENABLE(DFG_JIT)
}
2310
2311 bool CodeBlock::shouldImmediatelyAssumeLivenessDuringScan()
2312 {
2313 #if ENABLE(DFG_JIT)
2314     // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
2315     // their weak references go stale. So if a basline JIT CodeBlock gets
2316     // scanned, we can assume that this means that it's live.
2317     if (!JITCode::isOptimizingJIT(jitType()))
2318         return true;
2319
2320     // For simplicity, we don't attempt to jettison code blocks during GC if
2321     // they are executing. Instead we strongly mark their weak references to
2322     // allow them to continue to execute soundly.
2323     if (m_mayBeExecuting)
2324         return true;
2325
2326     if (Options::forceDFGCodeBlockLiveness())
2327         return true;
2328
2329     return false;
2330 #else
2331     return true;
2332 #endif
2333 }
2334
2335 bool CodeBlock::isKnownToBeLiveDuringGC()
2336 {
2337 #if ENABLE(DFG_JIT)
2338     // This should return true for:
2339     // - Code blocks that behave like normal objects - i.e. if they are referenced then they
2340     //   are live.
2341     // - Code blocks that were running on the stack.
2342     // - Code blocks that survived the last GC if the current GC is an Eden GC. This is
2343     //   because either livenessHasBeenProved would have survived as true or m_mayBeExecuting
2344     //   would survive as true.
2345     // - Code blocks that don't have any dead weak references.
2346     
2347     return shouldImmediatelyAssumeLivenessDuringScan()
2348         || m_jitCode->dfgCommon()->livenessHasBeenProved;
2349 #else
2350     return true;
2351 #endif
2352 }
2353
2354 #if ENABLE(DFG_JIT)
2355 static bool shouldMarkTransition(DFG::WeakReferenceTransition& transition)
2356 {
2357     if (transition.m_codeOrigin && !Heap::isMarked(transition.m_codeOrigin.get()))
2358         return false;
2359     
2360     if (!Heap::isMarked(transition.m_from.get()))
2361         return false;
2362     
2363     return true;
2364 }
2365 #endif // ENABLE(DFG_JIT)
2366
// One iteration of the transition-marking fixpoint: walk every cached structure
// transition this code block holds (LLInt put_by_id instructions, baseline JIT
// structure stubs, and DFG-recorded transitions) and mark the transition's
// target structure whenever its source (and code origin, where present) is
// already marked. When no unmarked transitions remain, sets
// m_allTransitionsHaveBeenMarked so later iterations can return immediately.
void CodeBlock::propagateTransitions(SlotVisitor& visitor)
{
    UNUSED_PARAM(visitor);

    if (m_allTransitionsHaveBeenMarked)
        return;

    bool allAreMarkedSoFar = true;
        
    // LLInt: transitions live directly in the instruction stream, at the
    // property access instructions recorded by the unlinked code.
    Interpreter* interpreter = m_vm->interpreter;
    if (jitType() == JITCode::InterpreterThunk) {
        const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
        for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
            Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
            switch (interpreter->getOpcodeID(instruction[0].u.opcode)) {
            case op_put_by_id_transition_direct:
            case op_put_by_id_transition_normal:
            case op_put_by_id_transition_direct_out_of_line:
            case op_put_by_id_transition_normal_out_of_line: {
                // Operand 4 holds the source structure, operand 6 the target.
                if (Heap::isMarked(instruction[4].u.structure.get()))
                    visitor.append(&instruction[6].u.structure);
                else
                    allAreMarkedSoFar = false;
                break;
            }
            default:
                break;
            }
        }
    }

#if ENABLE(JIT)
    // Baseline JIT: transitions are cached inside structure stub infos.
    if (JITCode::isJIT(jitType())) {
        for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
            StructureStubInfo& stubInfo = **iter;
            switch (stubInfo.accessType) {
            case access_put_by_id_transition_normal:
            case access_put_by_id_transition_direct: {
                JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
                if ((!origin || Heap::isMarked(origin))
                    && Heap::isMarked(stubInfo.u.putByIdTransition.previousStructure.get()))
                    visitor.append(&stubInfo.u.putByIdTransition.structure);
                else
                    allAreMarkedSoFar = false;
                break;
            }

            case access_put_by_id_list: {
                // Polymorphic list: each transition entry is handled like the
                // monomorphic case above, under one shared code origin check.
                PolymorphicPutByIdList* list = stubInfo.u.putByIdList.list;
                JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
                if (origin && !Heap::isMarked(origin)) {
                    allAreMarkedSoFar = false;
                    break;
                }
                for (unsigned j = list->size(); j--;) {
                    PutByIdAccess& access = list->m_list[j];
                    if (!access.isTransition())
                        continue;
                    if (Heap::isMarked(access.oldStructure()))
                        visitor.append(&access.m_newStructure);
                    else
                        allAreMarkedSoFar = false;
                }
                break;
            }
            
            default:
                break;
            }
        }
    }
#endif // ENABLE(JIT)
    
#if ENABLE(DFG_JIT)
    if (JITCode::isOptimizingJIT(jitType())) {
        DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
        
        for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
            if (shouldMarkTransition(dfgCommon->transitions[i])) {
                // If the following three things are live, then the target of the
                // transition is also live:
                //
                // - This code block. We know it's live already because otherwise
                //   we wouldn't be scanning ourselves.
                //
                // - The code origin of the transition. Transitions may arise from
                //   code that was inlined. They are not relevant if the user's
                //   object that is required for the inlinee to run is no longer
                //   live.
                //
                // - The source of the transition. The transition checks if some
                //   heap location holds the source, and if so, stores the target.
                //   Hence the source must be live for the transition to be live.
                //
                // We also short-circuit the liveness if the structure is harmless
                // to mark (i.e. its global object and prototype are both already
                // live).
                
                visitor.append(&dfgCommon->transitions[i].m_to);
            } else
                allAreMarkedSoFar = false;
        }
    }
#endif // ENABLE(DFG_JIT)
    
    if (allAreMarkedSoFar)
        m_allTransitionsHaveBeenMarked = true;
}
2475
2476 void CodeBlock::determineLiveness(SlotVisitor& visitor)
2477 {
2478     UNUSED_PARAM(visitor);
2479     
2480     if (shouldImmediatelyAssumeLivenessDuringScan())
2481         return;
2482     
2483 #if ENABLE(DFG_JIT)
2484     // Check if we have any remaining work to do.
2485     DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2486     if (dfgCommon->livenessHasBeenProved)
2487         return;
2488     
2489     // Now check all of our weak references. If all of them are live, then we
2490     // have proved liveness and so we scan our strong references. If at end of
2491     // GC we still have not proved liveness, then this code block is toast.
2492     bool allAreLiveSoFar = true;
2493     for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
2494         if (!Heap::isMarked(dfgCommon->weakReferences[i].get())) {
2495             allAreLiveSoFar = false;
2496             break;
2497         }
2498     }
2499     if (allAreLiveSoFar) {
2500         for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
2501             if (!Heap::isMarked(dfgCommon->weakStructureReferences[i].get())) {
2502                 allAreLiveSoFar = false;
2503                 break;
2504             }
2505         }
2506     }
2507     
2508     // If some weak references are dead, then this fixpoint iteration was
2509     // unsuccessful.
2510     if (!allAreLiveSoFar)
2511         return;
2512     
2513     // All weak references are live. Record this information so we don't
2514     // come back here again, and scan the strong references.
2515     dfgCommon->livenessHasBeenProved = true;
2516     stronglyVisitStrongReferences(visitor);
2517 #endif // ENABLE(DFG_JIT)
2518 }
2519
// Weak reference harvester callback (registered in visitAggregate): re-run the
// transition propagation and then the liveness fixpoint for this code block.
void CodeBlock::visitWeakReferences(SlotVisitor& visitor)
{
    propagateTransitions(visitor);
    determineLiveness(visitor);
}
2525
// Unconditional finalizer (registered in visitAggregate), run by the GC: clears
// LLInt instruction caches that refer to dead cells, unlinks dead LLInt call
// caches, jettisons DFG code whose weak references went stale, and resets JIT
// inline caches that still reference dead cells.
void CodeBlock::finalizeUnconditionally()
{
    Interpreter* interpreter = m_vm->interpreter;
    if (JITCode::couldBeInterpreted(jitType())) {
        const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
        for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
            Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
            switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) {
            case op_get_by_id:
            case op_get_by_id_out_of_line:
            case op_put_by_id:
            case op_put_by_id_out_of_line:
                // Operand 4 caches a structure; drop the cache if it died.
                if (!curInstruction[4].u.structure || Heap::isMarked(curInstruction[4].u.structure.get()))
                    break;
                if (Options::verboseOSR())
                    dataLogF("Clearing LLInt property access with structure %p.\n", curInstruction[4].u.structure.get());
                curInstruction[4].u.structure.clear();
                curInstruction[5].u.operand = 0;
                break;
            case op_put_by_id_transition_direct:
            case op_put_by_id_transition_normal:
            case op_put_by_id_transition_direct_out_of_line:
            case op_put_by_id_transition_normal_out_of_line:
                // The transition cache stays only if source structure, target
                // structure, and the structure chain all survived.
                if (Heap::isMarked(curInstruction[4].u.structure.get())
                    && Heap::isMarked(curInstruction[6].u.structure.get())
                    && Heap::isMarked(curInstruction[7].u.structureChain.get()))
                    break;
                if (Options::verboseOSR()) {
                    dataLogF("Clearing LLInt put transition with structures %p -> %p, chain %p.\n",
                            curInstruction[4].u.structure.get(),
                            curInstruction[6].u.structure.get(),
                            curInstruction[7].u.structureChain.get());
                }
                curInstruction[4].u.structure.clear();
                curInstruction[6].u.structure.clear();
                curInstruction[7].u.structureChain.clear();
                // Demote the instruction back to a plain put_by_id.
                curInstruction[0].u.opcode = interpreter->getOpcode(op_put_by_id);
                break;
            case op_get_array_length:
                // Nothing to clear for this opcode.
                break;
            case op_to_this:
                if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
                    break;
                if (Options::verboseOSR())
                    dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
                curInstruction[2].u.structure.clear();
                // Remember that GC cleared this cache so the profile reflects it.
                curInstruction[3].u.toThisStatus = merge(
                    curInstruction[3].u.toThisStatus, ToThisClearedByGC);
                break;
            case op_create_this: {
                auto& cacheWriteBarrier = curInstruction[4].u.jsCell;
                // The cache may hold the seenMultipleCalleeObjects sentinel
                // rather than a real callee; only a real, dead callee is cleared.
                if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
                    break;
                JSCell* cachedFunction = cacheWriteBarrier.get();
                if (Heap::isMarked(cachedFunction))
                    break;
                if (Options::verboseOSR())
                    dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
                cacheWriteBarrier.clear();
                break;
            }
            case op_resolve_scope: {
                // Right now this isn't strictly necessary. Any symbol tables that this will refer to
                // are for outer functions, and we refer to those functions strongly, and they refer
                // to the symbol table strongly. But it's nice to be on the safe side.
                WriteBarrierBase<SymbolTable>& symbolTable = curInstruction[6].u.symbolTable;
                if (!symbolTable || Heap::isMarked(symbolTable.get()))
                    break;
                if (Options::verboseOSR())
                    dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
                symbolTable.clear();
                break;
            }
            case op_get_from_scope:
            case op_put_to_scope: {
                ResolveModeAndType modeAndType =
                    ResolveModeAndType(curInstruction[4].u.operand);
                // Skip var-mode accesses; only the structure-checked accesses
                // below need their cached structure cleared. (continue is
                // equivalent to break here: the switch is the last statement
                // of the loop body.)
                if (modeAndType.type() == GlobalVar || modeAndType.type() == GlobalVarWithVarInjectionChecks || modeAndType.type() == LocalClosureVar)
                    continue;
                WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
                if (!structure || Heap::isMarked(structure.get()))
                    break;
                if (Options::verboseOSR())
                    dataLogF("Clearing scope access with structure %p.\n", structure.get());
                structure.clear();
                break;
            }
            default:
                OpcodeID opcodeID = interpreter->getOpcodeID(curInstruction[0].u.opcode);
                ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
            }
        }

        // Unlink LLInt call caches whose callee died, and forget dead
        // last-seen callees.
        for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
            if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
                if (Options::verboseOSR())
                    dataLog("Clearing LLInt call from ", *this, "\n");
                m_llintCallLinkInfos[i].unlink();
            }
            if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
                m_llintCallLinkInfos[i].lastSeenCallee.clear();
        }
    }

#if ENABLE(DFG_JIT)
    // Check if we're not live. If we are, then jettison.
    if (!isKnownToBeLiveDuringGC()) {
        if (Options::verboseOSR())
            dataLog(*this, " has dead weak references, jettisoning during GC.\n");

        if (DFG::shouldShowDisassembly()) {
            // Diagnostics only: list the dead transitions/weak references that
            // caused the jettison.
            dataLog(*this, " will be jettisoned because of the following dead references:\n");
            DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
            for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
                DFG::WeakReferenceTransition& transition = dfgCommon->transitions[i];
                JSCell* origin = transition.m_codeOrigin.get();
                JSCell* from = transition.m_from.get();
                JSCell* to = transition.m_to.get();
                if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
                    continue;
                dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
            }
            for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
                JSCell* weak = dfgCommon->weakReferences[i].get();
                if (Heap::isMarked(weak))
                    continue;
                dataLog("    Weak reference ", RawPointer(weak), ".\n");
            }
        }
        
        jettison(Profiler::JettisonDueToWeakReference);
        return;
    }
#endif // ENABLE(DFG_JIT)

#if ENABLE(JIT)
    // Handle inline caches.
    if (!!jitCode()) {
        RepatchBuffer repatchBuffer(this);
        
        for (auto iter = callLinkInfosBegin(); !!iter; ++iter)
            (*iter)->visitWeak(repatchBuffer);

        for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
            StructureStubInfo& stubInfo = **iter;
            
            if (stubInfo.visitWeakReferences(repatchBuffer))
                continue;
            
            // The stub still references dead cells; reset it and remember that
            // the reset was GC-triggered.
            resetStubDuringGCInternal(repatchBuffer, stubInfo);
        }
    }
#endif
}
2680
// Copies m_stubInfos into a CodeOrigin-keyed map. The unused locker parameter
// documents that the caller must hold m_lock (see the overload below).
void CodeBlock::getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result)
{
#if ENABLE(JIT)
    toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
#else
    UNUSED_PARAM(result);
#endif
}
2689
// Convenience overload: acquires m_lock, then delegates to the locked variant.
void CodeBlock::getStubInfoMap(StubInfoMap& result)
{
    ConcurrentJITLocker locker(m_lock);
    getStubInfoMap(locker, result);
}
2695
// Copies m_callLinkInfos into a CodeOrigin-keyed map. The unused locker
// parameter documents that the caller must hold m_lock (see overload below).
void CodeBlock::getCallLinkInfoMap(const ConcurrentJITLocker&, CallLinkInfoMap& result)
{
#if ENABLE(JIT)
    toHashMap(m_callLinkInfos, getCallLinkInfoCodeOrigin, result);
#else
    UNUSED_PARAM(result);
#endif
}
2704
// Convenience overload: acquires m_lock, then delegates to the locked variant.
void CodeBlock::getCallLinkInfoMap(CallLinkInfoMap& result)
{
    ConcurrentJITLocker locker(m_lock);
    getCallLinkInfoMap(locker, result);
}
2710
2711 #if ENABLE(JIT)
// Allocates a new structure stub info in m_stubInfos. Takes m_lock because
// other threads may be reading the bag (e.g. via getStubInfoMap).
StructureStubInfo* CodeBlock::addStubInfo()
{
    ConcurrentJITLocker locker(m_lock);
    return m_stubInfos.add();
}
2717
2718 StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
2719 {
2720     for (StructureStubInfo* stubInfo : m_stubInfos) {
2721         if (stubInfo->codeOrigin == codeOrigin)
2722             return stubInfo;
2723     }
2724     return nullptr;
2725 }
2726
// Allocates a new call link info in m_callLinkInfos. Takes m_lock because
// other threads may be reading the bag (e.g. via getCallLinkInfoMap).
CallLinkInfo* CodeBlock::addCallLinkInfo()
{
    ConcurrentJITLocker locker(m_lock);
    return m_callLinkInfos.add();
}
2732
// Public entry point for resetting one inline cache: a stub that was never set
// has nothing to reset; otherwise take the lock, build a RepatchBuffer, and
// delegate to resetStubInternal.
void CodeBlock::resetStub(StructureStubInfo& stubInfo)
{
    if (stubInfo.accessType == access_unset)
        return;
    
    ConcurrentJITLocker locker(m_lock);
    
    RepatchBuffer repatchBuffer(this);
    resetStubInternal(repatchBuffer, stubInfo);
}
2743
// Resets the given inline cache via the get/put/in repatch helpers, then clears
// the stub's recorded state. Callers supply the RepatchBuffer (see resetStub
// and resetStubDuringGCInternal).
void CodeBlock::resetStubInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
{
    AccessType accessType = static_cast<AccessType>(stubInfo.accessType);
    
    if (Options::verboseOSR()) {
        // This can be called from GC destructor calls, so we don't try to do a full dump
        // of the CodeBlock.
        dataLog("Clearing structure cache (kind ", static_cast<int>(stubInfo.accessType), ") in ", RawPointer(this), ".\n");
    }
    
    RELEASE_ASSERT(JITCode::isJIT(jitType()));
    
    // Every access type must fall into exactly one of these three families.
    if (isGetByIdAccess(accessType))
        resetGetByID(repatchBuffer, stubInfo);
    else if (isPutByIdAccess(accessType))
        resetPutByID(repatchBuffer, stubInfo);
    else {
        RELEASE_ASSERT(isInAccess(accessType));
        resetIn(repatchBuffer, stubInfo);
    }
    
    stubInfo.reset();
}
2767
// Same as resetStubInternal, but additionally sets the resetByGC flag to record
// that this reset was triggered by garbage collection.
void CodeBlock::resetStubDuringGCInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
{
    resetStubInternal(repatchBuffer, stubInfo);
    stubInfo.resetByGC = true;
}
2773
2774 CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
2775 {
2776     for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
2777         if ((*iter)->codeOrigin == CodeOrigin(index))
2778             return *iter;
2779     }
2780     return nullptr;
2781 }
2782 #endif
2783
2784 void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
2785 {
2786     visitor.append(&m_globalObject);
2787     visitor.append(&m_ownerExecutable);
2788     visitor.append(&m_symbolTable);
2789     visitor.append(&m_unlinkedCode);
2790     if (m_rareData)
2791         m_rareData->m_evalCodeCache.visitAggregate(visitor);
2792     visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
2793     for (size_t i = 0; i < m_functionExprs.size(); ++i)
2794         visitor.append(&m_functionExprs[i]);
2795     for (size_t i = 0; i < m_functionDecls.size(); ++i)
2796         visitor.append(&m_functionDecls[i]);
2797     for (unsigned i = 0; i < m_objectAllocationProfiles.size(); ++i)
2798         m_objectAllocationProfiles[i].visitAggregate(visitor);
2799
2800 #if ENABLE(DFG_JIT)
2801     if (JITCode::isOptimizingJIT(jitType())) {
2802         DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2803         if (dfgCommon->inlineCallFrames.get())
2804             dfgCommon->inlineCallFrames->visitAggregate(visitor);
2805     }
2806 #endif
2807
2808     updateAllPredictions();
2809 }
2810
// Visit, as if strong, the references that optimized (DFG/FTL) code normally
// holds weakly: structure transitions and the weak reference lists. Used when
// this CodeBlock must keep its weakly-referenced objects alive. No-op for
// baseline/LLInt code.
void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor)
{
    UNUSED_PARAM(visitor);

#if ENABLE(DFG_JIT)
    if (!JITCode::isOptimizingJIT(jitType()))
        return;
    
    DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();

    for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
        if (!!dfgCommon->transitions[i].m_codeOrigin)
            visitor.append(&dfgCommon->transitions[i].m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
        visitor.append(&dfgCommon->transitions[i].m_from);
        visitor.append(&dfgCommon->transitions[i].m_to);
    }
    
    for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i)
        visitor.append(&dfgCommon->weakReferences[i]);

    for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i)
        visitor.append(&dfgCommon->weakStructureReferences[i]);
#endif    
}
2835
// Walk the alternative() chain down to its end, which is expected to be the
// baseline (or not-yet-compiled) CodeBlock for this executable. Without the
// JIT there is only one tier, so `this` is returned.
CodeBlock* CodeBlock::baselineAlternative()
{
#if ENABLE(JIT)
    CodeBlock* result = this;
    while (result->alternative())
        result = result->alternative();
    RELEASE_ASSERT(result);
    RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
    return result;
#else
    return this;
#endif
}
2849
// Return the baseline CodeBlock for this code: `this` if we are already
// baseline, otherwise the baseline alternative of the current replacement.
// A missing replacement means we are the original (still-uncompiled)
// CodeBlock for the executable.
CodeBlock* CodeBlock::baselineVersion()
{
#if ENABLE(JIT)
    if (JITCode::isBaselineCode(jitType()))
        return this;
    CodeBlock* result = replacement();
    if (!result) {
        // This can happen if we're creating the original CodeBlock for an executable.
        // Assume that we're the baseline CodeBlock.
        RELEASE_ASSERT(jitType() == JITCode::None);
        return this;
    }
    result = result->baselineAlternative();
    return result;
#else
    return this;
#endif
}
2868
2869 #if ENABLE(JIT)
// True if the executable's current replacement CodeBlock is compiled at a
// strictly higher tier than typeToReplace.
// NOTE(review): replacement() is dereferenced unconditionally here, unlike in
// baselineVersion() — callers presumably guarantee a replacement exists.
bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
{
    return JITCode::isHigherTier(replacement()->jitType(), typeToReplace);
}
2874
// Convenience overload: is there a replacement at a higher tier than ours?
bool CodeBlock::hasOptimizedReplacement()
{
    return hasOptimizedReplacement(jitType());
}
2879 #endif
2880
2881 HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset)
2882 {
2883     RELEASE_ASSERT(bytecodeOffset < instructions().size());
2884
2885     if (!m_rareData)
2886         return 0;
2887     
2888     Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
2889     for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
2890         HandlerInfo& handler = exceptionHandlers[i];
2891         // Handlers are ordered innermost first, so the first handler we encounter
2892         // that contains the source address is the correct handler to use.
2893         if (handler.start <= bytecodeOffset && handler.end > bytecodeOffset)
2894             return &handler;
2895     }
2896
2897     return 0;
2898 }
2899
// Map a bytecode offset to an absolute source line: the executable's first
// line plus the unlinked code's relative line for that offset.
unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
{
    RELEASE_ASSERT(bytecodeOffset < instructions().size());
    return m_ownerExecutable->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
}
2905
// Map a bytecode offset to its source column, discarding the other expression
// range information that expressionRangeForBytecodeOffset() computes.
unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
{
    int divot;
    int startOffset;
    int endOffset;
    unsigned line;
    unsigned column;
    expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
    return column;
}
2916
// Fetch the expression range for a bytecode offset from the unlinked code,
// then translate it into absolute source coordinates: shift the divot by this
// block's source offset, adjust the column (first line gets the extra
// first-line column offset), and rebase the line on the executable's first line.
void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column)
{
    m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
    divot += m_sourceOffset;
    column += line ? 1 : firstLineColumnOffset();
    line += m_ownerExecutable->firstLine();
}
2924
// Scan the instruction stream for an op_debug whose source position matches
// the given line and column (an unspecified breakpoint column matches any
// column). Used to decide whether a breakpoint can bind at this position.
bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
{
    Interpreter* interpreter = vm()->interpreter;
    const Instruction* begin = instructions().begin();
    const Instruction* end = instructions().end();
    for (const Instruction* it = begin; it != end;) {
        OpcodeID opcodeID = interpreter->getOpcodeID(it->u.opcode);
        if (opcodeID == op_debug) {
            unsigned bytecodeOffset = it - begin;
            int unused;
            unsigned opDebugLine;
            unsigned opDebugColumn;
            expressionRangeForBytecodeOffset(bytecodeOffset, unused, unused, unused, opDebugLine, opDebugColumn);
            if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
                return true;
        }
        // Advance by the current opcode's full instruction length.
        it += opcodeLengths[opcodeID];
    }
    return false;
}
2945
// Release excess vector capacity. The profile vectors are always safe to
// shrink; the constant and jump-table vectors are shrunk only during
// EarlyShrink, before any code has taken pointers into them.
void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
{
    m_rareCaseProfiles.shrinkToFit();
    m_specialFastCaseProfiles.shrinkToFit();
    
    if (shrinkMode == EarlyShrink) {
        m_constantRegisters.shrinkToFit();
        m_constantsSourceCodeRepresentation.shrinkToFit();
        
        if (m_rareData) {
            m_rareData->m_switchJumpTables.shrinkToFit();
            m_rareData->m_stringSwitchJumpTables.shrinkToFit();
        }
    } // else don't shrink these, because we would have already pointed pointers into these tables.
}
2961
// Return the constant-pool index for v, reusing an existing entry when one
// compares equal, otherwise appending a new constant.
unsigned CodeBlock::addOrFindConstant(JSValue v)
{
    unsigned result;
    if (findConstant(v, result))
        return result;
    return addConstant(v);
}
2969
2970 bool CodeBlock::findConstant(JSValue v, unsigned& index)
2971 {
2972     unsigned numberOfConstants = numberOfConstantRegisters();
2973     for (unsigned i = 0; i < numberOfConstants; ++i) {
2974         if (getConstant(FirstConstantRegisterIndex + i) == v) {
2975             index = i;
2976             return true;
2977         }
2978     }
2979     index = numberOfConstants;
2980     return false;
2981 }
2982
2983 #if ENABLE(JIT)
// Unlink every linked call in this CodeBlock (and, recursively, in its
// alternative): LLInt call link infos first, then JIT call link infos via a
// RepatchBuffer. Bails early when there are no JIT call infos or the JIT is
// unavailable.
void CodeBlock::unlinkCalls()
{
    if (!!m_alternative)
        m_alternative->unlinkCalls();
    for (size_t i = 0; i < m_llintCallLinkInfos.size(); ++i) {
        if (m_llintCallLinkInfos[i].isLinked())
            m_llintCallLinkInfos[i].unlink();
    }
    if (m_callLinkInfos.isEmpty())
        return;
    if (!m_vm->canUseJIT())
        return;
    RepatchBuffer repatchBuffer(this);
    for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
        CallLinkInfo& info = **iter;
        if (!info.isLinked())
            continue;
        info.unlink(repatchBuffer);
    }
}
3004
// Register a JIT call site that now targets this CodeBlock, after letting the
// inlining heuristics observe the caller.
void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
{
    noticeIncomingCall(callerFrame);
    m_incomingCalls.push(incoming);
}
3010
// Register a polymorphic call stub node that now targets this CodeBlock,
// after letting the inlining heuristics observe the caller.
void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
{
    noticeIncomingCall(callerFrame);
    m_incomingPolymorphicCalls.push(incoming);
}
3016 #endif // ENABLE(JIT)
3017
// Unlink every call site that currently targets this CodeBlock: LLInt callers
// first, then (when the JIT is enabled and any exist) JIT and polymorphic
// callers via a RepatchBuffer. Each unlink removes the entry from its list,
// so the loops drain the lists from the front.
void CodeBlock::unlinkIncomingCalls()
{
    while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
        m_incomingLLIntCalls.begin()->unlink();
#if ENABLE(JIT)
    if (m_incomingCalls.isEmpty() && m_incomingPolymorphicCalls.isEmpty())
        return;
    RepatchBuffer repatchBuffer(this);
    while (m_incomingCalls.begin() != m_incomingCalls.end())
        m_incomingCalls.begin()->unlink(repatchBuffer);
    while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
        m_incomingPolymorphicCalls.begin()->unlink(repatchBuffer);
#endif // ENABLE(JIT)
}
3032
// Register an LLInt call site that now targets this CodeBlock, after letting
// the inlining heuristics observe the caller.
void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
{
    noticeIncomingCall(callerFrame);
    m_incomingLLIntCalls.push(incoming);
}
3038
// Clear this block's eval code cache, and the caches of its alternative and
// any special OSR-entry block, so the whole replacement chain is purged.
void CodeBlock::clearEvalCache()
{
    if (!!m_alternative)
        m_alternative->clearEvalCache();
    if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
        otherBlock->clearEvalCache();
    if (!m_rareData)
        return;
    m_rareData->m_evalCodeCache.clear();
}
3049
// Make this CodeBlock the one the owner executable runs.
void CodeBlock::install()
{
    ownerExecutable()->installCode(this);
}
3054
// Ask the owner executable to create a fresh CodeBlock of our specialization
// kind (call vs. construct), to be compiled as our replacement.
PassRefPtr<CodeBlock> CodeBlock::newReplacement()
{
    return ownerExecutable()->newReplacementCodeBlockFor(specializationKind());
}
3059
3060 #if ENABLE(JIT)
// The currently installed CodeBlock for this program executable.
CodeBlock* ProgramCodeBlock::replacement()
{
    return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
}
3065
// The currently installed CodeBlock for this eval executable.
CodeBlock* EvalCodeBlock::replacement()
{
    return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
}
3070
// The currently installed CodeBlock for this function executable, picking the
// call or construct variant to match this block.
CodeBlock* FunctionCodeBlock::replacement()
{
    return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
}
3075
// DFG capability level for program code.
DFG::CapabilityLevel ProgramCodeBlock::capabilityLevelInternal()
{
    return DFG::programCapabilityLevel(this);
}
3080
// DFG capability level for eval code.
DFG::CapabilityLevel EvalCodeBlock::capabilityLevelInternal()
{
    return DFG::evalCapabilityLevel(this);
}
3085
// DFG capability level for function code, distinguishing the construct and
// call specializations.
DFG::CapabilityLevel FunctionCodeBlock::capabilityLevelInternal()
{
    if (m_isConstructor)
        return DFG::functionForConstructCapabilityLevel(this);
    return DFG::functionForCallCapabilityLevel(this);
}
3092 #endif
3093
// Throw away this optimized CodeBlock: invalidate its code so any frames
// running it will OSR exit on return, optionally count the event as a
// reoptimization (for exponential backoff), and — if we were the installed
// entrypoint — reinstall the baseline alternative. Only meaningful for
// optimizing-JIT code; unreachable without the DFG.
void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail)
{
    RELEASE_ASSERT(reason != Profiler::NotJettisoned);
    
#if ENABLE(DFG_JIT)
    if (DFG::shouldShowDisassembly()) {
        dataLog("Jettisoning ", *this);
        if (mode == CountReoptimization)
            dataLog(" and counting reoptimization");
        dataLog(" due to ", reason);
        if (detail)
            dataLog(", ", *detail);
        dataLog(".\n");
    }
    
    // Keep GC at bay while we rewire code and executables.
    DeferGCForAWhile deferGC(*m_heap);
    RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));
    
    if (Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get())
        compilation->setJettisonReason(reason, detail);
    
    // We want to accomplish two things here:
    // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
    //    we should OSR exit at the top of the next bytecode instruction after the return.
    // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.
    
    // This accomplishes the OSR-exit-on-return part, and does its own book-keeping about
    // whether the invalidation has already happened.
    if (!jitCode()->dfgCommon()->invalidate()) {
        // Nothing to do since we've already been invalidated. That means that we cannot be
        // the optimized replacement.
        RELEASE_ASSERT(this != replacement());
        return;
    }
    
    if (DFG::shouldShowDisassembly())
        dataLog("    Did invalidate ", *this, "\n");
    
    // Count the reoptimization if that's what the user wanted.
    if (mode == CountReoptimization) {
        // FIXME: Maybe this should call alternative().
        // https://bugs.webkit.org/show_bug.cgi?id=123677
        baselineAlternative()->countReoptimization();
        if (DFG::shouldShowDisassembly())
            dataLog("    Did count reoptimization for ", *this, "\n");
    }
    
    // Now take care of the entrypoint.
    if (this != replacement()) {
        // This means that we were never the entrypoint. This can happen for OSR entry code
        // blocks.
        return;
    }
    // Reinstall the baseline code and schedule it to re-optimize after warm-up.
    alternative()->optimizeAfterWarmUp();
    tallyFrequentExitSites();
    alternative()->install();
    if (DFG::shouldShowDisassembly())
        dataLog("    Did install baseline version of ", *this, "\n");
#else // ENABLE(DFG_JIT)
    UNUSED_PARAM(mode);
    UNUSED_PARAM(detail);
    UNREACHABLE_FOR_PLATFORM();
#endif // ENABLE(DFG_JIT)
}
3158
// Resolve the global object for a code origin: ours for non-inlined code,
// otherwise the global object of the inlined frame's executable.
JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
{
    if (!codeOrigin.inlineCallFrame)
        return globalObject();
    return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->eitherCodeBlock()->globalObject();
}
3165
// Stack-walking functor used by noticeIncomingCall() to detect recursion:
// starting at m_startCallFrame, it walks up to m_depthToCheck frames looking
// for another frame executing m_codeBlock. Frames above the start frame are
// ignored.
class RecursionCheckFunctor {
public:
    RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck)
        : m_startCallFrame(startCallFrame)
        , m_codeBlock(codeBlock)
        , m_depthToCheck(depthToCheck)
        , m_foundStartCallFrame(false)
        , m_didRecurse(false)
    { }

    StackVisitor::Status operator()(StackVisitor& visitor)
    {
        CallFrame* currentCallFrame = visitor->callFrame();

        if (currentCallFrame == m_startCallFrame)
            m_foundStartCallFrame = true;

        if (m_foundStartCallFrame) {
            // Recursion: the same CodeBlock appears again beneath the start frame.
            if (visitor->callFrame()->codeBlock() == m_codeBlock) {
                m_didRecurse = true;
                return StackVisitor::Done;
            }

            // Give up once the depth budget is exhausted.
            if (!m_depthToCheck--)
                return StackVisitor::Done;
        }

        return StackVisitor::Continue;
    }

    // Whether a recursive occurrence of the CodeBlock was found.
    bool didRecurse() const { return m_didRecurse; }

private:
    CallFrame* m_startCallFrame;
    CodeBlock* m_codeBlock;
    unsigned m_depthToCheck;
    bool m_foundStartCallFrame;
    bool m_didRecurse;
};
3205
3206 void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
3207 {
3208     CodeBlock* callerCodeBlock = callerFrame->codeBlock();
3209     
3210     if (Options::verboseCallLink())
3211         dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
3212     
3213 #if ENABLE(DFG_JIT)
3214     if (!m_shouldAlwaysBeInlined)
3215         return;
3216     
3217     if (!callerCodeBlock) {
3218         m_shouldAlwaysBeInlined = false;
3219         if (Options::verboseCallLink())
3220             dataLog("    Clearing SABI because caller is native.\n");
3221         return;
3222     }
3223
3224     if (!hasBaselineJITProfiling())
3225         return;
3226
3227     if (!DFG::mightInlineFunction(this))
3228         return;
3229
3230     if (!canInline(m_capabilityLevelState))
3231         return;
3232     
3233     if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
3234         m_shouldAlwaysBeInlined = false;
3235         if (Options::verboseCallLink())
3236             dataLog("    Clearing SABI because caller is too large.\n");
3237         return;
3238     }
3239
3240     if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
3241         // If the caller is still in the interpreter, then we can't expect inlining to
3242         // happen anytime soon. Assume it's profitable to optimize it separately. This
3243         // ensures that a function is SABI only if it is called no more frequently than
3244         // any of its callers.
3245         m_shouldAlwaysBeInlined = false;
3246         if (Options::verboseCallLink())
3247             dataLog("    Clearing SABI because caller is in LLInt.\n");
3248         return;
3249     }
3250     
3251     if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) {
3252         m_shouldAlwaysBeInlined = false;
3253         if (Options::verboseCallLink())
3254             dataLog("    Clearing SABI bcause caller was already optimized.\n");
3255         return;
3256     }
3257     
3258     if (callerCodeBlock->codeType() != FunctionCode) {
3259         // If the caller is either eval or global code, assume that that won't be
3260         // optimized anytime soon. For eval code this is particularly true since we
3261         // delay eval optimization by a *lot*.
3262         m_shouldAlwaysBeInlined = false;
3263         if (Options::verboseCallLink())
3264             dataLog("    Clearing SABI because caller is not a function.\n");
3265         return;
3266     }
3267
3268     // Recursive calls won't be inlined.
3269     RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth());
3270     vm()->topCallFrame->iterate(functor);
3271
3272     if (functor.didRecurse()) {
3273         if (Options::verboseCallLink())
3274             dataLog("    Clearing SABI because recursion was detected.\n");
3275         m_shouldAlwaysBeInlined = false;
3276         return;
3277     }
3278     
3279     if (callerCodeBlock->m_capabilityLevelState == DFG::CapabilityLevelNotSet) {
3280         dataLog("In call from ", *callerCodeBlock, " ", callerFrame->codeOrigin(), " to ", *this, ": caller's DFG capability level is not set.\n");
3281         CRASH();
3282     }
3283     
3284     if (canCompile(callerCodeBlock->m_capabilityLevelState))
3285         return;
3286     
3287     if (Options::verboseCallLink())
3288         dataLog("    Clearing SABI because the caller is not a DFG candidate.\n");
3289     
3290     m_shouldAlwaysBeInlined = false;
3291 #endif
3292 }
3293
// Number of times this code has been reoptimized, clamped elsewhere to
// Options::reoptimizationRetryCounterMax(). Always 0 without the JIT.
unsigned CodeBlock::reoptimizationRetryCounter() const
{
#if ENABLE(JIT)
    ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
    return m_reoptimizationRetryCounter;
#else
    return 0;
#endif // ENABLE(JIT)
}
3303
3304 #if ENABLE(JIT)
// Bump the reoptimization counter, saturating at the configured maximum so
// the exponential-backoff math stays bounded.
void CodeBlock::countReoptimization()
{
    m_reoptimizationRetryCounter++;
    if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
        m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
}
3311
// Estimate how many DFG compiles this (baseline) code has had: one if an
// optimized replacement exists, plus the reoptimization count. In
// testTheFTL mode, an FTL failure returns a huge value to stop further tries.
unsigned CodeBlock::numberOfDFGCompiles()
{
    ASSERT(JITCode::isBaselineCode(jitType()));
    if (Options::testTheFTL()) {
        if (m_didFailFTLCompilation)
            return 1000000;
        return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
    }
    return (JITCode::isOptimizingJIT(replacement()->jitType()) ? 1 : 0) + m_reoptimizationRetryCounter;
}
3322
// Optimization thresholds are scaled up for eval code (which we deliberately
// optimize much later); all other code types use the threshold unchanged.
int32_t CodeBlock::codeTypeThresholdMultiplier() const
{
    if (codeType() == EvalCode)
        return Options::evalThresholdMultiplier();
    
    return 1;
}
3330
// Compute the multiplier applied to optimization thresholds based on this
// block's instruction count: small blocks optimize sooner, large blocks
// later, following a fitted a*sqrt(x+b)+|c*x|+d curve (details below).
double CodeBlock::optimizationThresholdScalingFactor()
{
    // This expression arises from doing a least-squares fit of
    //
    // F[x_] =: a * Sqrt[x + b] + Abs[c * x] + d
    //
    // against the data points:
    //
    //    x       F[x_]
    //    10       0.9          (smallest reasonable code block)
    //   200       1.0          (typical small-ish code block)
    //   320       1.2          (something I saw in 3d-cube that I wanted to optimize)
    //  1268       5.0          (something I saw in 3d-cube that I didn't want to optimize)
    //  4000       5.5          (random large size, used to cause the function to converge to a shallow curve of some sort)
    // 10000       6.0          (similar to above)
    //
    // I achieve the minimization using the following Mathematica code:
    //
    // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
    //
    // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
    //
    // solution = 
    //     Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
    //         {a, b, c, d}][[2]]
    //
    // And the code below (to initialize a, b, c, d) is generated by:
    //
    // Print["const double " <> ToString[#[[1]]] <> " = " <>
    //     If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
    //
    // We've long known the following to be true:
    // - Small code blocks are cheap to optimize and so we should do it sooner rather
    //   than later.
    // - Large code blocks are expensive to optimize and so we should postpone doing so,
    //   and sometimes have a large enough threshold that we never optimize them.
    // - The difference in cost is not totally linear because (a) just invoking the
    //   DFG incurs some base cost and (b) for large code blocks there is enough slop
    //   in the correlation between instruction count and the actual compilation cost
    //   that for those large blocks, the instruction count should not have a strong
    //   influence on our threshold.
    //
    // I knew the goals but I didn't know how to achieve them; so I picked an interesting
    // example where the heuristics were right (code block in 3d-cube with instruction
    // count 320, which got compiled early as it should have been) and one where they were
    // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
    // to compile and didn't run often enough to warrant compilation in my opinion), and
    // then threw in additional data points that represented my own guess of what our
    // heuristics should do for some round-numbered examples.
    //
    // The expression to which I decided to fit the data arose because I started with an
    // affine function, and then did two things: put the linear part in an Abs to ensure
    // that the fit didn't end up choosing a negative value of c (which would result in
    // the function turning over and going negative for large x) and I threw in a Sqrt
    // term because Sqrt represents my intuition that the function should be more sensitive
    // to small changes in small values of x, but less sensitive when x gets large.
    
    // Note that the current fit essentially eliminates the linear portion of the
    // expression (c == 0.0).
    const double a = 0.061504;
    const double b = 1.02406;
    const double c = 0.0;
    const double d = 0.825914;
    
    double instructionCount = this->instructionCount();
    
    ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.
    
    double result = d + a * sqrt(instructionCount + b) + c * instructionCount;
    
    // Eval code gets an additional penalty (see codeTypeThresholdMultiplier()).
    result *= codeTypeThresholdMultiplier();
    
    if (Options::verboseOSR()) {
        dataLog(
            *this, ": instruction count is ", instructionCount,
            ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
            "\n");
    }
    return result;
}
3411
// Clamp a floating-point execution-counter threshold into the int32_t range
// [1, INT32_MAX] before it is handed to the execution counter machinery.
static int32_t clipThreshold(double threshold)
{
    const double maxRepresentable = static_cast<double>(std::numeric_limits<int32_t>::max());

    if (threshold < 1.0)
        return 1;
    return threshold > maxRepresentable
        ? std::numeric_limits<int32_t>::max()
        : static_cast<int32_t>(threshold);
}
3422
// Scale a desired threshold by this block's size-based factor and by 2^retry
// for exponential backoff after reoptimizations, clamped to [1, INT32_MAX].
int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
{
    return clipThreshold(
        static_cast<double>(desiredThreshold) *
        optimizationThresholdScalingFactor() *
        (1 << reoptimizationRetryCounter()));
}
3430
// Decide whether it's time to tier up. If a concurrent DFG compile of this
// block has already finished, arrange to switch on the next invocation and
// report true; otherwise defer to the execution counter.
bool CodeBlock::checkIfOptimizationThresholdReached()
{
#if ENABLE(DFG_JIT)
    if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
        if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
            == DFG::Worklist::Compiled) {
            optimizeNextInvocation();
            return true;
        }
    }
#endif
    
    return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
}
3445
// Set the execution counter so optimization triggers on the very next call.
void CodeBlock::optimizeNextInvocation()
{
    if (Options::verboseOSR())
        dataLog(*this, ": Optimizing next invocation.\n");
    m_jitExecuteCounter.setNewThreshold(0, this);
}
3452
// Push optimization out indefinitely (e.g. after a failed compile).
void CodeBlock::dontOptimizeAnytimeSoon()
{
    if (Options::verboseOSR())
        dataLog(*this, ": Not optimizing anytime soon.\n");
    m_jitExecuteCounter.deferIndefinitely();
}
3459
// Schedule optimization after the standard warm-up threshold, adjusted for
// code size and past reoptimizations.
void CodeBlock::optimizeAfterWarmUp()
{
    if (Options::verboseOSR())
        dataLog(*this, ": Optimizing after warm-up.\n");
#if ENABLE(DFG_JIT)
    m_jitExecuteCounter.setNewThreshold(
        adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
#endif
}
3469
// Schedule optimization after the long warm-up threshold, adjusted for
// code size and past reoptimizations.
void CodeBlock::optimizeAfterLongWarmUp()
{
    if (Options::verboseOSR())
        dataLog(*this, ": Optimizing after long warm-up.\n");
#if ENABLE(DFG_JIT)
    m_jitExecuteCounter.setNewThreshold(
        adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
#endif
}
3479
// Schedule optimization after the short "soon" threshold, adjusted for
// code size and past reoptimizations.
void CodeBlock::optimizeSoon()
{
    if (Options::verboseOSR())
        dataLog(*this, ": Optimizing soon.\n");
#if ENABLE(DFG_JIT)
    m_jitExecuteCounter.setNewThreshold(
        adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
#endif
}
3489
// From another thread, force the counter to take the slow path so the
// mutator notices a pending compile. Inherently racy best-effort signal.
void CodeBlock::forceOptimizationSlowPathConcurrently()
{
    if (Options::verboseOSR())
        dataLog(*this, ": Forcing slow path concurrently.\n");
    m_jitExecuteCounter.forceSlowPathConcurrently();
}
3496
3497 #if ENABLE(DFG_JIT)
// React to the outcome of a DFG compile of this baseline block: switch to the
// new code on success, back off appropriately on failure/deferral/invalidation.
// Sanity-checks that "success" coincides with having a distinct replacement.
void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
{
    JITCode::JITType type = jitType();
    if (type != JITCode::BaselineJIT) {
        dataLog(*this, ": expected to have baseline code but have ", type, "\n");
        RELEASE_ASSERT_NOT_REACHED();
    }
    
    CodeBlock* theReplacement = replacement();
    if ((result == CompilationSuccessful) != (theReplacement != this)) {
        dataLog(*this, ": we have result = ", result, " but ");
        if (theReplacement == this)
            dataLog("we are our own replacement.\n");
        else
            dataLog("our replacement is ", pointerDump(theReplacement), "\n");
        RELEASE_ASSERT_NOT_REACHED();
    }
    
    switch (result) {
    case CompilationSuccessful:
        RELEASE_ASSERT(JITCode::isOptimizingJIT(replacement()->jitType()));
        optimizeNextInvocation();
        return;
    case CompilationFailed:
        dontOptimizeAnytimeSoon();
        return;
    case CompilationDeferred:
        // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
        // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
        // necessarily guarantee anything. So, we make sure that even if that
        // function ends up being a no-op, we still eventually retry and realize
        // that we have optimized code ready.
        optimizeAfterWarmUp();
        return;
    case CompilationInvalidated:
        // Retry with exponential backoff.
        countReoptimization();
        optimizeAfterWarmUp();
        return;
    }
    
    dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
    RELEASE_ASSERT_NOT_REACHED();
}
3542
3543 #endif
3544     
// Scale an OSR-exit-count threshold by 2^(baseline reoptimization count),
// saturating at UINT32_MAX instead of wrapping.
uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
{
    ASSERT(JITCode::isOptimizingJIT(jitType()));
    // Compute this the lame way so we don't saturate. This is called infrequently
    // enough that this loop won't hurt us.
    unsigned result = desiredThreshold;
    for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
        unsigned newResult = result << 1;
        if (newResult < result)
            return std::numeric_limits<uint32_t>::max();
        result = newResult;
    }
    return result;
}
3559
// OSR-exit count at which a normal entry should trigger reoptimization.
uint32_t CodeBlock::exitCountThresholdForReoptimization()
{
    return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
}
3564
// OSR-exit count at which a loop (OSR) entry should trigger reoptimization.
uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
{
    return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
}
3569
// Has this block accumulated enough OSR exits to warrant reoptimizing?
bool CodeBlock::shouldReoptimizeNow()
{
    return osrExitCounter() >= exitCountThresholdForReoptimization();
}
3574
// Same as shouldReoptimizeNow(), but against the looser loop-entry threshold.
bool CodeBlock::shouldReoptimizeFromLoopNow()
{
    return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
}
3579 #endif
3580
3581 ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
3582 {
3583     for (unsigned i = 0; i < m_arrayProfiles.size(); ++i) {
3584         if (m_arrayProfiles[i].bytecodeOffset() == bytecodeOffset)
3585             return &m_arrayProfiles[i];
3586     }
3587     return 0;
3588 }
3589
// Return the array profile for a bytecode offset, creating one on demand.
ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
{
    ArrayProfile* result = getArrayProfile(bytecodeOffset);
    if (result)
        return result;
    return addArrayProfile(bytecodeOffset);
}
3597
// Recompute every value profile's predicted type under the concurrent-JIT
// lock, while tallying (a) how many non-argument profiles are "live" (have
// samples or a prediction) and (b) the capped total sample count — both used
// by shouldOptimizeNow() to gauge profile completeness.
void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
{
    ConcurrentJITLocker locker(m_lock);
    
    numberOfLiveNonArgumentValueProfiles = 0;
    numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
    for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
        ValueProfile* profile = getFromAllValueProfiles(i);
        unsigned numSamples = profile->totalNumberOfSamples();
        if (numSamples > ValueProfile::numberOfBuckets)
            numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
        numberOfSamplesInProfiles += numSamples;
        // Negative bytecode offsets denote argument profiles; they update
        // predictions but don't count toward liveness.
        if (profile->m_bytecodeOffset < 0) {
            profile->computeUpdatedPrediction(locker);
            continue;
        }
        if (profile->numberOfSamples() || profile->m_prediction != SpecNone)
            numberOfLiveNonArgumentValueProfiles++;
        profile->computeUpdatedPrediction(locker);
    }
    
#if ENABLE(DFG_JIT)
    m_lazyOperandValueProfiles.computeUpdatedPredictions(locker);
#endif
}
3623
// Refresh all value-profile predictions, discarding the liveness statistics.
void CodeBlock::updateAllValueProfilePredictions()
{
    unsigned ignoredValue1, ignoredValue2;
    updateAllPredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
}
3629
// Refresh array-profile predictions and array-allocation indexing types,
// under the concurrent-JIT lock.
void CodeBlock::updateAllArrayPredictions()
{
    ConcurrentJITLocker locker(m_lock);
    
    for (unsigned i = m_arrayProfiles.size(); i--;)
        m_arrayProfiles[i].computeUpdatedPrediction(locker, this);
    
    // Don't count these either, for similar reasons.
    for (unsigned i = m_arrayAllocationProfiles.size(); i--;)
        m_arrayAllocationProfiles[i].updateIndexingType();
}
3641
// Refreshes both value profile and array profile predictions. Each callee
// acquires the ConcurrentJITLocker itself, so no lock is held here.
void CodeBlock::updateAllPredictions()
{
    updateAllValueProfilePredictions();
    updateAllArrayPredictions();
}
3647
3648 bool CodeBlock::shouldOptimizeNow()
3649 {
3650     if (Options::verboseOSR())
3651         dataLog("Considering optimizing ", *this, "...\n");
3652
3653     if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
3654         return true;
3655     
3656     updateAllArrayPredictions();
3657     
3658     unsigned numberOfLiveNonArgumentValueProfiles;
3659     unsigned numberOfSamplesInProfiles;
3660     updateAllPredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);
3661
3662     if (Options::verboseOSR()) {
3663         dataLogF(
3664             "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
3665             (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles(),
3666             numberOfLiveNonArgumentValueProfiles, numberOfValueProfiles(),
3667             (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles(),
3668             numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfValueProfiles());
3669     }
3670
3671     if ((!numberOfValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles() >= Options::desiredProfileLivenessRate())
3672         && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
3673         && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
3674         return true;
3675     
3676     ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
3677     m_optimizationDelayCounter++;
3678     optimizeAfterWarmUp();
3679     return false;
3680 }
3681
3682 #if ENABLE(DFG_JIT)
3683 void CodeBlock::tallyFrequentExitSites()
3684 {
3685     ASSERT(JITCode::isOptimizingJIT(jitType()));
3686     ASSERT(alternative()->jitType() == JITCode::BaselineJIT);
3687     
3688     CodeBlock* profiledBlock = alternative();
3689     
3690     switch (jitType()) {
3691     case JITCode::DFGJIT: {
3692         DFG::JITCode* jitCode = m_jitCode->dfg();
3693         for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
3694             DFG::OSRExit& exit = jitCode->osrExit[i];
3695             exit.considerAddingAsFrequentExitSite(profiledBlock);
3696         }
3697         break;
3698     }
3699
3700 #if ENABLE(FTL_JIT)
3701     case JITCode::FTLJIT: {
3702         // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
3703         // vector contains a totally different type, that just so happens to behave like
3704         // DFG::JITCode::osrExit.
3705         FTL::JITCode* jitCode = m_jitCode->ftl();
3706         for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
3707             FTL::OSRExit& exit = jitCode->osrExit[i];
3708             exit.considerAddingAsFrequentExitSite(profiledBlock);
3709         }
3710         break;
3711     }
3712 #endif
3713         
3714     default:
3715         RELEASE_ASSERT_NOT_REACHED();
3716         break;
3717     }
3718 }
3719 #endif // ENABLE(DFG_JIT)
3720
3721 #if ENABLE(VERBOSE_VALUE_PROFILE)
3722 void CodeBlock::dumpValueProfiles()
3723 {
3724     dataLog("ValueProfile for ", *this, ":\n");
3725     for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
3726         ValueProfile* profile = getFromAllValueProfiles(i);
3727         if (profile->m_bytecodeOffset < 0) {
3728             ASSERT(profile->m_bytecodeOffset == -1);
3729             dataLogF("   arg = %u: ", i);
3730         } else
3731             dataLogF("   bc = %d: ", profile->m_bytecodeOffset);
3732         if (!profile->numberOfSamples() && profile->m_prediction == SpecNone) {
3733             dataLogF("<empty>\n");
3734             continue;
3735         }
3736         profile->dump(WTF::dataFile());