OSR entry: delay outer-loop compilation when at inner-loop
Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
/*
 * Copyright (C) 2011, 2013-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGJITCompiler.h"

#if ENABLE(DFG_JIT)

#include "CodeBlock.h"
#include "DFGFailedFinalizer.h"
#include "DFGInlineCacheWrapperInlines.h"
#include "DFGJITCode.h"
#include "DFGJITFinalizer.h"
#include "DFGOSRExitCompiler.h"
#include "DFGOperations.h"
#include "DFGRegisterBank.h"
#include "DFGSlowPathGenerator.h"
#include "DFGSpeculativeJIT.h"
#include "DFGThunks.h"
#include "JSCInlines.h"
#include "JSCJSValueInlines.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "StructureStubInfo.h"
#include "VM.h"

namespace JSC { namespace DFG {

JITCompiler::JITCompiler(Graph& dfg)
    : CCallHelpers(&dfg.m_vm, dfg.m_codeBlock)
    , m_graph(dfg)
    , m_jitCode(adoptRef(new JITCode()))
    , m_blockHeads(dfg.numBlocks())
    , m_pcToCodeOriginMapBuilder(dfg.m_vm)
{
    if (shouldDumpDisassembly() || m_graph.m_vm.m_perBytecodeProfiler)
        m_disassembler = std::make_unique<Disassembler>(dfg);
#if ENABLE(FTL_JIT)
    m_jitCode->tierUpInLoopHierarchy = WTFMove(m_graph.m_plan.tierUpInLoopHierarchy);
    for (unsigned tierUpBytecode : m_graph.m_plan.tierUpAndOSREnterBytecodes)
        m_jitCode->tierUpEntryTriggers.add(tierUpBytecode, TierUpEntryTrigger::None);
#endif
}

JITCompiler::~JITCompiler()
{
}

void JITCompiler::linkOSRExits()
{
    ASSERT(m_jitCode->osrExit.size() == m_exitCompilationInfo.size());
    if (m_graph.compilation()) {
        for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
            Vector<Label> labels;
            if (!info.m_failureJumps.empty()) {
                for (unsigned j = 0; j < info.m_failureJumps.jumps().size(); ++j)
                    labels.append(info.m_failureJumps.jumps()[j].label());
            } else
                labels.append(info.m_replacementSource);
            m_exitSiteLabels.append(labels);
        }
    }

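    // Plant the actual exits. Each exit stores its index into vm->osrExitIndex and ends in a
    // patchable jump; link() points that jump at the shared OSR exit generation thunk, which
    // uses the stored index to find and compile the right exit lazily.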
    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExit& exit = m_jitCode->osrExit[i];
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        JumpList& failureJumps = info.m_failureJumps;
        if (!failureJumps.empty())
            failureJumps.link(this);
        else
            info.m_replacementDestination = label();

        jitAssertHasValidCallFrame();
        store32(TrustedImm32(i), &vm()->osrExitIndex);
        exit.setPatchableCodeOffset(patchableJump());
    }
}

void JITCompiler::compileEntry()
{
    // This code currently matches the old JIT. In the function header we need to
    // save return address and call frame via the prologue and perform a fast stack check.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
    // We'll need to convert the remaining cti_ style calls (specifically the stack
    // check) which will be dependent on stack layout. (We'd need to account for this in
    // both normal return code and when jumping to an exception handler).
    emitFunctionPrologue();
    emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);
}

void JITCompiler::compileSetupRegistersForEntry()
{
    emitSaveCalleeSaves();
    emitMaterializeTagCheckRegisters();
}

void JITCompiler::compileEntryExecutionFlag()
{
#if ENABLE(FTL_JIT)
    if (m_graph.m_plan.canTierUpAndOSREnter())
        store8(TrustedImm32(0), &m_jitCode->neverExecutedEntry);
#endif // ENABLE(FTL_JIT)
}

void JITCompiler::compileBody()
{
    // We generate the speculative code path, followed by OSR exit code to return
    // to the old JIT code if speculations fail.

    bool compiledSpeculative = m_speculative->compile();
    ASSERT_UNUSED(compiledSpeculative, compiledSpeculative);
}

void JITCompiler::compileExceptionHandlers()
{
    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);

        copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();

        // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallLinkRecord(call(), lookupExceptionHandlerFromCallerFrame));

        jumpToExceptionHandler();
    }

    if (!m_exceptionChecks.empty()) {
        m_exceptionChecks.link(this);

        copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();

        // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));

        jumpToExceptionHandler();
    }
}

void JITCompiler::link(LinkBuffer& linkBuffer)
{
    // Link the code, populate data in CodeBlock data structures.
    m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount();
    m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit();

    if (!m_graph.m_plan.inlineCallFrames->isEmpty())
        m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames;

#if USE(JSVALUE32_64)
    m_jitCode->common.doubleConstants = WTFMove(m_graph.m_doubleConstants);
#endif

    m_graph.registerFrozenValues();

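    // Fill in the baseline switch jump tables for immediate and character switches that the DFG
    // compiled with a jump table: every slot first defaults to the fall-through block, then each
    // case is pointed at the machine-code head of its target block.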
    BitVector usedJumpTables;
    for (Bag<SwitchData>::iterator iter = m_graph.m_switchData.begin(); !!iter; ++iter) {
        SwitchData& data = **iter;
        if (!data.didUseJumpTable)
            continue;

        if (data.kind == SwitchString)
            continue;

        RELEASE_ASSERT(data.kind == SwitchImm || data.kind == SwitchChar);

        usedJumpTables.set(data.switchTableIndex);
        SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
        table.ctiOffsets.grow(table.branchOffsets.size());
        for (unsigned j = table.ctiOffsets.size(); j--;)
            table.ctiOffsets[j] = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            table.ctiOffsets[myCase.value.switchLookupValue(data.kind) - table.min] =
                linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
        }
    }

    for (unsigned i = m_codeBlock->numberOfSwitchJumpTables(); i--;) {
        if (usedJumpTables.get(i))
            continue;

        m_codeBlock->switchJumpTable(i).clear();
    }

    // NOTE: we cannot clear string switch tables because (1) we're running concurrently
    // and we cannot deref StringImpl's and (2) it would be weird to deref those
    // StringImpl's since we refer to them.
    for (Bag<SwitchData>::iterator switchDataIter = m_graph.m_switchData.begin(); !!switchDataIter; ++switchDataIter) {
        SwitchData& data = **switchDataIter;
        if (!data.didUseJumpTable)
            continue;

        if (data.kind != SwitchString)
            continue;

        StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
        StringJumpTable::StringOffsetTable::iterator iter;
        StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
        for (iter = table.offsetTable.begin(); iter != end; ++iter)
            iter->value.ctiOffset = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            iter = table.offsetTable.find(myCase.value.stringImpl());
            RELEASE_ASSERT(iter != end);
            iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
        }
    }

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    for (unsigned i = m_getByIds.size(); i--;)
        m_getByIds[i].finalize(linkBuffer);
    for (unsigned i = m_putByIds.size(); i--;)
        m_putByIds[i].finalize(linkBuffer);

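    // Record the patchable layout of each inline cache in m_ins: where the inline fast path
    // starts, how many bytes it spans, and how far from that start the slow-path call and the
    // slow-path entry sit, so the stub can be repatched after this code is live.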
    for (unsigned i = 0; i < m_ins.size(); ++i) {
        StructureStubInfo& info = *m_ins[i].m_stubInfo;

        CodeLocationLabel start = linkBuffer.locationOf(m_ins[i].m_jump);
        info.patch.start = start;

        ptrdiff_t inlineSize = MacroAssembler::differenceBetweenCodePtr(
            start, linkBuffer.locationOf(m_ins[i].m_done));
        RELEASE_ASSERT(inlineSize >= 0);
        info.patch.inlineSize = inlineSize;

        info.patch.deltaFromStartToSlowPathCallLocation = MacroAssembler::differenceBetweenCodePtr(
            start, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->call()));

        info.patch.deltaFromStartToSlowPathStart = MacroAssembler::differenceBetweenCodePtr(
            start, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->label()));
    }

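    // Wire up the JS call ICs. A call's slow path initially targets the generic link-call
    // thunk; direct calls instead record their fast and slow locations so the CallLinkInfo
    // can repatch them once the callee is known.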
    for (auto& record : m_jsCalls) {
        CallLinkInfo& info = *record.info;
        linkBuffer.link(record.slowCall, FunctionPtr(m_vm->getCTIStub(linkCallThunkGenerator).code().executableAddress()));
        info.setCallLocations(
            CodeLocationLabel(linkBuffer.locationOfNearCall(record.slowCall)),
            CodeLocationLabel(linkBuffer.locationOf(record.targetToCheck)),
            linkBuffer.locationOfNearCall(record.fastCall));
    }

    for (JSDirectCallRecord& record : m_jsDirectCalls) {
        CallLinkInfo& info = *record.info;
        linkBuffer.link(record.call, linkBuffer.locationOf(record.slowPath));
        info.setCallLocations(
            CodeLocationLabel(),
            linkBuffer.locationOf(record.slowPath),
            linkBuffer.locationOfNearCall(record.call));
    }

    for (JSDirectTailCallRecord& record : m_jsDirectTailCalls) {
        CallLinkInfo& info = *record.info;
        info.setCallLocations(
            linkBuffer.locationOf(record.patchableJump),
            linkBuffer.locationOf(record.slowPath),
            linkBuffer.locationOfNearCall(record.call));
    }

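    // Point every OSR exit's patchable jump at the shared exit-generation thunk. Exits that have
    // a replacement source (those not guarded by explicit failure jumps) also get a jump
    // replacement registered, so the main path can be redirected to the exit later.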
    MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
    CodeLocationLabel target = CodeLocationLabel(osrExitThunk.code());
    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExit& exit = m_jitCode->osrExit[i];
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        linkBuffer.link(exit.getPatchableCodeOffsetAsJump(), target);
        exit.correctJump(linkBuffer);
        if (info.m_replacementSource.isSet()) {
            m_jitCode->common.jumpReplacements.append(JumpReplacement(
                linkBuffer.locationOf(info.m_replacementSource),
                linkBuffer.locationOf(info.m_replacementDestination)));
        }
    }

    if (m_graph.compilation()) {
        ASSERT(m_exitSiteLabels.size() == m_jitCode->osrExit.size());
        for (unsigned i = 0; i < m_exitSiteLabels.size(); ++i) {
            Vector<Label>& labels = m_exitSiteLabels[i];
            Vector<const void*> addresses;
            for (unsigned j = 0; j < labels.size(); ++j)
                addresses.append(linkBuffer.locationOf(labels[j]).executableAddress());
            m_graph.compilation()->addOSRExitSite(addresses);
        }
    } else
        ASSERT(!m_exitSiteLabels.size());

    m_jitCode->common.compilation = m_graph.compilation();

    // Link new DFG exception handlers and remove baseline JIT handlers.
    m_codeBlock->clearExceptionHandlers();
    for (unsigned i = 0; i < m_exceptionHandlerOSRExitCallSites.size(); i++) {
        OSRExitCompilationInfo& info = m_exceptionHandlerOSRExitCallSites[i].exitInfo;
        if (info.m_replacementDestination.isSet()) {
            // If this is *not* set, it means that we already jumped to the OSR exit in pure generated control flow.
            // i.e., we explicitly emitted an exceptionCheck that we know will be caught in this machine frame.
            // If this *is* set, it means we will be landing at this code location from genericUnwind from an
            // exception thrown in a child call frame.
            CodeLocationLabel catchLabel = linkBuffer.locationOf(info.m_replacementDestination);
            HandlerInfo newExceptionHandler = m_exceptionHandlerOSRExitCallSites[i].baselineExceptionHandler;
            CallSiteIndex callSite = m_exceptionHandlerOSRExitCallSites[i].callSiteIndex;
            newExceptionHandler.start = callSite.bits();
            newExceptionHandler.end = callSite.bits() + 1;
            newExceptionHandler.nativeCode = catchLabel;
            m_codeBlock->appendExceptionHandler(newExceptionHandler);
        }
    }

    if (m_pcToCodeOriginMapBuilder.didBuildMapping())
        m_codeBlock->setPCToCodeOriginMap(std::make_unique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), linkBuffer));
}

void JITCompiler::compile()
{
    setStartOfCode();
    compileEntry();
    m_speculative = std::make_unique<SpeculativeJIT>(*this);

    // Plant a check that sufficient space is available in the JSStack.
    addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfSoftStackLimit()), GPRInfo::regT1);

    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();
    compileSetupRegistersForEntry();
    compileEntryExecutionFlag();
    compileBody();
    setEndOfMainPath();

    // === Footer code generation ===
    //
    // Generate the stack overflow handling; if the stack check in the entry head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators(m_pcToCodeOriginMapBuilder);
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();

    auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
        return;
    }

    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    disassemble(*linkBuffer);

    m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
        m_graph.m_plan, WTFMove(m_jitCode), WTFMove(linkBuffer));
}

void JITCompiler::compileFunction()
{
    setStartOfCode();
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfSoftStackLimit()), GPRInfo::regT1);

    // Move the stack pointer down to accommodate locals
    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    compileSetupRegistersForEntry();
    compileEntryExecutionFlag();

    // === Function body code generation ===
    m_speculative = std::make_unique<SpeculativeJIT>(*this);
    compileBody();
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate code to perform the stack overflow handling (if the stack check in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the stack overflow handling; if the stack check in the function head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // The fast entry point into a function does not check that the correct number of arguments
    // have been passed to the call (we only use the fast entry point where we can statically
    // determine that the correct number of arguments have been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    m_arityCheck = label();
    compileEntry();

    load32(AssemblyHelpers::payloadFor((VirtualRegister)CallFrameSlot::argumentCount), GPRInfo::regT1);
    branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
    m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::regT0);
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
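    // A zero result from the arity check operation means no fixup is needed and we re-enter at
    // fromArityCheck; otherwise the result is handed to the shared arity fixup thunk (linked
    // below against arityFixupGenerator), which adjusts the frame before re-entering.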
    branchTest32(Zero, GPRInfo::returnValueGPR).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    move(GPRInfo::returnValueGPR, GPRInfo::argumentGPR0);
    m_callArityFixup = call();
    jump(fromArityCheck);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators(m_pcToCodeOriginMapBuilder);
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();

    // === Link ===
    auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
        return;
    }
    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixupGenerator)).code().executableAddress()));

    disassemble(*linkBuffer);

    MacroAssemblerCodePtr withArityCheck = linkBuffer->locationOf(m_arityCheck);

    m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
        m_graph.m_plan, WTFMove(m_jitCode), WTFMove(linkBuffer), withArityCheck);
}

void JITCompiler::disassemble(LinkBuffer& linkBuffer)
{
    if (shouldDumpDisassembly()) {
        m_disassembler->dump(linkBuffer);
        linkBuffer.didAlreadyDisassemble();
    }

    if (m_graph.m_plan.compilation)
        m_disassembler->reportToProfiler(m_graph.m_plan.compilation.get(), linkBuffer);
}

#if USE(JSVALUE32_64)
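// On 32-bit targets, double constants are kept in an out-of-line Bag owned by the graph. This
// returns a stable address for a given bit pattern, allocating a slot on first use and reusing
// it for later requests of the same constant.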
void* JITCompiler::addressOfDoubleConstant(Node* node)
{
    double value = node->asNumber();
    int64_t valueBits = bitwise_cast<int64_t>(value);
    auto it = m_graph.m_doubleConstantsMap.find(valueBits);
    if (it != m_graph.m_doubleConstantsMap.end())
        return it->second;

    if (!m_graph.m_doubleConstants)
        m_graph.m_doubleConstants = std::make_unique<Bag<double>>();

    double* addressInConstantPool = m_graph.m_doubleConstants->add();
    *addressInConstantPool = value;
    m_graph.m_doubleConstantsMap[valueBits] = addressInConstantPool;
    return addressInConstantPool;
}
#endif

void JITCompiler::noticeOSREntry(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer)
{
    // OSR entry is not allowed into blocks deemed unreachable by control flow analysis.
    if (!basicBlock.intersectionOfCFAHasVisited)
        return;

    OSREntryData* entry = m_jitCode->appendOSREntryData(basicBlock.bytecodeBegin, linkBuffer.offsetOf(blockHead));

    entry->m_expectedValues = basicBlock.intersectionOfPastValuesAtHead;

    // Fix the expected values: in our protocol, a dead variable will have an expected
    // value of (None, []). But the old JIT may stash some values there. So we really
    // need (Top, TOP).
    for (size_t argument = 0; argument < basicBlock.variablesAtHead.numberOfArguments(); ++argument) {
        Node* node = basicBlock.variablesAtHead.argument(argument);
        if (!node || !node->shouldGenerate())
            entry->m_expectedValues.argument(argument).makeHeapTop();
    }
    for (size_t local = 0; local < basicBlock.variablesAtHead.numberOfLocals(); ++local) {
        Node* node = basicBlock.variablesAtHead.local(local);
        if (!node || !node->shouldGenerate())
            entry->m_expectedValues.local(local).makeHeapTop();
        else {
            VariableAccessData* variable = node->variableAccessData();
            entry->m_machineStackUsed.set(variable->machineLocal().toLocal());

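            // Locals that the DFG keeps in an unboxed representation must be converted at OSR
            // entry; record which ones are flushed as raw doubles or Int52 so the entry code
            // can write them in the format this code expects.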
            switch (variable->flushFormat()) {
            case FlushedDouble:
                entry->m_localsForcedDouble.set(local);
                break;
            case FlushedInt52:
                entry->m_localsForcedAnyInt.set(local);
                break;
            default:
                break;
            }

            if (variable->local() != variable->machineLocal()) {
                entry->m_reshufflings.append(
                    OSREntryReshuffling(
                        variable->local().offset(), variable->machineLocal().offset()));
            }
        }
    }

    entry->m_reshufflings.shrinkToFit();
}

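// Create an OSR exit that models landing in a baseline catch handler: the exit remembers the
// catch's code origin and the call site it guards, and link() later synthesizes a HandlerInfo
// whose native code is the exit's landing pad.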
void JITCompiler::appendExceptionHandlingOSRExit(ExitKind kind, unsigned eventStreamIndex, CodeOrigin opCatchOrigin, HandlerInfo* exceptionHandler, CallSiteIndex callSite, MacroAssembler::JumpList jumpsToFail)
{
    OSRExit exit(kind, JSValueRegs(), MethodOfGettingAValueProfile(), m_speculative.get(), eventStreamIndex);
    exit.m_codeOrigin = opCatchOrigin;
    exit.m_exceptionHandlerCallSiteIndex = callSite;
    OSRExitCompilationInfo& exitInfo = appendExitInfo(jumpsToFail);
    jitCode()->appendOSRExit(exit);
    m_exceptionHandlerOSRExitCallSites.append(ExceptionHandlingOSRExitInfo { exitInfo, *exceptionHandler, callSite });
}

void JITCompiler::exceptionCheck()
{
    // It's important that we use origin.forExit here. Consider if we hoist string
    // addition outside a loop, and we exit at the point of that concatenation
    // from an out-of-memory exception.
    // If the original loop had a try/catch around the string concatenation and we "catch"
    // that exception inside the loop, then the loop's induction variable will be undefined
    // in the OSR exit value recovery. It's more defensible for the string concatenation,
    // then, to not be caught by the for loop's try/catch.
    // Here is the program I'm speaking about:
    //
    // >>>> let's presume "c = a + b" gets hoisted here.
    // for (var i = 0; i < length; i++) {
    //     try {
    //         c = a + b
    //     } catch(e) {
    //         If we threw an out-of-memory error and we caught the exception
    //         right here, then "i" would almost certainly be undefined, which
    //         would make no sense.
    //         ...
    //     }
    // }
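    //
    // So: if the exception will be caught by a handler in this machine frame, emit a
    // non-patchable check that OSR-exits straight into the baseline catch handler, carrying the
    // value-recovery stream for the catch's origin; otherwise fall back to the ordinary
    // exception check, which unwinds.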
    CodeOrigin opCatchOrigin;
    HandlerInfo* exceptionHandler;
    bool willCatchException = m_graph.willCatchExceptionInMachineFrame(m_speculative->m_currentNode->origin.forExit, opCatchOrigin, exceptionHandler);
    if (willCatchException) {
        unsigned streamIndex = m_speculative->m_outOfLineStreamIndex ? *m_speculative->m_outOfLineStreamIndex : m_speculative->m_stream->size();
        MacroAssembler::Jump hadException = emitNonPatchableExceptionCheck();
        // We assume here that this is called after callOperation()/appendCall() is called.
        appendExceptionHandlingOSRExit(ExceptionCheck, streamIndex, opCatchOrigin, exceptionHandler, m_jitCode->common.lastCallSite(), hadException);
    } else
        m_exceptionChecks.append(emitExceptionCheck());
}

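// For call sites whose exceptions would be caught in this machine frame, register a
// GenericUnwind OSR exit keyed to the call site, so that genericUnwind lands on DFG code that
// knows how to exit into the baseline catch handler.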
CallSiteIndex JITCompiler::recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(const CodeOrigin& callSiteCodeOrigin, unsigned eventStreamIndex)
{
    CodeOrigin opCatchOrigin;
    HandlerInfo* exceptionHandler;
    bool willCatchException = m_graph.willCatchExceptionInMachineFrame(callSiteCodeOrigin, opCatchOrigin, exceptionHandler);
    CallSiteIndex callSite = addCallSite(callSiteCodeOrigin);
    if (willCatchException)
        appendExceptionHandlingOSRExit(GenericUnwind, eventStreamIndex, opCatchOrigin, exceptionHandler, callSite);
    return callSite;
}

void JITCompiler::setEndOfMainPath()
{
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), m_speculative->m_origin.semantic);
    if (LIKELY(!m_disassembler))
        return;
    m_disassembler->setEndOfMainPath(labelIgnoringWatchpoints());
}

void JITCompiler::setEndOfCode()
{
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
    if (LIKELY(!m_disassembler))
        return;
    m_disassembler->setEndOfCode(labelIgnoringWatchpoints());
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)