[Re-landing] Use JIT probes for DFG OSR exit.
Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
/*
 * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGJITCompiler.h"

#if ENABLE(DFG_JIT)

#include "CodeBlock.h"
#include "DFGFailedFinalizer.h"
#include "DFGInlineCacheWrapperInlines.h"
#include "DFGJITCode.h"
#include "DFGJITFinalizer.h"
#include "DFGOSRExit.h"
#include "DFGOperations.h"
#include "DFGRegisterBank.h"
#include "DFGSlowPathGenerator.h"
#include "DFGSpeculativeJIT.h"
#include "DFGThunks.h"
#include "JSCInlines.h"
#include "JSCJSValueInlines.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "StructureStubInfo.h"
#include "ThunkGenerators.h"
#include "VM.h"

namespace JSC { namespace DFG {

JITCompiler::JITCompiler(Graph& dfg)
    : CCallHelpers(dfg.m_codeBlock)
    , m_graph(dfg)
    , m_jitCode(adoptRef(new JITCode()))
    , m_blockHeads(dfg.numBlocks())
    , m_pcToCodeOriginMapBuilder(dfg.m_vm)
{
    if (UNLIKELY(shouldDumpDisassembly() || m_graph.m_vm.m_perBytecodeProfiler))
        m_disassembler = std::make_unique<Disassembler>(dfg);
#if ENABLE(FTL_JIT)
    m_jitCode->tierUpInLoopHierarchy = WTFMove(m_graph.m_plan.tierUpInLoopHierarchy);
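    // Seed every tier-up/OSR-entry candidate bytecode with DontTrigger; the
    // triggers are presumably flipped on later, once profiling decides that an
    // FTL entry at that bytecode is worthwhile.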
    for (unsigned tierUpBytecode : m_graph.m_plan.tierUpAndOSREnterBytecodes)
        m_jitCode->tierUpEntryTriggers.add(tierUpBytecode, JITCode::TriggerReason::DontTrigger);
#endif
}

JITCompiler::~JITCompiler()
{
}

void JITCompiler::linkOSRExits()
{
    ASSERT(m_jitCode->osrExit.size() == m_exitCompilationInfo.size());
    if (UNLIKELY(m_graph.compilation())) {
        for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
            Vector<Label> labels;
            if (!info.m_failureJumps.empty()) {
                for (unsigned j = 0; j < info.m_failureJumps.jumps().size(); ++j)
                    labels.append(info.m_failureJumps.jumps()[j].label());
            } else
                labels.append(info.m_replacementSource);
            m_exitSiteLabels.append(labels);
        }
    }

    MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitThunkGenerator);
    CodeLocationLabel osrExitThunkLabel = CodeLocationLabel(osrExitThunk.code());
    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        JumpList& failureJumps = info.m_failureJumps;
        if (!failureJumps.empty())
            failureJumps.link(this);
        else
            info.m_replacementDestination = label();

        jitAssertHasValidCallFrame();
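        // All exit sites share a single out-of-line thunk: each site just records
        // which exit it is in vm->osrExitIndex and jumps to the thunk, which can
        // then use that index to find the OSRExit record and materialize the exit
        // lazily, instead of every site carrying its own exit stub.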
        store32(TrustedImm32(i), &vm()->osrExitIndex);
        Jump target = jump();
        addLinkTask([target, osrExitThunkLabel] (LinkBuffer& linkBuffer) {
            linkBuffer.link(target, osrExitThunkLabel);
        });
    }
}

void JITCompiler::compileEntry()
{
    // This code currently matches the old JIT. In the function header we need to
    // save return address and call frame via the prologue and perform a fast stack check.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
    // We'll need to convert the remaining cti_ style calls (specifically the stack
    // check) which will be dependent on stack layout. (We'd need to account for this in
    // both normal return code and when jumping to an exception handler).
    emitFunctionPrologue();
    emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);
}

void JITCompiler::compileSetupRegistersForEntry()
{
    emitSaveCalleeSaves();
    emitMaterializeTagCheckRegisters();
}

void JITCompiler::compileEntryExecutionFlag()
{
#if ENABLE(FTL_JIT)
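    // Clear the never-executed flag so the FTL tier-up machinery can tell that
    // this code block has been entered through its normal entry point.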
    if (m_graph.m_plan.canTierUpAndOSREnter())
        store8(TrustedImm32(0), &m_jitCode->neverExecutedEntry);
#endif // ENABLE(FTL_JIT)
}

void JITCompiler::compileBody()
{
    // We generate the speculative code path, followed by OSR exit code to return
    // to the old JIT code if speculations fail.

    bool compiledSpeculative = m_speculative->compile();
    ASSERT_UNUSED(compiledSpeculative, compiledSpeculative);
}

void JITCompiler::compileExceptionHandlers()
{
    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);

        copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(*vm());

        // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);

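        // On 32-bit x86 the calling convention passes arguments on the stack, so
        // spill the two argument registers into the outgoing argument slots.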
#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallLinkRecord(call(), lookupExceptionHandlerFromCallerFrame));

        jumpToExceptionHandler(*vm());
    }

    if (!m_exceptionChecks.empty()) {
        m_exceptionChecks.link(this);

        copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(*vm());

        // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));

        jumpToExceptionHandler(*vm());
    }
}

void JITCompiler::link(LinkBuffer& linkBuffer)
{
    // Link the code, populate data in CodeBlock data structures.
    m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount();
    m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit();

    if (!m_graph.m_plan.inlineCallFrames->isEmpty())
        m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames;

#if USE(JSVALUE32_64)
    m_jitCode->common.doubleConstants = WTFMove(m_graph.m_doubleConstants);
#endif

    m_graph.registerFrozenValues();

    BitVector usedJumpTables;
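    // Populate each used binary switch jump table: every slot defaults to the
    // fall-through block, then each case overwrites its slot with the head of
    // its target block.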
    for (Bag<SwitchData>::iterator iter = m_graph.m_switchData.begin(); !!iter; ++iter) {
        SwitchData& data = **iter;
        if (!data.didUseJumpTable)
            continue;

        if (data.kind == SwitchString)
            continue;

        RELEASE_ASSERT(data.kind == SwitchImm || data.kind == SwitchChar);

        usedJumpTables.set(data.switchTableIndex);
        SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
        table.ctiOffsets.grow(table.branchOffsets.size());
        for (unsigned j = table.ctiOffsets.size(); j--;)
            table.ctiOffsets[j] = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            table.ctiOffsets[myCase.value.switchLookupValue(data.kind) - table.min] =
                linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
        }
    }

    for (unsigned i = m_codeBlock->numberOfSwitchJumpTables(); i--;) {
        if (usedJumpTables.get(i))
            continue;

        m_codeBlock->switchJumpTable(i).clear();
    }

    // NOTE: we cannot clear string switch tables because (1) we're running concurrently
    // and we cannot deref StringImpl's and (2) it would be weird to deref those
    // StringImpl's since we refer to them.
    for (Bag<SwitchData>::iterator switchDataIter = m_graph.m_switchData.begin(); !!switchDataIter; ++switchDataIter) {
        SwitchData& data = **switchDataIter;
        if (!data.didUseJumpTable)
            continue;

        if (data.kind != SwitchString)
            continue;

        StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
        StringJumpTable::StringOffsetTable::iterator iter;
        StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
        for (iter = table.offsetTable.begin(); iter != end; ++iter)
            iter->value.ctiOffset = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            iter = table.offsetTable.find(myCase.value.stringImpl());
            RELEASE_ASSERT(iter != end);
            iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
        }
    }

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    for (unsigned i = m_getByIds.size(); i--;)
        m_getByIds[i].finalize(linkBuffer);
    for (unsigned i = m_getByIdsWithThis.size(); i--;)
        m_getByIdsWithThis[i].finalize(linkBuffer);
    for (unsigned i = m_putByIds.size(); i--;)
        m_putByIds[i].finalize(linkBuffer);

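    // Record the inline cache geometry (start label, inline size, and deltas to
    // the slow path call and label) so each StructureStubInfo can be repatched
    // once real code addresses are known.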
    for (unsigned i = 0; i < m_ins.size(); ++i) {
        StructureStubInfo& info = *m_ins[i].m_stubInfo;

        CodeLocationLabel start = linkBuffer.locationOf(m_ins[i].m_jump);
        info.patch.start = start;

        ptrdiff_t inlineSize = MacroAssembler::differenceBetweenCodePtr(
            start, linkBuffer.locationOf(m_ins[i].m_done));
        RELEASE_ASSERT(inlineSize >= 0);
        info.patch.inlineSize = inlineSize;

        info.patch.deltaFromStartToSlowPathCallLocation = MacroAssembler::differenceBetweenCodePtr(
            start, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->call()));

        info.patch.deltaFromStartToSlowPathStart = MacroAssembler::differenceBetweenCodePtr(
            start, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->label()));
    }

    for (auto& record : m_jsCalls) {
        CallLinkInfo& info = *record.info;
        linkBuffer.link(record.slowCall, FunctionPtr(vm()->getCTIStub(linkCallThunkGenerator).code().executableAddress()));
        info.setCallLocations(
            CodeLocationLabel(linkBuffer.locationOfNearCall(record.slowCall)),
            CodeLocationLabel(linkBuffer.locationOf(record.targetToCheck)),
            linkBuffer.locationOfNearCall(record.fastCall));
    }

    for (JSDirectCallRecord& record : m_jsDirectCalls) {
        CallLinkInfo& info = *record.info;
        linkBuffer.link(record.call, linkBuffer.locationOf(record.slowPath));
        info.setCallLocations(
            CodeLocationLabel(),
            linkBuffer.locationOf(record.slowPath),
            linkBuffer.locationOfNearCall(record.call));
    }

    for (JSDirectTailCallRecord& record : m_jsDirectTailCalls) {
        CallLinkInfo& info = *record.info;
        info.setCallLocations(
            linkBuffer.locationOf(record.patchableJump),
            linkBuffer.locationOf(record.slowPath),
            linkBuffer.locationOfNearCall(record.call));
    }

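    // Exits that were emitted as jump replacements rather than explicit branches
    // record their source/destination pair here, so the jump can be patched in
    // later (e.g. when the code is invalidated) to route control to the exit.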
    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        if (info.m_replacementSource.isSet()) {
            m_jitCode->common.jumpReplacements.append(JumpReplacement(
                linkBuffer.locationOf(info.m_replacementSource),
                linkBuffer.locationOf(info.m_replacementDestination)));
        }
    }

    if (UNLIKELY(m_graph.compilation())) {
        ASSERT(m_exitSiteLabels.size() == m_jitCode->osrExit.size());
        for (unsigned i = 0; i < m_exitSiteLabels.size(); ++i) {
            Vector<Label>& labels = m_exitSiteLabels[i];
            Vector<const void*> addresses;
            for (unsigned j = 0; j < labels.size(); ++j)
                addresses.append(linkBuffer.locationOf(labels[j]).executableAddress());
            m_graph.compilation()->addOSRExitSite(addresses);
        }
    } else
        ASSERT(!m_exitSiteLabels.size());

    m_jitCode->common.compilation = m_graph.compilation();

    // Link new DFG exception handlers and remove baseline JIT handlers.
    m_codeBlock->clearExceptionHandlers();
    for (unsigned i = 0; i < m_exceptionHandlerOSRExitCallSites.size(); i++) {
        OSRExitCompilationInfo& info = m_exceptionHandlerOSRExitCallSites[i].exitInfo;
        if (info.m_replacementDestination.isSet()) {
            // If this is *not* set, it means that we already jumped to the OSR exit in pure generated control flow,
            // i.e., we explicitly emitted an exceptionCheck that we know will be caught in this machine frame.
            // If this *is* set, it means we will be landing at this code location from genericUnwind from an
            // exception thrown in a child call frame.
            CodeLocationLabel catchLabel = linkBuffer.locationOf(info.m_replacementDestination);
            HandlerInfo newExceptionHandler = m_exceptionHandlerOSRExitCallSites[i].baselineExceptionHandler;
            CallSiteIndex callSite = m_exceptionHandlerOSRExitCallSites[i].callSiteIndex;
            newExceptionHandler.start = callSite.bits();
            newExceptionHandler.end = callSite.bits() + 1;
            newExceptionHandler.nativeCode = catchLabel;
            m_codeBlock->appendExceptionHandler(newExceptionHandler);
        }
    }

    if (m_pcToCodeOriginMapBuilder.didBuildMapping())
        m_codeBlock->setPCToCodeOriginMap(std::make_unique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), linkBuffer));
}

static void emitStackOverflowCheck(JITCompiler& jit, MacroAssembler::JumpList& stackOverflow)
{
    int frameTopOffset = virtualRegisterForLocal(jit.graph().requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register);
    unsigned maxFrameSize = -frameTopOffset;

    jit.addPtr(MacroAssembler::TrustedImm32(frameTopOffset), GPRInfo::callFrameRegister, GPRInfo::regT1);
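    // frameTopOffset is negative, so regT1 should now point below the call frame.
    // If the frame is larger than the reserved zone, that computation may have
    // wrapped around; a frame top numerically above the call frame can only mean
    // underflow, so treat it as a stack overflow.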
    if (UNLIKELY(maxFrameSize > Options::reservedZoneSize()))
        stackOverflow.append(jit.branchPtr(MacroAssembler::Above, GPRInfo::regT1, GPRInfo::callFrameRegister));
    stackOverflow.append(jit.branchPtr(MacroAssembler::Above, MacroAssembler::AbsoluteAddress(jit.vm()->addressOfSoftStackLimit()), GPRInfo::regT1));
}

void JITCompiler::compile()
{
    makeCatchOSREntryBuffer();

    setStartOfCode();
    compileEntry();
    m_speculative = std::make_unique<SpeculativeJIT>(*this);

    // Plant a check that sufficient space is available in the JSStack.
    JumpList stackOverflow;
    emitStackOverflowCheck(*this, stackOverflow);

    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();
    compileSetupRegistersForEntry();
    compileEntryExecutionFlag();
    compileBody();
    setEndOfMainPath();

    // === Footer code generation ===
    //
    // Generate the stack overflow handling; if the stack check in the entry head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators(m_pcToCodeOriginMapBuilder);
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();

    auto linkBuffer = std::make_unique<LinkBuffer>(*this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
        return;
    }

    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    disassemble(*linkBuffer);

    m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
        m_graph.m_plan, m_jitCode.releaseNonNull(), WTFMove(linkBuffer));
}

void JITCompiler::compileFunction()
{
    makeCatchOSREntryBuffer();

    setStartOfCode();
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    JumpList stackOverflow;
    emitStackOverflowCheck(*this, stackOverflow);

    // Move the stack pointer down to accommodate locals.
    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    compileSetupRegistersForEntry();
    compileEntryExecutionFlag();

    // === Function body code generation ===
    m_speculative = std::make_unique<SpeculativeJIT>(*this);
    compileBody();
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate code to perform the stack overflow handling (if the stack check in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the stack overflow handling; if the stack check in the function head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // The fast entry point into a function does not check that the correct number
    // of arguments has been passed to the call (we only use the fast entry point
    // where we can statically determine the correct number of arguments have been
    // passed, or have already checked). In cases where an arity check is necessary,
    // we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    m_arityCheck = label();
    compileEntry();

    load32(AssemblyHelpers::payloadFor((VirtualRegister)CallFrameSlot::argumentCount), GPRInfo::regT1);
    branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
    m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::regT0);
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
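    // A zero result from the arity-check operation means the frame is already
    // well-formed, so we can enter at fromArityCheck directly; otherwise the
    // result is handed to the arity fixup thunk, which adjusts the frame to make
    // room for the missing arguments before entering the function.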
    branchTest32(Zero, GPRInfo::returnValueGPR).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    move(GPRInfo::returnValueGPR, GPRInfo::argumentGPR0);
    m_callArityFixup = call();
    jump(fromArityCheck);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators(m_pcToCodeOriginMapBuilder);
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();

    // === Link ===
    auto linkBuffer = std::make_unique<LinkBuffer>(*this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
        return;
    }
    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    linkBuffer->link(m_callArityFixup, FunctionPtr((vm()->getCTIStub(arityFixupGenerator)).code().executableAddress()));

    disassemble(*linkBuffer);

    MacroAssemblerCodePtr withArityCheck = linkBuffer->locationOf(m_arityCheck);

    m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
        m_graph.m_plan, m_jitCode.releaseNonNull(), WTFMove(linkBuffer), withArityCheck);
}

void JITCompiler::disassemble(LinkBuffer& linkBuffer)
{
    if (shouldDumpDisassembly()) {
        m_disassembler->dump(linkBuffer);
        linkBuffer.didAlreadyDisassemble();
    }

    if (UNLIKELY(m_graph.m_plan.compilation))
        m_disassembler->reportToProfiler(m_graph.m_plan.compilation.get(), linkBuffer);
}

#if USE(JSVALUE32_64)
void* JITCompiler::addressOfDoubleConstant(Node* node)
{
    double value = node->asNumber();
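    // Key the constant map on the raw bit pattern rather than the double value,
    // so that bitwise-distinct values that compare equal as doubles (e.g. +0.0
    // vs -0.0) still get their own constant-pool slots, and NaN keys behave.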
    int64_t valueBits = bitwise_cast<int64_t>(value);
    auto it = m_graph.m_doubleConstantsMap.find(valueBits);
    if (it != m_graph.m_doubleConstantsMap.end())
        return it->second;

    if (!m_graph.m_doubleConstants)
        m_graph.m_doubleConstants = std::make_unique<Bag<double>>();

    double* addressInConstantPool = m_graph.m_doubleConstants->add();
    *addressInConstantPool = value;
    m_graph.m_doubleConstantsMap[valueBits] = addressInConstantPool;
    return addressInConstantPool;
}
#endif

void JITCompiler::noticeCatchEntrypoint(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer, Vector<FlushFormat>&& argumentFormats)
{
    RELEASE_ASSERT(basicBlock.isCatchEntrypoint);
    RELEASE_ASSERT(basicBlock.intersectionOfCFAHasVisited); // An entrypoint is reachable by definition.
    m_jitCode->common.appendCatchEntrypoint(basicBlock.bytecodeBegin, linkBuffer.locationOf(blockHead).executableAddress(), WTFMove(argumentFormats));
}

void JITCompiler::noticeOSREntry(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer)
{
    RELEASE_ASSERT(!basicBlock.isCatchEntrypoint);

    // OSR entry is not allowed into blocks deemed unreachable by control flow analysis.
    if (!basicBlock.intersectionOfCFAHasVisited)
        return;

    OSREntryData* entry = m_jitCode->appendOSREntryData(basicBlock.bytecodeBegin, linkBuffer.offsetOf(blockHead));

    entry->m_expectedValues = basicBlock.intersectionOfPastValuesAtHead;

    // Fix the expected values: in our protocol, a dead variable will have an expected
    // value of (None, []). But the old JIT may stash some values there. So we really
    // need (Top, TOP).
    for (size_t argument = 0; argument < basicBlock.variablesAtHead.numberOfArguments(); ++argument) {
        Node* node = basicBlock.variablesAtHead.argument(argument);
        if (!node || !node->shouldGenerate())
            entry->m_expectedValues.argument(argument).makeHeapTop();
    }
    for (size_t local = 0; local < basicBlock.variablesAtHead.numberOfLocals(); ++local) {
        Node* node = basicBlock.variablesAtHead.local(local);
        if (!node || !node->shouldGenerate())
            entry->m_expectedValues.local(local).makeHeapTop();
        else {
            VariableAccessData* variable = node->variableAccessData();
            entry->m_machineStackUsed.set(variable->machineLocal().toLocal());

            switch (variable->flushFormat()) {
            case FlushedDouble:
                entry->m_localsForcedDouble.set(local);
                break;
            case FlushedInt52:
                entry->m_localsForcedAnyInt.set(local);
                break;
            default:
                break;
            }

            if (variable->local() != variable->machineLocal()) {
                entry->m_reshufflings.append(
                    OSREntryReshuffling(
                        variable->local().offset(), variable->machineLocal().offset()));
            }
        }
    }

    entry->m_reshufflings.shrinkToFit();
}

void JITCompiler::appendExceptionHandlingOSRExit(ExitKind kind, unsigned eventStreamIndex, CodeOrigin opCatchOrigin, HandlerInfo* exceptionHandler, CallSiteIndex callSite, MacroAssembler::JumpList jumpsToFail)
{
    OSRExit exit(kind, JSValueRegs(), MethodOfGettingAValueProfile(), m_speculative.get(), eventStreamIndex);
    exit.m_codeOrigin = opCatchOrigin;
    exit.m_exceptionHandlerCallSiteIndex = callSite;
    OSRExitCompilationInfo& exitInfo = appendExitInfo(jumpsToFail);
    jitCode()->appendOSRExit(exit);
    m_exceptionHandlerOSRExitCallSites.append(ExceptionHandlingOSRExitInfo { exitInfo, *exceptionHandler, callSite });
}

void JITCompiler::exceptionCheck()
{
    // It's important that we use origin.forExit here. Consider if we hoist string
    // addition outside a loop, and that we exit at the point of that concatenation
    // from an out of memory exception.
    // If the original loop had a try/catch around string concatenation, and we "catch"
    // that exception inside the loop, then the loop's induction variable will be undefined
    // in the OSR exit value recovery. It's more defensible for the string concatenation,
    // then, to not be caught by the for loop's try/catch.
    // Here is the program I'm speaking about:
    //
    // >>>> let's presume "c = a + b" gets hoisted here.
    // for (var i = 0; i < length; i++) {
    //     try {
    //         c = a + b
    //     } catch(e) {
    //         If we threw an out of memory error, and we caught the exception
    //         right here, then "i" would almost certainly be undefined, which
    //         would make no sense.
    //         ...
    //     }
    // }
    CodeOrigin opCatchOrigin;
    HandlerInfo* exceptionHandler;
    bool willCatchException = m_graph.willCatchExceptionInMachineFrame(m_speculative->m_currentNode->origin.forExit, opCatchOrigin, exceptionHandler);
    if (willCatchException) {
        unsigned streamIndex = m_speculative->m_outOfLineStreamIndex ? *m_speculative->m_outOfLineStreamIndex : m_speculative->m_stream->size();
        MacroAssembler::Jump hadException = emitNonPatchableExceptionCheck(*vm());
        // We assume here that this is called after callOperation()/appendCall() is called.
        appendExceptionHandlingOSRExit(ExceptionCheck, streamIndex, opCatchOrigin, exceptionHandler, m_jitCode->common.lastCallSite(), hadException);
    } else
        m_exceptionChecks.append(emitExceptionCheck(*vm()));
}

CallSiteIndex JITCompiler::recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(const CodeOrigin& callSiteCodeOrigin, unsigned eventStreamIndex)
{
    CodeOrigin opCatchOrigin;
    HandlerInfo* exceptionHandler;
    bool willCatchException = m_graph.willCatchExceptionInMachineFrame(callSiteCodeOrigin, opCatchOrigin, exceptionHandler);
    CallSiteIndex callSite = addCallSite(callSiteCodeOrigin);
    if (willCatchException)
        appendExceptionHandlingOSRExit(GenericUnwind, eventStreamIndex, opCatchOrigin, exceptionHandler, callSite);
    return callSite;
}

void JITCompiler::setEndOfMainPath()
{
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), m_speculative->m_origin.semantic);
    if (LIKELY(!m_disassembler))
        return;
    m_disassembler->setEndOfMainPath(labelIgnoringWatchpoints());
}

void JITCompiler::setEndOfCode()
{
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
    if (LIKELY(!m_disassembler))
        return;
    m_disassembler->setEndOfCode(labelIgnoringWatchpoints());
}

void JITCompiler::makeCatchOSREntryBuffer()
{
    if (m_graph.m_maxLocalsForCatchOSREntry) {
        uint32_t numberOfLiveLocals = std::max(*m_graph.m_maxLocalsForCatchOSREntry, 1u); // Make sure we always allocate a non-null catchOSREntryBuffer.
        m_jitCode->common.catchOSREntryBuffer = vm()->scratchBufferForSize(sizeof(JSValue) * numberOfLiveLocals);
    }
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)