Templatize CodePtr/Refs/FunctionPtrs with PtrTags.
Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
/*
 * Copyright (C) 2011-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGJITCompiler.h"

#if ENABLE(DFG_JIT)

#include "CodeBlock.h"
#include "DFGFailedFinalizer.h"
#include "DFGInlineCacheWrapperInlines.h"
#include "DFGJITCode.h"
#include "DFGJITFinalizer.h"
#include "DFGOSRExit.h"
#include "DFGOperations.h"
#include "DFGRegisterBank.h"
#include "DFGSlowPathGenerator.h"
#include "DFGSpeculativeJIT.h"
#include "DFGThunks.h"
#include "JSCInlines.h"
#include "JSCJSValueInlines.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "StructureStubInfo.h"
#include "ThunkGenerators.h"
#include "VM.h"

namespace JSC { namespace DFG {

JITCompiler::JITCompiler(Graph& dfg)
    : CCallHelpers(dfg.m_codeBlock)
    , m_graph(dfg)
    , m_jitCode(adoptRef(new JITCode()))
    , m_blockHeads(dfg.numBlocks())
    , m_pcToCodeOriginMapBuilder(dfg.m_vm)
{
    if (UNLIKELY(shouldDumpDisassembly() || m_graph.m_vm.m_perBytecodeProfiler))
        m_disassembler = std::make_unique<Disassembler>(dfg);
#if ENABLE(FTL_JIT)
    m_jitCode->tierUpInLoopHierarchy = WTFMove(m_graph.m_plan.tierUpInLoopHierarchy);
    for (unsigned tierUpBytecode : m_graph.m_plan.tierUpAndOSREnterBytecodes)
        m_jitCode->tierUpEntryTriggers.add(tierUpBytecode, JITCode::TriggerReason::DontTrigger);
#endif
}

JITCompiler::~JITCompiler()
{
}

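// Link the OSR exits generated so far. With Options::useProbeOSRExit() we plant plain jumps and
// ask the LinkBuffer to point them at the probe-based OSR exit thunk; otherwise we plant patchable
// jumps and record their offsets on the OSRExit so link() can wire them to the exit generation
// thunk. When profiling, we also remember the labels of every exit site for reporting in link().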
void JITCompiler::linkOSRExits()
{
    ASSERT(m_jitCode->osrExit.size() == m_exitCompilationInfo.size());
    if (UNLIKELY(m_graph.compilation())) {
        for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
            Vector<Label> labels;
            if (!info.m_failureJumps.empty()) {
                for (unsigned j = 0; j < info.m_failureJumps.jumps().size(); ++j)
                    labels.append(info.m_failureJumps.jumps()[j].label());
            } else
                labels.append(info.m_replacementSource);
            m_exitSiteLabels.append(labels);
        }
    }

    MacroAssemblerCodeRef<JITThunkPtrTag> osrExitThunk = vm()->getCTIStub(osrExitThunkGenerator);
    auto osrExitThunkLabel = CodeLocationLabel<JITThunkPtrTag>(osrExitThunk.code());
    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        JumpList& failureJumps = info.m_failureJumps;
        if (!failureJumps.empty())
            failureJumps.link(this);
        else
            info.m_replacementDestination = label();

        jitAssertHasValidCallFrame();
        store32(TrustedImm32(i), &vm()->osrExitIndex);
        if (Options::useProbeOSRExit()) {
            Jump target = jump();
            addLinkTask([target, osrExitThunkLabel] (LinkBuffer& linkBuffer) {
                linkBuffer.link(target, osrExitThunkLabel);
            });
        } else {
            OSRExit& exit = m_jitCode->osrExit[i];
            exit.setPatchableCodeOffset(patchableJump());
        }
    }
}

void JITCompiler::compileEntry()
{
    // This code currently matches the old JIT. In the function header we need to
    // save return address and call frame via the prologue and perform a fast stack check.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
    // We'll need to convert the remaining cti_ style calls (specifically the stack
    // check) which will be dependent on stack layout. (We'd need to account for this in
    // both normal return code and when jumping to an exception handler).
    emitFunctionPrologue();
    emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);
}

void JITCompiler::compileSetupRegistersForEntry()
{
    emitSaveCalleeSaves();
    emitMaterializeTagCheckRegisters();
}

void JITCompiler::compileEntryExecutionFlag()
{
#if ENABLE(FTL_JIT)
    if (m_graph.m_plan.canTierUpAndOSREnter())
        store8(TrustedImm32(0), &m_jitCode->neverExecutedEntry);
#endif // ENABLE(FTL_JIT)
}

void JITCompiler::compileBody()
{
    // We generate the speculative code path, followed by OSR exit code to return
    // to the old JIT code if speculations fail.

    bool compiledSpeculative = m_speculative->compile();
    ASSERT_UNUSED(compiledSpeculative, compiledSpeculative);
}

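// Exception checks are linked here as two out-of-line paths: checks that must unwind this frame
// (m_exceptionChecksWithCallFrameRollback) call lookupExceptionHandlerFromCallerFrame, while
// ordinary checks call lookupExceptionHandler; both paths then jump to the handler that was found.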
void JITCompiler::compileExceptionHandlers()
{
    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);

        copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame);

        // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallLinkRecord(call(OperationPtrTag), FunctionPtr<OperationPtrTag>(lookupExceptionHandlerFromCallerFrame)));

        jumpToExceptionHandler(*vm());
    }

    if (!m_exceptionChecks.empty()) {
        m_exceptionChecks.link(this);

        copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame);

        // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallLinkRecord(call(OperationPtrTag), FunctionPtr<OperationPtrTag>(lookupExceptionHandler)));

        jumpToExceptionHandler(*vm());
    }
}

void JITCompiler::link(LinkBuffer& linkBuffer)
{
    // Link the code, populate data in CodeBlock data structures.
    m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount();
    m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit();

    if (!m_graph.m_plan.inlineCallFrames->isEmpty())
        m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames;

#if USE(JSVALUE32_64)
    m_jitCode->common.doubleConstants = WTFMove(m_graph.m_doubleConstants);
#endif

    m_graph.registerFrozenValues();

    BitVector usedJumpTables;
    for (Bag<SwitchData>::iterator iter = m_graph.m_switchData.begin(); !!iter; ++iter) {
        SwitchData& data = **iter;
        if (!data.didUseJumpTable)
            continue;

        if (data.kind == SwitchString)
            continue;

        RELEASE_ASSERT(data.kind == SwitchImm || data.kind == SwitchChar);

        usedJumpTables.set(data.switchTableIndex);
        SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf<JSSwitchPtrTag>(m_blockHeads[data.fallThrough.block->index]);
        table.ctiOffsets.grow(table.branchOffsets.size());
        for (unsigned j = table.ctiOffsets.size(); j--;)
            table.ctiOffsets[j] = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            table.ctiOffsets[myCase.value.switchLookupValue(data.kind) - table.min] =
                linkBuffer.locationOf<JSSwitchPtrTag>(m_blockHeads[myCase.target.block->index]);
        }
    }

    for (unsigned i = m_codeBlock->numberOfSwitchJumpTables(); i--;) {
        if (usedJumpTables.get(i))
            continue;

        m_codeBlock->switchJumpTable(i).clear();
    }

    // NOTE: we cannot clear string switch tables because (1) we're running concurrently
    // and we cannot deref StringImpl's and (2) it would be weird to deref those
    // StringImpl's since we refer to them.
    for (Bag<SwitchData>::iterator switchDataIter = m_graph.m_switchData.begin(); !!switchDataIter; ++switchDataIter) {
        SwitchData& data = **switchDataIter;
        if (!data.didUseJumpTable)
            continue;

        if (data.kind != SwitchString)
            continue;

        StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);

        table.ctiDefault = linkBuffer.locationOf<JSSwitchPtrTag>(m_blockHeads[data.fallThrough.block->index]);
        StringJumpTable::StringOffsetTable::iterator iter;
        StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
        for (iter = table.offsetTable.begin(); iter != end; ++iter)
            iter->value.ctiOffset = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            iter = table.offsetTable.find(myCase.value.stringImpl());
            RELEASE_ASSERT(iter != end);
            iter->value.ctiOffset = linkBuffer.locationOf<JSSwitchPtrTag>(m_blockHeads[myCase.target.block->index]);
        }
    }

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    for (unsigned i = m_getByIds.size(); i--;)
        m_getByIds[i].finalize(linkBuffer);
    for (unsigned i = m_getByIdsWithThis.size(); i--;)
        m_getByIdsWithThis[i].finalize(linkBuffer);
    for (unsigned i = m_putByIds.size(); i--;)
        m_putByIds[i].finalize(linkBuffer);

    for (unsigned i = 0; i < m_ins.size(); ++i) {
        StructureStubInfo& info = *m_ins[i].m_stubInfo;

        CodeLocationLabel<JITStubRoutinePtrTag> start = linkBuffer.locationOf<JITStubRoutinePtrTag>(m_ins[i].m_jump);
        info.patch.start = start;

        ptrdiff_t inlineSize = MacroAssembler::differenceBetweenCodePtr(
            start, linkBuffer.locationOf<JSInternalPtrTag>(m_ins[i].m_done));
        RELEASE_ASSERT(inlineSize >= 0);
        info.patch.inlineSize = inlineSize;

        info.patch.deltaFromStartToSlowPathCallLocation = MacroAssembler::differenceBetweenCodePtr(
            start, linkBuffer.locationOf<JSInternalPtrTag>(m_ins[i].m_slowPathGenerator->call()));

        info.patch.deltaFromStartToSlowPathStart = MacroAssembler::differenceBetweenCodePtr(
            start, linkBuffer.locationOf<JSInternalPtrTag>(m_ins[i].m_slowPathGenerator->label()));
    }

    auto linkCallThunk = FunctionPtr<NoPtrTag>(vm()->getCTIStub(linkCallThunkGenerator).retaggedCode<NoPtrTag>());
    for (auto& record : m_jsCalls) {
        CallLinkInfo& info = *record.info;
        linkBuffer.link(record.slowCall, linkCallThunk);
        info.setCallLocations(
            CodeLocationLabel<JSEntryPtrTag>(linkBuffer.locationOfNearCall<JSEntryPtrTag>(record.slowCall)),
            CodeLocationLabel<JSEntryPtrTag>(linkBuffer.locationOf<JSEntryPtrTag>(record.targetToCheck)),
            linkBuffer.locationOfNearCall<JSEntryPtrTag>(record.fastCall));
    }

    for (JSDirectCallRecord& record : m_jsDirectCalls) {
        CallLinkInfo& info = *record.info;
        linkBuffer.link(record.call, linkBuffer.locationOf<NoPtrTag>(record.slowPath));
        info.setCallLocations(
            CodeLocationLabel<JSEntryPtrTag>(),
            linkBuffer.locationOf<JSEntryPtrTag>(record.slowPath),
            linkBuffer.locationOfNearCall<JSEntryPtrTag>(record.call));
    }

    for (JSDirectTailCallRecord& record : m_jsDirectTailCalls) {
        CallLinkInfo& info = *record.info;
        info.setCallLocations(
            linkBuffer.locationOf<JSEntryPtrTag>(record.patchableJump),
            linkBuffer.locationOf<JSEntryPtrTag>(record.slowPath),
            linkBuffer.locationOfNearCall<JSEntryPtrTag>(record.call));
    }

    MacroAssemblerCodeRef<JITThunkPtrTag> osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
    auto target = CodeLocationLabel<JITThunkPtrTag>(osrExitThunk.code());
    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        if (!Options::useProbeOSRExit()) {
            OSRExit& exit = m_jitCode->osrExit[i];
            linkBuffer.link(exit.getPatchableCodeOffsetAsJump(), target);
            exit.correctJump(linkBuffer);
        }
        if (info.m_replacementSource.isSet()) {
            m_jitCode->common.jumpReplacements.append(JumpReplacement(
                linkBuffer.locationOf<JSInternalPtrTag>(info.m_replacementSource),
                linkBuffer.locationOf<OSRExitPtrTag>(info.m_replacementDestination)));
        }
    }

    if (UNLIKELY(m_graph.compilation())) {
        ASSERT(m_exitSiteLabels.size() == m_jitCode->osrExit.size());
        for (unsigned i = 0; i < m_exitSiteLabels.size(); ++i) {
            Vector<Label>& labels = m_exitSiteLabels[i];
            Vector<MacroAssemblerCodePtr<JSInternalPtrTag>> addresses;
            for (unsigned j = 0; j < labels.size(); ++j)
                addresses.append(linkBuffer.locationOf<JSInternalPtrTag>(labels[j]));
            m_graph.compilation()->addOSRExitSite(addresses);
        }
    } else
        ASSERT(!m_exitSiteLabels.size());

    m_jitCode->common.compilation = m_graph.compilation();

    // Link new DFG exception handlers and remove baseline JIT handlers.
    m_codeBlock->clearExceptionHandlers();
    for (unsigned i = 0; i < m_exceptionHandlerOSRExitCallSites.size(); i++) {
        OSRExitCompilationInfo& info = m_exceptionHandlerOSRExitCallSites[i].exitInfo;
        if (info.m_replacementDestination.isSet()) {
            // If this is *not* set, it means that we already jumped to the OSR exit in pure generated control flow,
            // i.e., we explicitly emitted an exceptionCheck that we know will be caught in this machine frame.
            // If this *is set*, it means we will be landing at this code location from genericUnwind from an
            // exception thrown in a child call frame.
            CodeLocationLabel<ExceptionHandlerPtrTag> catchLabel = linkBuffer.locationOf<ExceptionHandlerPtrTag>(info.m_replacementDestination);
            HandlerInfo newExceptionHandler = m_exceptionHandlerOSRExitCallSites[i].baselineExceptionHandler;
            CallSiteIndex callSite = m_exceptionHandlerOSRExitCallSites[i].callSiteIndex;
            newExceptionHandler.start = callSite.bits();
            newExceptionHandler.end = callSite.bits() + 1;
            newExceptionHandler.nativeCode = catchLabel;
            m_codeBlock->appendExceptionHandler(newExceptionHandler);
        }
    }

    if (m_pcToCodeOriginMapBuilder.didBuildMapping())
        m_codeBlock->setPCToCodeOriginMap(std::make_unique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), linkBuffer));
}

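// Emits the fast stack check: regT1 is pointed at the prospective frame top and compared against
// the VM's soft stack limit. For frames larger than the reserved zone we also compare against the
// call frame register itself, presumably to catch the offset arithmetic wrapping around.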
static void emitStackOverflowCheck(JITCompiler& jit, MacroAssembler::JumpList& stackOverflow)
{
    int frameTopOffset = virtualRegisterForLocal(jit.graph().requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register);
    unsigned maxFrameSize = -frameTopOffset;

    jit.addPtr(MacroAssembler::TrustedImm32(frameTopOffset), GPRInfo::callFrameRegister, GPRInfo::regT1);
    if (UNLIKELY(maxFrameSize > Options::reservedZoneSize()))
        stackOverflow.append(jit.branchPtr(MacroAssembler::Above, GPRInfo::regT1, GPRInfo::callFrameRegister));
    stackOverflow.append(jit.branchPtr(MacroAssembler::Above, MacroAssembler::AbsoluteAddress(jit.vm()->addressOfSoftStackLimit()), GPRInfo::regT1));
}

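// compile() emits the entry path for code that needs no arity check (e.g. program or eval code);
// compileFunction() below additionally emits the arity-check entry point used for function code.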
void JITCompiler::compile()
{
    makeCatchOSREntryBuffer();

    setStartOfCode();
    compileEntry();
    m_speculative = std::make_unique<SpeculativeJIT>(*this);

    // Plant a check that sufficient space is available in the JSStack.
    JumpList stackOverflow;
    emitStackOverflowCheck(*this, stackOverflow);

    addPtr(TrustedImm32(-(m_graph.frameRegisterCount() * sizeof(Register))), GPRInfo::callFrameRegister, stackPointerRegister);
    if (Options::zeroStackFrame())
        clearStackFrame(GPRInfo::callFrameRegister, stackPointerRegister, GPRInfo::regT0, m_graph.frameRegisterCount() * sizeof(Register));
    checkStackPointerAlignment();
    compileSetupRegistersForEntry();
    compileEntryExecutionFlag();
    compileBody();
    setEndOfMainPath();

    // === Footer code generation ===
    //
    // Generate the stack overflow handling; if the stack check in the entry head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators(m_pcToCodeOriginMapBuilder);
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();

    auto linkBuffer = std::make_unique<LinkBuffer>(*this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
        return;
    }

    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    disassemble(*linkBuffer);

    m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
        m_graph.m_plan, m_jitCode.releaseNonNull(), WTFMove(linkBuffer));
}

void JITCompiler::compileFunction()
{
    makeCatchOSREntryBuffer();

    setStartOfCode();
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    JumpList stackOverflow;
    emitStackOverflowCheck(*this, stackOverflow);

    // Move the stack pointer down to accommodate locals
    addPtr(TrustedImm32(-(m_graph.frameRegisterCount() * sizeof(Register))), GPRInfo::callFrameRegister, stackPointerRegister);
    if (Options::zeroStackFrame())
        clearStackFrame(GPRInfo::callFrameRegister, stackPointerRegister, GPRInfo::regT0, m_graph.frameRegisterCount() * sizeof(Register));
    checkStackPointerAlignment();

    compileSetupRegistersForEntry();
    compileEntryExecutionFlag();

    // === Function body code generation ===
    m_speculative = std::make_unique<SpeculativeJIT>(*this);
    compileBody();
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate code to perform the stack overflow handling (if the stack check in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the stack overflow handling; if the stack check in the function head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // The fast entry point into a function does not check that the correct number of arguments
    // has been passed to the call (we only use the fast entry point where we can statically
    // determine that the correct number of arguments has been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    m_arityCheck = label();
    compileEntry();

    load32(AssemblyHelpers::payloadFor((VirtualRegister)CallFrameSlot::argumentCount), GPRInfo::regT1);
    branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
    m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::regT0);
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
    branchTest32(Zero, GPRInfo::returnValueGPR).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    move(GPRInfo::returnValueGPR, GPRInfo::argumentGPR0);
    Call callArityFixup = nearCall();
    jump(fromArityCheck);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators(m_pcToCodeOriginMapBuilder);
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();

    // === Link ===
    auto linkBuffer = std::make_unique<LinkBuffer>(*this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
        return;
    }
    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    linkBuffer->link(callArityFixup, FunctionPtr<JITThunkPtrTag>(vm()->getCTIStub(arityFixupGenerator).code()));

    disassemble(*linkBuffer);

    MacroAssemblerCodePtr<JSEntryPtrTag> withArityCheck = linkBuffer->locationOf<JSEntryPtrTag>(m_arityCheck);

    m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
        m_graph.m_plan, m_jitCode.releaseNonNull(), WTFMove(linkBuffer), withArityCheck);
}

void JITCompiler::disassemble(LinkBuffer& linkBuffer)
{
    if (shouldDumpDisassembly()) {
        m_disassembler->dump(linkBuffer);
        linkBuffer.didAlreadyDisassemble();
    }

    if (UNLIKELY(m_graph.m_plan.compilation))
        m_disassembler->reportToProfiler(m_graph.m_plan.compilation.get(), linkBuffer);
}

#if USE(JSVALUE32_64)
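// On JSVALUE32_64 targets double constants are not encoded as immediates; each distinct value gets
// a slot in an out-of-line pool that the generated code loads from. The map below deduplicates
// constants by their bit pattern (so, e.g., +0.0 and -0.0 get separate slots).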
void* JITCompiler::addressOfDoubleConstant(Node* node)
{
    double value = node->asNumber();
    int64_t valueBits = bitwise_cast<int64_t>(value);
    auto it = m_graph.m_doubleConstantsMap.find(valueBits);
    if (it != m_graph.m_doubleConstantsMap.end())
        return it->second;

    if (!m_graph.m_doubleConstants)
        m_graph.m_doubleConstants = std::make_unique<Bag<double>>();

    double* addressInConstantPool = m_graph.m_doubleConstants->add();
    *addressInConstantPool = value;
    m_graph.m_doubleConstantsMap[valueBits] = addressInConstantPool;
    return addressInConstantPool;
}
#endif

void JITCompiler::noticeCatchEntrypoint(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer, Vector<FlushFormat>&& argumentFormats)
{
    RELEASE_ASSERT(basicBlock.isCatchEntrypoint);
    RELEASE_ASSERT(basicBlock.intersectionOfCFAHasVisited); // An entrypoint is reachable by definition.
    m_jitCode->common.appendCatchEntrypoint(basicBlock.bytecodeBegin, linkBuffer.locationOf<ExceptionHandlerPtrTag>(blockHead), WTFMove(argumentFormats));
}

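// Records an OSR entry point for this block: the abstract values we expect at its head, which
// locals must arrive as doubles or Int52, which machine slots are in use, and any local-to-machine
// reshufflings, so that OSR entry can validate and rearrange the incoming frame before jumping in.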
void JITCompiler::noticeOSREntry(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer)
{
    RELEASE_ASSERT(!basicBlock.isCatchEntrypoint);

    // OSR entry is not allowed into blocks deemed unreachable by control flow analysis.
    if (!basicBlock.intersectionOfCFAHasVisited)
        return;

    OSREntryData* entry = m_jitCode->appendOSREntryData(basicBlock.bytecodeBegin, linkBuffer.offsetOf(blockHead));

    entry->m_expectedValues = basicBlock.intersectionOfPastValuesAtHead;

    // Fix the expected values: in our protocol, a dead variable will have an expected
    // value of (None, []). But the old JIT may stash some values there. So we really
    // need (Top, TOP).
    for (size_t argument = 0; argument < basicBlock.variablesAtHead.numberOfArguments(); ++argument) {
        Node* node = basicBlock.variablesAtHead.argument(argument);
        if (!node || !node->shouldGenerate())
            entry->m_expectedValues.argument(argument).makeHeapTop();
    }
    for (size_t local = 0; local < basicBlock.variablesAtHead.numberOfLocals(); ++local) {
        Node* node = basicBlock.variablesAtHead.local(local);
        if (!node || !node->shouldGenerate())
            entry->m_expectedValues.local(local).makeHeapTop();
        else {
            VariableAccessData* variable = node->variableAccessData();
            entry->m_machineStackUsed.set(variable->machineLocal().toLocal());

            switch (variable->flushFormat()) {
            case FlushedDouble:
                entry->m_localsForcedDouble.set(local);
                break;
            case FlushedInt52:
                entry->m_localsForcedAnyInt.set(local);
                break;
            default:
                break;
            }

            if (variable->local() != variable->machineLocal()) {
                entry->m_reshufflings.append(
                    OSREntryReshuffling(
                        variable->local().offset(), variable->machineLocal().offset()));
            }
        }
    }

    entry->m_reshufflings.shrinkToFit();
}

void JITCompiler::appendExceptionHandlingOSRExit(ExitKind kind, unsigned eventStreamIndex, CodeOrigin opCatchOrigin, HandlerInfo* exceptionHandler, CallSiteIndex callSite, MacroAssembler::JumpList jumpsToFail)
{
    OSRExit exit(kind, JSValueRegs(), MethodOfGettingAValueProfile(), m_speculative.get(), eventStreamIndex);
    exit.m_codeOrigin = opCatchOrigin;
    exit.m_exceptionHandlerCallSiteIndex = callSite;
    OSRExitCompilationInfo& exitInfo = appendExitInfo(jumpsToFail);
    jitCode()->appendOSRExit(exit);
    m_exceptionHandlerOSRExitCallSites.append(ExceptionHandlingOSRExitInfo { exitInfo, *exceptionHandler, callSite });
}

void JITCompiler::exceptionCheck()
{
    // It's important that we use origin.forExit here. Consider if we hoist string
    // addition outside a loop, and that we exit at the point of that concatenation
    // from an out of memory exception.
    // If the original loop had a try/catch around the string concatenation and we "catch"
    // that exception inside the loop, then the loop's induction variable will be undefined
    // in the OSR exit value recovery. It's more defensible for the string concatenation,
    // then, to not be caught by the for loop's try/catch.
    // Here is the program I'm speaking about:
    //
    // >>>> let's presume "c = a + b" gets hoisted here.
    // for (var i = 0; i < length; i++) {
    //     try {
    //         c = a + b
    //     } catch(e) {
    //         If we threw an out of memory error, and we caught the exception
    //         right here, then "i" would almost certainly be undefined, which
    //         would make no sense.
    //         ...
    //     }
    // }
    CodeOrigin opCatchOrigin;
    HandlerInfo* exceptionHandler;
    bool willCatchException = m_graph.willCatchExceptionInMachineFrame(m_speculative->m_currentNode->origin.forExit, opCatchOrigin, exceptionHandler);
    if (willCatchException) {
        unsigned streamIndex = m_speculative->m_outOfLineStreamIndex ? *m_speculative->m_outOfLineStreamIndex : m_speculative->m_stream->size();
        MacroAssembler::Jump hadException = emitNonPatchableExceptionCheck(*vm());
        // We assume here that this is called after callOperation()/appendCall() is called.
        appendExceptionHandlingOSRExit(ExceptionCheck, streamIndex, opCatchOrigin, exceptionHandler, m_jitCode->common.lastCallSite(), hadException);
    } else
        m_exceptionChecks.append(emitExceptionCheck(*vm()));
}

CallSiteIndex JITCompiler::recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(const CodeOrigin& callSiteCodeOrigin, unsigned eventStreamIndex)
{
    CodeOrigin opCatchOrigin;
    HandlerInfo* exceptionHandler;
    bool willCatchException = m_graph.willCatchExceptionInMachineFrame(callSiteCodeOrigin, opCatchOrigin, exceptionHandler);
    CallSiteIndex callSite = addCallSite(callSiteCodeOrigin);
    if (willCatchException)
        appendExceptionHandlingOSRExit(GenericUnwind, eventStreamIndex, opCatchOrigin, exceptionHandler, callSite);
    return callSite;
}

void JITCompiler::setEndOfMainPath()
{
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), m_speculative->m_origin.semantic);
    if (LIKELY(!m_disassembler))
        return;
    m_disassembler->setEndOfMainPath(labelIgnoringWatchpoints());
}

void JITCompiler::setEndOfCode()
{
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
    if (LIKELY(!m_disassembler))
        return;
    m_disassembler->setEndOfCode(labelIgnoringWatchpoints());
}

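// If this code block can be OSR-entered at a catch handler, reserve a scratch buffer big enough to
// hold the live locals that OSR entry at the catch will read from.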
void JITCompiler::makeCatchOSREntryBuffer()
{
    if (m_graph.m_maxLocalsForCatchOSREntry) {
        uint32_t numberOfLiveLocals = std::max(*m_graph.m_maxLocalsForCatchOSREntry, 1u); // Make sure we always allocate a non-null catchOSREntryBuffer.
        m_jitCode->common.catchOSREntryBuffer = vm()->scratchBufferForSize(sizeof(JSValue) * numberOfLiveLocals);
    }
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)