/*
 * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGJITCompiler.h"

#if ENABLE(DFG_JIT)

#include "CodeBlock.h"
#include "DFGFailedFinalizer.h"
#include "DFGInlineCacheWrapperInlines.h"
#include "DFGJITCode.h"
#include "DFGJITFinalizer.h"
#include "DFGOSRExit.h"
#include "DFGOperations.h"
#include "DFGRegisterBank.h"
#include "DFGSlowPathGenerator.h"
#include "DFGSpeculativeJIT.h"
#include "DFGThunks.h"
#include "JSCInlines.h"
#include "JSCJSValueInlines.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "StructureStubInfo.h"
#include "ThunkGenerators.h"
#include "VM.h"

namespace JSC { namespace DFG {

JITCompiler::JITCompiler(Graph& dfg)
    : CCallHelpers(dfg.m_codeBlock)
    , m_graph(dfg)
    , m_jitCode(adoptRef(new JITCode()))
    , m_blockHeads(dfg.numBlocks())
    , m_pcToCodeOriginMapBuilder(dfg.m_vm)
{
    if (UNLIKELY(shouldDumpDisassembly() || m_graph.m_vm.m_perBytecodeProfiler))
        m_disassembler = std::make_unique<Disassembler>(dfg);
#if ENABLE(FTL_JIT)
    m_jitCode->tierUpInLoopHierarchy = WTFMove(m_graph.m_plan.tierUpInLoopHierarchy);
    for (unsigned tierUpBytecode : m_graph.m_plan.tierUpAndOSREnterBytecodes)
        m_jitCode->tierUpEntryTriggers.add(tierUpBytecode, JITCode::TriggerReason::DontTrigger);
#endif
}

JITCompiler::~JITCompiler()
{
}

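// Note: DFG OSR exit generation is lazy. Each speculation-failure site below either
// jumps to a shared thunk (when Options::useProbeOSRExit() is enabled) or plants a
// patchable jump that link() later points at the exit-generation thunk; the ramp that
// actually reconstructs baseline state is only compiled the first time an exit fires.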
void JITCompiler::linkOSRExits()
{
    ASSERT(m_jitCode->osrExit.size() == m_exitCompilationInfo.size());
    if (UNLIKELY(m_graph.compilation())) {
        for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
            Vector<Label> labels;
            if (!info.m_failureJumps.empty()) {
                for (unsigned j = 0; j < info.m_failureJumps.jumps().size(); ++j)
                    labels.append(info.m_failureJumps.jumps()[j].label());
            } else
                labels.append(info.m_replacementSource);
            m_exitSiteLabels.append(labels);
        }
    }

    MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitThunkGenerator);
    CodeLocationLabel osrExitThunkLabel = CodeLocationLabel(osrExitThunk.code());
    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        JumpList& failureJumps = info.m_failureJumps;
        if (!failureJumps.empty())
            failureJumps.link(this);
        else
            info.m_replacementDestination = label();

        jitAssertHasValidCallFrame();
        store32(TrustedImm32(i), &vm()->osrExitIndex);
        if (Options::useProbeOSRExit()) {
            Jump target = jump();
            addLinkTask([target, osrExitThunkLabel] (LinkBuffer& linkBuffer) {
                linkBuffer.link(target, osrExitThunkLabel);
            });
        } else {
            OSRExit& exit = m_jitCode->osrExit[i];
            exit.setPatchableCodeOffset(patchableJump());
        }
    }
}

void JITCompiler::compileEntry()
{
    // This code currently matches the old JIT. In the function header we need to
    // save the return address and call frame via the prologue and perform a fast stack check.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
    // We'll need to convert the remaining cti_ style calls (specifically the stack
    // check), which will be dependent on stack layout. (We'd need to account for this in
    // both normal return code and when jumping to an exception handler.)
    emitFunctionPrologue();
    emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);
}

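// Save the callee-save registers this code block clobbers into the frame, and (on
// 64-bit value representations) materialize the constant tag registers that JSValue
// type checks rely on.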
void JITCompiler::compileSetupRegistersForEntry()
{
    emitSaveCalleeSaves();
    emitMaterializeTagCheckRegisters();
}

void JITCompiler::compileEntryExecutionFlag()
{
#if ENABLE(FTL_JIT)
    if (m_graph.m_plan.canTierUpAndOSREnter())
        store8(TrustedImm32(0), &m_jitCode->neverExecutedEntry);
#endif // ENABLE(FTL_JIT)
}

void JITCompiler::compileBody()
{
    // We generate the speculative code path, followed by OSR exit code to return
    // to the old JIT code if speculations fail.

    bool compiledSpeculative = m_speculative->compile();
    ASSERT_UNUSED(compiledSpeculative, compiledSpeculative);
}

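// Two flavors of exception check land here. m_exceptionChecksWithCallFrameRollback is
// for failures that happen before this frame is fully set up (e.g. stack or arity
// check failures), so the handler search starts from the caller's frame; plain
// m_exceptionChecks search for a handler starting from this frame.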
void JITCompiler::compileExceptionHandlers()
{
    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);

        copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame);

        // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallLinkRecord(call(), lookupExceptionHandlerFromCallerFrame));

        jumpToExceptionHandler(*vm());
    }

    if (!m_exceptionChecks.empty()) {
        m_exceptionChecks.link(this);

        copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame);

        // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));

        jumpToExceptionHandler(*vm());
    }
}

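// link() runs after all code has been generated: it populates the switch jump tables,
// finalizes the inline caches and call link infos, wires OSR exits to their thunk, and
// replaces the baseline exception handlers with the DFG's own.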
void JITCompiler::link(LinkBuffer& linkBuffer)
{
    // Link the code, populate data in CodeBlock data structures.
    m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount();
    m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit();

    if (!m_graph.m_plan.inlineCallFrames->isEmpty())
        m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames;

#if USE(JSVALUE32_64)
    m_jitCode->common.doubleConstants = WTFMove(m_graph.m_doubleConstants);
#endif

    m_graph.registerFrozenValues();

    BitVector usedJumpTables;
    for (Bag<SwitchData>::iterator iter = m_graph.m_switchData.begin(); !!iter; ++iter) {
        SwitchData& data = **iter;
        if (!data.didUseJumpTable)
            continue;

        if (data.kind == SwitchString)
            continue;

        RELEASE_ASSERT(data.kind == SwitchImm || data.kind == SwitchChar);

        usedJumpTables.set(data.switchTableIndex);
        SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
        table.ctiOffsets.grow(table.branchOffsets.size());
        for (unsigned j = table.ctiOffsets.size(); j--;)
            table.ctiOffsets[j] = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            table.ctiOffsets[myCase.value.switchLookupValue(data.kind) - table.min] =
                linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
        }
    }

    for (unsigned i = m_codeBlock->numberOfSwitchJumpTables(); i--;) {
        if (usedJumpTables.get(i))
            continue;

        m_codeBlock->switchJumpTable(i).clear();
    }

    // NOTE: we cannot clear string switch tables because (1) we're running concurrently
    // and we cannot deref StringImpl's and (2) it would be weird to deref those
    // StringImpl's since we refer to them.
    for (Bag<SwitchData>::iterator switchDataIter = m_graph.m_switchData.begin(); !!switchDataIter; ++switchDataIter) {
        SwitchData& data = **switchDataIter;
        if (!data.didUseJumpTable)
            continue;

        if (data.kind != SwitchString)
            continue;

        StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
        StringJumpTable::StringOffsetTable::iterator iter;
        StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
        for (iter = table.offsetTable.begin(); iter != end; ++iter)
            iter->value.ctiOffset = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            iter = table.offsetTable.find(myCase.value.stringImpl());
            RELEASE_ASSERT(iter != end);
            iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
        }
    }

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    for (unsigned i = m_getByIds.size(); i--;)
        m_getByIds[i].finalize(linkBuffer);
    for (unsigned i = m_getByIdsWithThis.size(); i--;)
        m_getByIdsWithThis[i].finalize(linkBuffer);
    for (unsigned i = m_putByIds.size(); i--;)
        m_putByIds[i].finalize(linkBuffer);

    for (unsigned i = 0; i < m_ins.size(); ++i) {
        StructureStubInfo& info = *m_ins[i].m_stubInfo;

        CodeLocationLabel start = linkBuffer.locationOf(m_ins[i].m_jump);
        info.patch.start = start;

        ptrdiff_t inlineSize = MacroAssembler::differenceBetweenCodePtr(
            start, linkBuffer.locationOf(m_ins[i].m_done));
        RELEASE_ASSERT(inlineSize >= 0);
        info.patch.inlineSize = inlineSize;

        info.patch.deltaFromStartToSlowPathCallLocation = MacroAssembler::differenceBetweenCodePtr(
            start, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->call()));

        info.patch.deltaFromStartToSlowPathStart = MacroAssembler::differenceBetweenCodePtr(
            start, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->label()));
    }

    for (auto& record : m_jsCalls) {
        CallLinkInfo& info = *record.info;
        linkBuffer.link(record.slowCall, FunctionPtr(vm()->getCTIStub(linkCallThunkGenerator).code()));
        info.setCallLocations(
            CodeLocationLabel(linkBuffer.locationOfNearCall(record.slowCall)),
            CodeLocationLabel(linkBuffer.locationOf(record.targetToCheck)),
            linkBuffer.locationOfNearCall(record.fastCall));
    }

    for (JSDirectCallRecord& record : m_jsDirectCalls) {
        CallLinkInfo& info = *record.info;
        linkBuffer.link(record.call, linkBuffer.locationOf(record.slowPath));
        info.setCallLocations(
            CodeLocationLabel(),
            linkBuffer.locationOf(record.slowPath),
            linkBuffer.locationOfNearCall(record.call));
    }

    for (JSDirectTailCallRecord& record : m_jsDirectTailCalls) {
        CallLinkInfo& info = *record.info;
        info.setCallLocations(
            linkBuffer.locationOf(record.patchableJump),
            linkBuffer.locationOf(record.slowPath),
            linkBuffer.locationOfNearCall(record.call));
    }

    MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
    CodeLocationLabel target = CodeLocationLabel(osrExitThunk.code());
    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        if (!Options::useProbeOSRExit()) {
            OSRExit& exit = m_jitCode->osrExit[i];
            linkBuffer.link(exit.getPatchableCodeOffsetAsJump(), target);
            exit.correctJump(linkBuffer);
        }
        if (info.m_replacementSource.isSet()) {
            m_jitCode->common.jumpReplacements.append(JumpReplacement(
                linkBuffer.locationOf(info.m_replacementSource),
                linkBuffer.locationOf(info.m_replacementDestination)));
        }
    }

    if (UNLIKELY(m_graph.compilation())) {
        ASSERT(m_exitSiteLabels.size() == m_jitCode->osrExit.size());
        for (unsigned i = 0; i < m_exitSiteLabels.size(); ++i) {
            Vector<Label>& labels = m_exitSiteLabels[i];
            Vector<const void*> addresses;
            for (unsigned j = 0; j < labels.size(); ++j)
                addresses.append(linkBuffer.locationOf(labels[j]).executableAddress());
            m_graph.compilation()->addOSRExitSite(addresses);
        }
    } else
        ASSERT(!m_exitSiteLabels.size());

    m_jitCode->common.compilation = m_graph.compilation();

    // Link new DFG exception handlers and remove baseline JIT handlers.
    m_codeBlock->clearExceptionHandlers();
    for (unsigned i = 0; i < m_exceptionHandlerOSRExitCallSites.size(); i++) {
        OSRExitCompilationInfo& info = m_exceptionHandlerOSRExitCallSites[i].exitInfo;
        if (info.m_replacementDestination.isSet()) {
            // If this is *not* set, it means that we already jumped to the OSR exit in pure generated control flow,
            // i.e., we explicitly emitted an exceptionCheck that we know will be caught in this machine frame.
            // If this *is* set, it means we will be landing at this code location from genericUnwind from an
            // exception thrown in a child call frame.
            CodeLocationLabel catchLabel = linkBuffer.locationOf(info.m_replacementDestination);
            HandlerInfo newExceptionHandler = m_exceptionHandlerOSRExitCallSites[i].baselineExceptionHandler;
            CallSiteIndex callSite = m_exceptionHandlerOSRExitCallSites[i].callSiteIndex;
            newExceptionHandler.start = callSite.bits();
            newExceptionHandler.end = callSite.bits() + 1;
            newExceptionHandler.nativeCode = catchLabel;
            m_codeBlock->appendExceptionHandler(newExceptionHandler);
        }
    }

    if (m_pcToCodeOriginMapBuilder.didBuildMapping())
        m_codeBlock->setPCToCodeOriginMap(std::make_unique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), linkBuffer));
}

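// frameTopOffset is negative (the stack grows down), so regT1 ends up holding the
// lowest address this frame may touch. If the frame is bigger than the reserved zone,
// the addition itself could wrap around; the extra branch catches that by checking
// whether the computed "top" landed above the call frame before comparing against the
// VM's soft stack limit.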
static void emitStackOverflowCheck(JITCompiler& jit, MacroAssembler::JumpList& stackOverflow)
{
    int frameTopOffset = virtualRegisterForLocal(jit.graph().requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register);
    unsigned maxFrameSize = -frameTopOffset;

    jit.addPtr(MacroAssembler::TrustedImm32(frameTopOffset), GPRInfo::callFrameRegister, GPRInfo::regT1);
    if (UNLIKELY(maxFrameSize > Options::reservedZoneSize()))
        stackOverflow.append(jit.branchPtr(MacroAssembler::Above, GPRInfo::regT1, GPRInfo::callFrameRegister));
    stackOverflow.append(jit.branchPtr(MacroAssembler::Above, MacroAssembler::AbsoluteAddress(jit.vm()->addressOfSoftStackLimit()), GPRInfo::regT1));
}

void JITCompiler::compile()
{
    makeCatchOSREntryBuffer();

    setStartOfCode();
    compileEntry();
    m_speculative = std::make_unique<SpeculativeJIT>(*this);

    // Plant a check that sufficient space is available in the JSStack.
    JumpList stackOverflow;
    emitStackOverflowCheck(*this, stackOverflow);

    addPtr(TrustedImm32(-(m_graph.frameRegisterCount() * sizeof(Register))), GPRInfo::callFrameRegister, stackPointerRegister);
    if (Options::zeroStackFrame())
        clearStackFrame(GPRInfo::callFrameRegister, stackPointerRegister, GPRInfo::regT0, m_graph.frameRegisterCount() * sizeof(Register));
    checkStackPointerAlignment();
    compileSetupRegistersForEntry();
    compileEntryExecutionFlag();
    compileBody();
    setEndOfMainPath();

    // === Footer code generation ===
    //
    // Generate the stack overflow handling; if the stack check in the entry head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators(m_pcToCodeOriginMapBuilder);
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();

    auto linkBuffer = std::make_unique<LinkBuffer>(*this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
        return;
    }

    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    disassemble(*linkBuffer);

    m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
        m_graph.m_plan, m_jitCode.releaseNonNull(), WTFMove(linkBuffer));
}

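// Like compile(), but for function code: in addition to the fast entry point, this
// also emits the arity-check entry point (m_arityCheck) used when a caller might pass
// the wrong number of arguments.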
void JITCompiler::compileFunction()
{
    makeCatchOSREntryBuffer();

    setStartOfCode();
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    JumpList stackOverflow;
    emitStackOverflowCheck(*this, stackOverflow);

    // Move the stack pointer down to accommodate locals.
    addPtr(TrustedImm32(-(m_graph.frameRegisterCount() * sizeof(Register))), GPRInfo::callFrameRegister, stackPointerRegister);
    if (Options::zeroStackFrame())
        clearStackFrame(GPRInfo::callFrameRegister, stackPointerRegister, GPRInfo::regT0, m_graph.frameRegisterCount() * sizeof(Register));
    checkStackPointerAlignment();

    compileSetupRegistersForEntry();
    compileEntryExecutionFlag();

    // === Function body code generation ===
    m_speculative = std::make_unique<SpeculativeJIT>(*this);
    compileBody();
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate code to perform the stack overflow handling (if the stack check in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the stack overflow handling; if the stack check in the function head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // The fast entry point into a function does not check that the correct number of
    // arguments has been passed to the call (we only use the fast entry point where we
    // can statically determine that the correct number of arguments has been passed,
    // or have already checked). In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
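    // Illustrative example: for `function f(a, b) { ... }`, the call `f(1)` passes fewer
    // arguments than numParameters, so it enters here. The arity-check operation returns a
    // non-zero fixup amount in returnValueGPR, and the arity fixup thunk below grows the
    // frame (filling the missing argument with undefined) before jumping to fromArityCheck.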
    m_arityCheck = label();
    compileEntry();

    load32(AssemblyHelpers::payloadFor((VirtualRegister)CallFrameSlot::argumentCount), GPRInfo::regT1);
    branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
    m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::regT0);
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
    branchTest32(Zero, GPRInfo::returnValueGPR).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    move(GPRInfo::returnValueGPR, GPRInfo::argumentGPR0);
    m_callArityFixup = call();
    jump(fromArityCheck);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators(m_pcToCodeOriginMapBuilder);
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();

    // === Link ===
    auto linkBuffer = std::make_unique<LinkBuffer>(*this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
        return;
    }
    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    linkBuffer->link(m_callArityFixup, FunctionPtr(vm()->getCTIStub(arityFixupGenerator).code()));

    disassemble(*linkBuffer);

    MacroAssemblerCodePtr withArityCheck = linkBuffer->locationOf(m_arityCheck);

    m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
        m_graph.m_plan, m_jitCode.releaseNonNull(), WTFMove(linkBuffer), withArityCheck);
}

void JITCompiler::disassemble(LinkBuffer& linkBuffer)
{
    if (shouldDumpDisassembly()) {
        m_disassembler->dump(linkBuffer);
        linkBuffer.didAlreadyDisassemble();
    }

    if (UNLIKELY(m_graph.m_plan.compilation))
        m_disassembler->reportToProfiler(m_graph.m_plan.compilation.get(), linkBuffer);
}

#if USE(JSVALUE32_64)
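// Doubles cannot be baked into instructions as immediates under the 32-bit value
// representation, so they live in a side constant pool. The map below is keyed on the
// raw bit pattern rather than the double value, which keeps +0.0 and -0.0 distinct and
// gives every NaN bit pattern its own slot.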
void* JITCompiler::addressOfDoubleConstant(Node* node)
{
    double value = node->asNumber();
    int64_t valueBits = bitwise_cast<int64_t>(value);
    auto it = m_graph.m_doubleConstantsMap.find(valueBits);
    if (it != m_graph.m_doubleConstantsMap.end())
        return it->second;

    if (!m_graph.m_doubleConstants)
        m_graph.m_doubleConstants = std::make_unique<Bag<double>>();

    double* addressInConstantPool = m_graph.m_doubleConstants->add();
    *addressInConstantPool = value;
    m_graph.m_doubleConstantsMap[valueBits] = addressInConstantPool;
    return addressInConstantPool;
}
#endif

void JITCompiler::noticeCatchEntrypoint(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer, Vector<FlushFormat>&& argumentFormats)
{
    RELEASE_ASSERT(basicBlock.isCatchEntrypoint);
    RELEASE_ASSERT(basicBlock.intersectionOfCFAHasVisited); // An entrypoint is reachable by definition.
    m_jitCode->common.appendCatchEntrypoint(basicBlock.bytecodeBegin, linkBuffer.locationOf(blockHead).executableAddress(), WTFMove(argumentFormats));
}

void JITCompiler::noticeOSREntry(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer)
{
    RELEASE_ASSERT(!basicBlock.isCatchEntrypoint);

    // OSR entry is not allowed into blocks deemed unreachable by control flow analysis.
    if (!basicBlock.intersectionOfCFAHasVisited)
        return;

    OSREntryData* entry = m_jitCode->appendOSREntryData(basicBlock.bytecodeBegin, linkBuffer.offsetOf(blockHead));

    entry->m_expectedValues = basicBlock.intersectionOfPastValuesAtHead;

    // Fix the expected values: in our protocol, a dead variable will have an expected
    // value of (None, []). But the old JIT may stash some values there. So we really
    // need (Top, TOP).
    for (size_t argument = 0; argument < basicBlock.variablesAtHead.numberOfArguments(); ++argument) {
        Node* node = basicBlock.variablesAtHead.argument(argument);
        if (!node || !node->shouldGenerate())
            entry->m_expectedValues.argument(argument).makeHeapTop();
    }
    for (size_t local = 0; local < basicBlock.variablesAtHead.numberOfLocals(); ++local) {
        Node* node = basicBlock.variablesAtHead.local(local);
        if (!node || !node->shouldGenerate())
            entry->m_expectedValues.local(local).makeHeapTop();
        else {
            VariableAccessData* variable = node->variableAccessData();
            entry->m_machineStackUsed.set(variable->machineLocal().toLocal());

            switch (variable->flushFormat()) {
            case FlushedDouble:
                entry->m_localsForcedDouble.set(local);
                break;
            case FlushedInt52:
                entry->m_localsForcedAnyInt.set(local);
                break;
            default:
                break;
            }

            if (variable->local() != variable->machineLocal()) {
                entry->m_reshufflings.append(
                    OSREntryReshuffling(
                        variable->local().offset(), variable->machineLocal().offset()));
            }
        }
    }

    entry->m_reshufflings.shrinkToFit();
}

void JITCompiler::appendExceptionHandlingOSRExit(ExitKind kind, unsigned eventStreamIndex, CodeOrigin opCatchOrigin, HandlerInfo* exceptionHandler, CallSiteIndex callSite, MacroAssembler::JumpList jumpsToFail)
{
    OSRExit exit(kind, JSValueRegs(), MethodOfGettingAValueProfile(), m_speculative.get(), eventStreamIndex);
    exit.m_codeOrigin = opCatchOrigin;
    exit.m_exceptionHandlerCallSiteIndex = callSite;
    OSRExitCompilationInfo& exitInfo = appendExitInfo(jumpsToFail);
    jitCode()->appendOSRExit(exit);
    m_exceptionHandlerOSRExitCallSites.append(ExceptionHandlingOSRExitInfo { exitInfo, *exceptionHandler, callSite });
}

void JITCompiler::exceptionCheck()
{
    // It's important that we use origin.forExit here. Consider what happens if we hoist
    // string addition outside a loop, and then exit at the point of that concatenation
    // because of an out-of-memory exception.
    // If the original loop had a try/catch around the string concatenation, and we "catch"
    // that exception inside the loop, then the loop's induction variable will be undefined
    // in the OSR exit value recovery. It's more defensible for the string concatenation,
    // then, to not be caught by the for loop's try/catch.
    // Here is the program I'm speaking about:
    //
    // >>>> let's presume "c = a + b" gets hoisted here.
    // for (var i = 0; i < length; i++) {
    //     try {
    //         c = a + b
    //     } catch(e) {
    //         If we threw an out-of-memory error, and we caught the exception
    //         right here, then "i" would almost certainly be undefined, which
    //         would make no sense.
    //         ...
    //     }
    // }
    CodeOrigin opCatchOrigin;
    HandlerInfo* exceptionHandler;
    bool willCatchException = m_graph.willCatchExceptionInMachineFrame(m_speculative->m_currentNode->origin.forExit, opCatchOrigin, exceptionHandler);
    if (willCatchException) {
        unsigned streamIndex = m_speculative->m_outOfLineStreamIndex ? *m_speculative->m_outOfLineStreamIndex : m_speculative->m_stream->size();
        MacroAssembler::Jump hadException = emitNonPatchableExceptionCheck(*vm());
        // We assume here that this is called after callOperation()/appendCall() is called.
        appendExceptionHandlingOSRExit(ExceptionCheck, streamIndex, opCatchOrigin, exceptionHandler, m_jitCode->common.lastCallSite(), hadException);
    } else
        m_exceptionChecks.append(emitExceptionCheck(*vm()));
}

CallSiteIndex JITCompiler::recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(const CodeOrigin& callSiteCodeOrigin, unsigned eventStreamIndex)
{
    CodeOrigin opCatchOrigin;
    HandlerInfo* exceptionHandler;
    bool willCatchException = m_graph.willCatchExceptionInMachineFrame(callSiteCodeOrigin, opCatchOrigin, exceptionHandler);
    CallSiteIndex callSite = addCallSite(callSiteCodeOrigin);
    if (willCatchException)
        appendExceptionHandlingOSRExit(GenericUnwind, eventStreamIndex, opCatchOrigin, exceptionHandler, callSite);
    return callSite;
}

void JITCompiler::setEndOfMainPath()
{
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), m_speculative->m_origin.semantic);
    if (LIKELY(!m_disassembler))
        return;
    m_disassembler->setEndOfMainPath(labelIgnoringWatchpoints());
}

void JITCompiler::setEndOfCode()
{
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
    if (LIKELY(!m_disassembler))
        return;
    m_disassembler->setEndOfCode(labelIgnoringWatchpoints());
}

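// OSR entry at op_catch needs a buffer to stash the values of live locals while the
// frame is rebuilt. Allocate it eagerly if this graph has any catch entrypoints, and
// never allocate a zero-sized (null) buffer.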
void JITCompiler::makeCatchOSREntryBuffer()
{
    if (m_graph.m_maxLocalsForCatchOSREntry) {
        uint32_t numberOfLiveLocals = std::max(*m_graph.m_maxLocalsForCatchOSREntry, 1u); // Make sure we always allocate a non-null catchOSREntryBuffer.
        m_jitCode->common.catchOSREntryBuffer = vm()->scratchBufferForSize(sizeof(JSValue) * numberOfLiveLocals);
    }
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)