/*
 * Copyright (C) 2011, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGJITCompiler.h"

#if ENABLE(DFG_JIT)

#include "ArityCheckFailReturnThunks.h"
#include "CodeBlock.h"
#include "DFGFailedFinalizer.h"
#include "DFGInlineCacheWrapperInlines.h"
#include "DFGJITCode.h"
#include "DFGJITFinalizer.h"
#include "DFGOSRExitCompiler.h"
#include "DFGOperations.h"
#include "DFGRegisterBank.h"
#include "DFGSlowPathGenerator.h"
#include "DFGSpeculativeJIT.h"
#include "DFGThunks.h"
#include "JSCInlines.h"
#include "JSCJSValueInlines.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "VM.h"

namespace JSC { namespace DFG {

JITCompiler::JITCompiler(Graph& dfg)
    : CCallHelpers(&dfg.m_vm, dfg.m_codeBlock)
    , m_graph(dfg)
    , m_jitCode(adoptRef(new JITCode()))
    , m_blockHeads(dfg.numBlocks())
{
    if (shouldShowDisassembly() || m_graph.m_vm.m_perBytecodeProfiler)
        m_disassembler = adoptPtr(new Disassembler(dfg));
}

JITCompiler::~JITCompiler()
{
}

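// Finish the OSR exit machinery for every speculation check recorded during
// code generation. When profiling, we first remember a label for each exit
// site so the profiler can later report exit addresses. Then, for each exit,
// we bind its failure jumps (or, for exits taken via jump replacement, record
// the destination label) to code that stores the exit's index into
// vm()->osrExitIndex and plants a patchable jump; link(LinkBuffer&) below
// points that jump at the shared OSR exit generation thunk.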
void JITCompiler::linkOSRExits()
{
    ASSERT(m_jitCode->osrExit.size() == m_exitCompilationInfo.size());
    if (m_graph.compilation()) {
        for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
            Vector<Label> labels;
            if (!info.m_failureJumps.empty()) {
                for (unsigned j = 0; j < info.m_failureJumps.jumps().size(); ++j)
                    labels.append(info.m_failureJumps.jumps()[j].label());
            } else
                labels.append(info.m_replacementSource);
            m_exitSiteLabels.append(labels);
        }
    }

    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExit& exit = m_jitCode->osrExit[i];
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        JumpList& failureJumps = info.m_failureJumps;
        if (!failureJumps.empty())
            failureJumps.link(this);
        else
            info.m_replacementDestination = label();
        jitAssertHasValidCallFrame();
        store32(TrustedImm32(i), &vm()->osrExitIndex);
        exit.setPatchableCodeOffset(patchableJump());
    }
}

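// A sketch of what compileEntry() emits on x86-64 (illustrative only; the
// actual instruction selection is up to the MacroAssembler):
//
//     push %rbp                              ; emitFunctionPrologue: save the
//     mov  %rsp, %rbp                        ;   caller's frame pointer and
//                                            ;   establish our own
//     movq $codeBlock, CodeBlockSlot(%rbp)   ; emitPutImmediateToCallFrameHeader:
//                                            ;   tag the call frame with our
//                                            ;   CodeBlock
//
// jitAssertTagsInPlace() only emits checking code when JIT assertions are
// enabled; it is a no-op in release builds.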
void JITCompiler::compileEntry()
{
    // This code currently matches the old JIT. In the function header we need to
    // save the return address and call frame via the prologue and perform a fast stack check.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
    // We'll need to convert the remaining cti_ style calls (specifically the stack
    // check), which will be dependent on stack layout. (We'd need to account for this in
    // both the normal return code and when jumping to an exception handler.)
    emitFunctionPrologue();
    emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
    jitAssertTagsInPlace();
}

void JITCompiler::compileBody()
{
    // We generate the speculative code path, followed by OSR exit code to return
    // to the old JIT code if speculations fail.

    bool compiledSpeculative = m_speculative->compile();
    ASSERT_UNUSED(compiledSpeculative, compiledSpeculative);
}

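// Emit the shared landing pads for exception checks. Two flavors exist: plain
// checks, which look up the handler for the current call frame, and checks
// with call frame rollback, which first fetch the caller's frame (used on
// paths, such as arity check failure, where our own frame is not fully set
// up). Both flavors funnel into lookupExceptionHandler(VM*, ExecState*) and
// then jump to whatever handler it selects via jumpToExceptionHandler().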
void JITCompiler::compileExceptionHandlers()
{
    if (m_exceptionChecks.empty() && m_exceptionChecksWithCallFrameRollback.empty())
        return;

    Jump doLookup;

    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);
        emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::argumentGPR1);
        doLookup = jump();
    }

    if (!m_exceptionChecks.empty())
        m_exceptionChecks.link(this);

    // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
    move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

    if (doLookup.isSet())
        doLookup.link(this);

    move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);

#if CPU(X86)
    // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
    poke(GPRInfo::argumentGPR0);
    poke(GPRInfo::argumentGPR1, 1);
#endif
    m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));
    jumpToExceptionHandler();
}

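// Resolve everything that code generation left symbolic, now that final code
// addresses are known: record frame layout facts in the JITCode, populate
// switch jump tables (every slot is seeded with the fall-through target so
// values with no explicit case land safely, then the known cases are patched
// in), bind outgoing calls, finalize get/put/in inline cache info, fill in JS
// call link info, and point each OSR exit's patchable jump at the shared OSR
// exit generation thunk.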
void JITCompiler::link(LinkBuffer& linkBuffer)
{
    // Link the code, populate data in CodeBlock data structures.
    m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount();
    m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit();

    if (!m_graph.m_plan.inlineCallFrames->isEmpty())
        m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames;

    m_jitCode->common.machineCaptureStart = m_graph.m_machineCaptureStart;
    m_jitCode->common.slowArguments = std::move(m_graph.m_slowArguments);

#if USE(JSVALUE32_64)
    m_jitCode->common.doubleConstants = std::move(m_graph.m_doubleConstants);
#endif

    BitVector usedJumpTables;
    for (Bag<SwitchData>::iterator iter = m_graph.m_switchData.begin(); !!iter; ++iter) {
        SwitchData& data = **iter;
        if (!data.didUseJumpTable)
            continue;

        if (data.kind == SwitchString)
            continue;

        RELEASE_ASSERT(data.kind == SwitchImm || data.kind == SwitchChar);

        usedJumpTables.set(data.switchTableIndex);
        SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
        table.ctiOffsets.grow(table.branchOffsets.size());
        for (unsigned j = table.ctiOffsets.size(); j--;)
            table.ctiOffsets[j] = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            table.ctiOffsets[myCase.value.switchLookupValue() - table.min] =
                linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
        }
    }

    for (unsigned i = m_codeBlock->numberOfSwitchJumpTables(); i--;) {
        if (usedJumpTables.get(i))
            continue;

        m_codeBlock->switchJumpTable(i).clear();
    }

    // NOTE: we cannot clear string switch tables because (1) we're running concurrently
    // and we cannot deref StringImpls and (2) it would be weird to deref those
    // StringImpls since we refer to them.
    for (Bag<SwitchData>::iterator switchDataIter = m_graph.m_switchData.begin(); !!switchDataIter; ++switchDataIter) {
        SwitchData& data = **switchDataIter;
        if (!data.didUseJumpTable)
            continue;

        if (data.kind != SwitchString)
            continue;

        StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
        StringJumpTable::StringOffsetTable::iterator iter;
        StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
        for (iter = table.offsetTable.begin(); iter != end; ++iter)
            iter->value.ctiOffset = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            iter = table.offsetTable.find(myCase.value.stringImpl());
            RELEASE_ASSERT(iter != end);
            iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
        }
    }

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    for (unsigned i = m_getByIds.size(); i--;)
        m_getByIds[i].finalize(linkBuffer);
    for (unsigned i = m_putByIds.size(); i--;)
        m_putByIds[i].finalize(linkBuffer);

    for (unsigned i = 0; i < m_ins.size(); ++i) {
        StructureStubInfo& info = *m_ins[i].m_stubInfo;
        CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->call());
        info.patch.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_done));
        info.patch.deltaCallToJump = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_jump));
        info.callReturnLocation = callReturnLocation;
        info.patch.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->label()));
    }

    for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
        JSCallRecord& record = m_jsCalls[i];
        CallLinkInfo& info = *record.m_info;
        ThunkGenerator generator = linkThunkGeneratorFor(
            info.callType == CallLinkInfo::Construct ? CodeForConstruct : CodeForCall,
            RegisterPreservationNotRequired);
        linkBuffer.link(record.m_slowCall, FunctionPtr(m_vm->getCTIStub(generator).code().executableAddress()));
        info.callReturnLocation = linkBuffer.locationOfNearCall(record.m_slowCall);
        info.hotPathBegin = linkBuffer.locationOf(record.m_targetToCheck);
        info.hotPathOther = linkBuffer.locationOfNearCall(record.m_fastCall);
    }

    MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
    CodeLocationLabel target = CodeLocationLabel(osrExitThunk.code());
    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExit& exit = m_jitCode->osrExit[i];
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        linkBuffer.link(exit.getPatchableCodeOffsetAsJump(), target);
        exit.correctJump(linkBuffer);
        if (info.m_replacementSource.isSet()) {
            m_jitCode->common.jumpReplacements.append(JumpReplacement(
                linkBuffer.locationOf(info.m_replacementSource),
                linkBuffer.locationOf(info.m_replacementDestination)));
        }
    }

    if (m_graph.compilation()) {
        ASSERT(m_exitSiteLabels.size() == m_jitCode->osrExit.size());
        for (unsigned i = 0; i < m_exitSiteLabels.size(); ++i) {
            Vector<Label>& labels = m_exitSiteLabels[i];
            Vector<const void*> addresses;
            for (unsigned j = 0; j < labels.size(); ++j)
                addresses.append(linkBuffer.locationOf(labels[j]).executableAddress());
            m_graph.compilation()->addOSRExitSite(addresses);
        }
    } else
        ASSERT(!m_exitSiteLabels.size());

    m_jitCode->common.compilation = m_graph.compilation();
}

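// Compile an eval or program code block. Unlike compileFunction() below, this
// path has no arity-check entry point and emits no stack check of its own: we
// set up the frame, lower the whole graph via the SpeculativeJIT, then emit
// the out-of-line slow paths, exception handlers, and OSR exit jumps.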
void JITCompiler::compile()
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();
    m_speculative = adoptPtr(new SpeculativeJIT(*this));
    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();
    compileBody();
    setEndOfMainPath();

    // Generate slow path code.
    m_speculative->runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();
}

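// Allocate executable memory and link the program/eval path. The LinkBuffer is
// created with JITCompilationCanFail, so under executable memory pressure we
// install a FailedFinalizer and let the plan report failure rather than crash.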
void JITCompiler::link()
{
    OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail));
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = adoptPtr(new FailedFinalizer(m_graph.m_plan));
        return;
    }

    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    disassemble(*linkBuffer);

    m_graph.m_plan.finalizer = adoptPtr(new JITFinalizer(
        m_graph.m_plan, m_jitCode.release(), linkBuffer.release()));
}

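// Compile a function code block. This emits two entry points: the fast entry
// point at the top, used when the caller is known to pass the correct number
// of arguments, and the arity-check entry point at m_arityCheck, which counts
// the arguments, calls out to fix up the frame if there are too few, and then
// rejoins the fast path at fromArityCheck.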
void JITCompiler::compileFunction()
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);

    // Move the stack pointer down to accommodate locals.
    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    // === Function body code generation ===
    m_speculative = adoptPtr(new SpeculativeJIT(*this));
    compileBody();
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate the stack overflow handling and the entry point with arity check.
    //
    // If the stack check in the function header fails, we need to call out to a
    // helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // The fast entry point into a function does not check that the correct number of
    // arguments has been passed to the call (we only use the fast entry point where we
    // can statically determine that the correct number of arguments has been passed, or
    // have already checked). In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    m_arityCheck = label();
    compileEntry();

    load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
    branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
    m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::regT0);
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
    branchTest32(Zero, GPRInfo::regT0).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    GPRReg thunkReg;
#if USE(JSVALUE64)
    thunkReg = GPRInfo::regT7;
#else
    thunkReg = GPRInfo::regT5;
#endif
    move(TrustedImmPtr(m_vm->arityCheckFailReturnThunks->returnPCsFor(*m_vm, m_codeBlock->numParameters())), thunkReg);
    loadPtr(BaseIndex(thunkReg, GPRInfo::regT0, timesPtr()), thunkReg);
    m_callArityFixup = call();
    jump(fromArityCheck);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();
}

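// Link a function code block: the same as link() above, plus binding the
// arity fixup thunk call and recording the arity-check entry point so the
// finalizer can publish both entry addresses.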
void JITCompiler::linkFunction()
{
    // === Link ===
    OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail));
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = adoptPtr(new FailedFinalizer(m_graph.m_plan));
        return;
    }
    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixup)).code().executableAddress()));

    disassemble(*linkBuffer);

    MacroAssemblerCodePtr withArityCheck = linkBuffer->locationOf(m_arityCheck);

    m_graph.m_plan.finalizer = adoptPtr(new JITFinalizer(
        m_graph.m_plan, m_jitCode.release(), linkBuffer.release(), withArityCheck));
}

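// Dump the generated code when disassembly is requested (see
// shouldShowDisassembly()) and, when profiling, attach the disassembly to the
// compilation record.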
void JITCompiler::disassemble(LinkBuffer& linkBuffer)
{
    if (shouldShowDisassembly())
        m_disassembler->dump(linkBuffer);

    if (m_graph.m_plan.compilation)
        m_disassembler->reportToProfiler(m_graph.m_plan.compilation.get(), linkBuffer);
}

#if USE(JSVALUE32_64)
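// On 32-bit platforms a double constant cannot be baked into a single
// immediate, so each distinct double lives in an out-of-line constant pool and
// is loaded from memory. The map is keyed by the raw 64-bit bit pattern
// rather than the double's value so that distinct encodings stay distinct:
// for example, +0.0 and -0.0 compare equal as doubles but need separate pool
// entries, and NaN would otherwise misbehave as a hash key.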
void* JITCompiler::addressOfDoubleConstant(Node* node)
{
    ASSERT(m_graph.isNumberConstant(node));
    JSValue jsvalue = node->valueOfJSConstant(codeBlock());
    ASSERT(jsvalue.isDouble());

    double value = jsvalue.asDouble();
    int64_t valueBits = bitwise_cast<int64_t>(value);
    auto it = m_graph.m_doubleConstantsMap.find(valueBits);
    if (it != m_graph.m_doubleConstantsMap.end())
        return it->second;

    if (!m_graph.m_doubleConstants)
        m_graph.m_doubleConstants = std::make_unique<Bag<double>>();

    double* addressInConstantPool = m_graph.m_doubleConstants->add();
    *addressInConstantPool = value;
    m_graph.m_doubleConstantsMap[valueBits] = addressInConstantPool;
    return addressInConstantPool;
}
#endif

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)