Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
/*
 * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGJITCompiler.h"

#if ENABLE(DFG_JIT)

#include "CodeBlock.h"
#include "DFGJITCode.h"
#include "DFGOSRExitCompiler.h"
#include "DFGOperations.h"
#include "DFGRegisterBank.h"
#include "DFGSlowPathGenerator.h"
#include "DFGSpeculativeJIT.h"
#include "DFGThunks.h"
#include "JSCJSValueInlines.h"
#include "LinkBuffer.h"
#include "VM.h"

namespace JSC { namespace DFG {

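// The JITCompiler is the DFG's code-generation back end: it walks the Graph
// produced by the front end and emits machine code via the SpeculativeJIT.
// A Disassembler is created up front only when its output might be consumed
// later, i.e. when disassembly dumping or per-bytecode profiling is enabled.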
JITCompiler::JITCompiler(Graph& dfg)
    : CCallHelpers(&dfg.m_vm, dfg.m_codeBlock)
    , m_graph(dfg)
    , m_jitCode(adoptRef(new JITCode()))
    , m_currentCodeOriginIndex(0)
{
    if (shouldShowDisassembly() || m_graph.m_vm.m_perBytecodeProfiler)
        m_disassembler = adoptPtr(new Disassembler(dfg));
}

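// Each OSR exit is triggered either by a set of failure jumps or by a
// watchpoint. When profiling, we first record the source label of every exit
// site; then we bind each exit to a patchable jump, which link() will later
// route to the shared OSR exit thunk.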
void JITCompiler::linkOSRExits()
{
    ASSERT(m_jitCode->osrExit.size() == m_exitCompilationInfo.size());
    if (m_graph.m_compilation) {
        for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
            OSRExit& exit = m_jitCode->osrExit[i];
            Vector<Label> labels;
            if (exit.m_watchpointIndex == std::numeric_limits<unsigned>::max()) {
                OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
                for (unsigned j = 0; j < info.m_failureJumps.jumps().size(); ++j)
                    labels.append(info.m_failureJumps.jumps()[j].label());
            } else
                labels.append(m_jitCode->watchpoints[exit.m_watchpointIndex].sourceLabel());
            m_exitSiteLabels.append(labels);
        }
    }

    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExit& exit = m_jitCode->osrExit[i];
        JumpList& failureJumps = m_exitCompilationInfo[i].m_failureJumps;
        ASSERT(failureJumps.empty() == (exit.m_watchpointIndex != std::numeric_limits<unsigned>::max()));
        if (exit.m_watchpointIndex == std::numeric_limits<unsigned>::max())
            failureJumps.link(this);
        else
            m_jitCode->watchpoints[exit.m_watchpointIndex].setDestination(label());
        jitAssertHasValidCallFrame();
        store32(TrustedImm32(i), &vm()->osrExitIndex);
        exit.setPatchableCodeOffset(patchableJump());
    }
}

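// Emits the shared prologue: stash the popped return PC and the CodeBlock
// pointer into the call frame header so the frame is walkable.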
void JITCompiler::compileEntry()
{
    // This code currently matches the old JIT. In the function header we need to
    // pop the return address (since we do not allow any recursion on the machine
    // stack), and perform a fast stack check.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
    // We'll need to convert the remaining cti_ style calls (specifically the stack
    // check) which will be dependent on stack layout. (We'd need to account for this in
    // both normal return code and when jumping to an exception handler).
    preserveReturnAddressAfterCall(GPRInfo::regT2);
    emitPutToCallFrameHeader(GPRInfo::regT2, JSStack::ReturnPC);
    emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
}

void JITCompiler::compileBody(SpeculativeJIT& speculative)
{
    // We generate the speculative code path, followed by OSR exit code to return
    // to the old JIT code if speculations fail.

#if DFG_ENABLE(JIT_BREAK_ON_EVERY_FUNCTION)
    // Handy debug tool!
    breakpoint();
#endif

    bool compiledSpeculative = speculative.compile();
    ASSERT_UNUSED(compiledSpeculative, compiledSpeculative);
}

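// Binds every planted exception check to a single out-of-line sequence that
// calls lookupExceptionHandler and jumps to whatever handler it returns.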
void JITCompiler::compileExceptionHandlers()
{
    // Iterate over the m_exceptionChecks vector, linking any exception check jumps that were set.
    bool didLinkExceptionCheck = false;
    for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
        Jump& exceptionCheck = m_exceptionChecks[i].m_exceptionCheck;
        if (exceptionCheck.isSet()) {
            exceptionCheck.link(this);
            didLinkExceptionCheck = true;
        }
    }

    // If any exception checks were linked, generate code to look up a handler.
    if (didLinkExceptionCheck) {
        // lookupExceptionHandler is passed two arguments, exec (the CallFrame*), and
        // the index into the CodeBlock's callReturnIndexVector corresponding to the
        // call that threw the exception (this was set in nonPreservedNonReturnGPR, when
        // the exception check was planted).
        move(GPRInfo::nonPreservedNonReturnGPR, GPRInfo::argumentGPR1);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));
        // lookupExceptionHandler leaves the handler CallFrame* in the returnValueGPR,
        // and the address of the handler in returnValueGPR2.
        jump(GPRInfo::returnValueGPR2);
    }
}

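// Once code generation is complete, link() resolves everything that could not
// be known until the code's final location was fixed: outgoing C calls,
// exception-check return offsets and code origins, structure stub info for
// property accesses, JS call link info, and the OSR exit jumps.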
void JITCompiler::link(LinkBuffer& linkBuffer)
{
    // Link the code, populate data in CodeBlock data structures.
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("JIT code for %p start at [%p, %p). Size = %zu.\n", m_codeBlock, linkBuffer.debugAddress(), static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.debugSize(), linkBuffer.debugSize());
#endif

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    m_codeBlock->callReturnIndexVector().reserveCapacity(m_exceptionChecks.size());
    for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
        unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
        CodeOrigin codeOrigin = m_exceptionChecks[i].m_codeOrigin;
        while (codeOrigin.inlineCallFrame)
            codeOrigin = codeOrigin.inlineCallFrame->caller;
        unsigned exceptionInfo = codeOrigin.bytecodeIndex;
        m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(returnAddressOffset, exceptionInfo));
    }

    Vector<CodeOriginAtCallReturnOffset, 0, UnsafeVectorOverflow>& codeOrigins = m_codeBlock->codeOrigins();
    codeOrigins.resize(m_exceptionChecks.size());

    for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
        CallExceptionRecord& record = m_exceptionChecks[i];
        unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
        codeOrigins[i].codeOrigin = record.m_codeOrigin;
        codeOrigins[i].callReturnOffset = returnAddressOffset;
    }

    m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccesses.size());
    for (unsigned i = 0; i < m_propertyAccesses.size(); ++i) {
        StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
        CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_propertyAccesses[i].m_slowPathGenerator->call());
        info.codeOrigin = m_propertyAccesses[i].m_codeOrigin;
        info.callReturnLocation = callReturnLocation;
        info.patch.dfg.deltaCheckImmToCall = differenceBetweenCodePtr(linkBuffer.locationOf(m_propertyAccesses[i].m_structureImm), callReturnLocation);
        info.patch.dfg.deltaCallToStructCheck = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_structureCheck));
#if USE(JSVALUE64)
        info.patch.dfg.deltaCallToLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_loadOrStore));
#else
        info.patch.dfg.deltaCallToTagLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_tagLoadOrStore));
        info.patch.dfg.deltaCallToPayloadLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_payloadLoadOrStore));
#endif
        info.patch.dfg.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_slowPathGenerator->label()));
        info.patch.dfg.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_done));
        info.patch.dfg.deltaCallToStorageLoad = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_propertyStorageLoad));
        info.patch.dfg.baseGPR = m_propertyAccesses[i].m_baseGPR;
#if USE(JSVALUE64)
        info.patch.dfg.valueGPR = m_propertyAccesses[i].m_valueGPR;
#else
        info.patch.dfg.valueTagGPR = m_propertyAccesses[i].m_valueTagGPR;
        info.patch.dfg.valueGPR = m_propertyAccesses[i].m_valueGPR;
#endif
        m_propertyAccesses[i].m_usedRegisters.copyInfo(info.patch.dfg.usedRegisters);
        info.patch.dfg.registersFlushed = m_propertyAccesses[i].m_registerMode == PropertyAccessRecord::RegistersFlushed;
    }

    m_codeBlock->setNumberOfCallLinkInfos(m_jsCalls.size());
    for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
        info.callType = m_jsCalls[i].m_callType;
        info.isDFG = true;
        info.codeOrigin = m_jsCalls[i].m_codeOrigin;
        linkBuffer.link(m_jsCalls[i].m_slowCall, FunctionPtr((m_vm->getCTIStub(info.callType == CallLinkInfo::Construct ? linkConstructThunkGenerator : linkCallThunkGenerator)).code().executableAddress()));
        info.callReturnLocation = linkBuffer.locationOfNearCall(m_jsCalls[i].m_slowCall);
        info.hotPathBegin = linkBuffer.locationOf(m_jsCalls[i].m_targetToCheck);
        info.hotPathOther = linkBuffer.locationOfNearCall(m_jsCalls[i].m_fastCall);
        info.calleeGPR = static_cast<unsigned>(m_jsCalls[i].m_callee);
    }

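    // Route every OSR exit through the shared exit-generation thunk. The thunk
    // uses vm->osrExitIndex (stored at each exit site in linkOSRExits) to
    // identify which exit fired, and the actual exit ramp is generated lazily
    // on first use.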
    MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
    CodeLocationLabel target = CodeLocationLabel(osrExitThunk.code());
    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExit& exit = m_jitCode->osrExit[i];
        linkBuffer.link(exit.getPatchableCodeOffsetAsJump(), target);
        exit.correctJump(linkBuffer);
        if (exit.m_watchpointIndex != std::numeric_limits<unsigned>::max())
            m_jitCode->watchpoints[exit.m_watchpointIndex].correctLabels(linkBuffer);
    }

    if (m_graph.m_compilation) {
        ASSERT(m_exitSiteLabels.size() == m_jitCode->osrExit.size());
        for (unsigned i = 0; i < m_exitSiteLabels.size(); ++i) {
            Vector<Label>& labels = m_exitSiteLabels[i];
            Vector<const void*> addresses;
            for (unsigned j = 0; j < labels.size(); ++j)
                addresses.append(linkBuffer.locationOf(labels[j]).executableAddress());
            m_graph.m_compilation->addOSRExitSite(addresses);
        }
    } else
        ASSERT(!m_exitSiteLabels.size());

    m_jitCode->common.compilation = m_graph.m_compilation;

    m_graph.m_watchpoints.reallyAdd();
}

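// Compiles global/eval code: generate the speculative body, slow paths,
// exception handlers, and OSR exits, then link. Returns false if a watchpoint
// fired during compilation or if the link buffer could not be allocated, in
// which case the caller must fall back on other code.
//
// A rough sketch of the expected call pattern (hypothetical driver code; the
// real call sites live in the DFG driver):
//
//     JITCompiler dataFlowJIT(dfg);
//     RefPtr<JSC::JITCode> jitCode;
//     if (!dataFlowJIT.compile(jitCode))
//         return false; // e.g. retry, or stay on the baseline JIT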
bool JITCompiler::compile(RefPtr<JSC::JITCode>& entry)
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();
    SpeculativeJIT speculative(*this);
    compileBody(speculative);
    setEndOfMainPath();

    // Generate slow path code.
    speculative.runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    speculative.createOSREntries();
    setEndOfCode();

    if (!m_graph.m_watchpoints.areStillValid())
        return false;

    LinkBuffer linkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer.didFailToAllocate())
        return false;
    link(linkBuffer);
    speculative.linkOSREntries(linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    if (shouldShowDisassembly())
        m_disassembler->dump(linkBuffer);
    if (m_graph.m_compilation)
        m_disassembler->reportToProfiler(m_graph.m_compilation.get(), linkBuffer);

    m_jitCode->initializeCodeRef(linkBuffer.finalizeCodeWithoutDisassembly());
    entry = m_jitCode;
    return true;
}

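// Compiles function code. Unlike compile(), this emits three entry paths: the
// fast entry point (no arity check), an entry point that checks and fixes up
// the argument count, and a slow stack-check path that calls out to a stub
// when the fast in-line stack check fails.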
bool JITCompiler::compileFunction(RefPtr<JSC::JITCode>& entry, MacroAssemblerCodePtr& entryWithArityCheck)
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
    addPtr(TrustedImm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackCheck = branchPtr(Below, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), GPRInfo::regT1);
    // Return here after stack check.
    Label fromStackCheck = label();


    // === Function body code generation ===
    SpeculativeJIT speculative(*this);
    compileBody(speculative);
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate code to perform the slow stack check (if the fast one in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the stack check; if the fast check in the function header fails,
    // we need to call out to a helper function to check whether more space is available.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    stackCheck.link(this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));

    CallBeginToken token;
    beginCall(CodeOrigin(0), token);
    Call callStackCheck = call();
    notifyCall(callStackCheck, CodeOrigin(0), token);
    jump(fromStackCheck);

    // The fast entry point into a function does not check that the correct number
    // of arguments has been passed to the call (we only use the fast entry point
    // where we can statically determine that the correct number of arguments has
    // been passed, or have already checked). In cases where an arity check is
    // necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    Label arityCheck = label();
    compileEntry();

    load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
    branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    beginCall(CodeOrigin(0), token);
    Call callArityCheck = call();
    notifyCall(callArityCheck, CodeOrigin(0), token);
    move(GPRInfo::regT0, GPRInfo::callFrameRegister);
    jump(fromArityCheck);

    // Generate slow path code.
    speculative.runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    speculative.createOSREntries();
    setEndOfCode();

    if (!m_graph.m_watchpoints.areStillValid())
        return false;

    // === Link ===
    LinkBuffer linkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer.didFailToAllocate())
        return false;
    link(linkBuffer);
    speculative.linkOSREntries(linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    // FIXME: switch the stack check & arity check over to DFGOperation style calls, not JIT stubs.
    linkBuffer.link(callStackCheck, cti_stack_check);
    linkBuffer.link(callArityCheck, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck);

    if (shouldShowDisassembly())
        m_disassembler->dump(linkBuffer);
    if (m_graph.m_compilation)
        m_disassembler->reportToProfiler(m_graph.m_compilation.get(), linkBuffer);

    entryWithArityCheck = linkBuffer.locationOf(arityCheck);
    m_jitCode->initializeCodeRef(linkBuffer.finalizeCodeWithoutDisassembly());
    entry = m_jitCode;
    return true;
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)