[Re-landing] Use JIT probes for DFG OSR exit.
Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp
/*
 * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGOSRExitCompilerCommon.h"

#if ENABLE(DFG_JIT)

#include "DFGJITCode.h"
#include "DFGOperations.h"
#include "JIT.h"
#include "JSCJSValueInlines.h"
#include "JSCInlines.h"
#include "StructureStubInfo.h"

namespace JSC { namespace DFG {

// FIXME: remove this when we fix https://bugs.webkit.org/show_bug.cgi?id=175145.
void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit)
{
    if (!exitKindMayJettison(exit.m_kind)) {
        // FIXME: We may want to notice that we're frequently exiting
        // at an op_catch that we didn't compile an entrypoint for, and
        // then trigger a reoptimization of this CodeBlock:
        // https://bugs.webkit.org/show_bug.cgi?id=175842
        return;
    }

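    // At exit time the code emitted below behaves roughly like this sketch
    // (illustrative pseudocode; the names are descriptive, not real accessors):
    //
    //     exit.m_count++;
    //     codeBlock->osrExitCounter++;
    //     if (baselineCodeBlock->executeCounter >= 0
    //         || codeBlock->osrExitCounter > chosenExitCountThreshold)
    //         triggerReoptimizationNow(baselineCodeBlock, &exit);
    //     else
    //         backOffBaselineExecutionCounter();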
    jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));

    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::regT0);

    AssemblyHelpers::Jump tooFewFails;

    jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()), GPRInfo::regT2);
    jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
    jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()));

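    // JSC execution counters count up from a negative value toward zero. A
    // non-negative counter on the baseline CodeBlock means it has already
    // crossed its optimization threshold, so we should reoptimize right away.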
    jit.move(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), GPRInfo::regT0);
    AssemblyHelpers::Jump reoptimizeNow = jit.branch32(
        AssemblyHelpers::GreaterThanOrEqual,
        AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()),
        AssemblyHelpers::TrustedImm32(0));

    // We want to figure out if there's a possibility that we're in a loop. For the outermost
    // code block in the inline stack, we handle this appropriately by having the loop OSR trigger
    // check the exit count of the replacement of the CodeBlock from which we are OSRing. The
    // problem is the inlined functions, which might also have loops, but whose baseline versions
    // don't know where to look for the exit count. Figure out if those loops are severe enough
    // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger.
    // Otherwise, we should use the normal reoptimization trigger.

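    // In effect the next few blocks choose the threshold like this
    // (an illustrative sketch):
    //
    //     threshold = exitCountThresholdForReoptimization();
    //     if (any inlined frame's executable did try to OSR-enter a loop)
    //         threshold = exitCountThresholdForReoptimizationFromLoop();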
    AssemblyHelpers::JumpList loopThreshold;

    for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame) {
        loopThreshold.append(
            jit.branchTest8(
                AssemblyHelpers::NonZero,
                AssemblyHelpers::AbsoluteAddress(
                    inlineCallFrame->baselineCodeBlock->ownerScriptExecutable()->addressOfDidTryToEnterInLoop())));
    }

    jit.move(
        AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization()),
        GPRInfo::regT1);

    if (!loopThreshold.empty()) {
        AssemblyHelpers::Jump done = jit.jump();

        loopThreshold.link(&jit);
        jit.move(
            AssemblyHelpers::TrustedImm32(
                jit.codeBlock()->exitCountThresholdForReoptimizationFromLoop()),
            GPRInfo::regT1);

        done.link(&jit);
    }

    tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1);

    reoptimizeNow.link(&jit);

    // Reoptimize as soon as possible.
#if !NUMBER_OF_ARGUMENT_REGISTERS
    jit.poke(GPRInfo::regT0);
    jit.poke(AssemblyHelpers::TrustedImmPtr(&exit), 1);
#else
    jit.move(GPRInfo::regT0, GPRInfo::argumentGPR0);
    jit.move(AssemblyHelpers::TrustedImmPtr(&exit), GPRInfo::argumentGPR1);
#endif
    jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(triggerReoptimizationNow)), GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    AssemblyHelpers::Jump doneAdjusting = jit.jump();

    tooFewFails.link(&jit);

    // Adjust the execution counter such that the target is to only optimize after a while.
    int32_t activeThreshold =
        jit.baselineCodeBlock()->adjustedCounterValue(
            Options::thresholdForOptimizeAfterLongWarmUp());
    int32_t targetValue = applyMemoryUsageHeuristicsAndConvertToInt(
        activeThreshold, jit.baselineCodeBlock());
    int32_t clippedValue;
    switch (jit.codeBlock()->jitType()) {
    case JITCode::DFGJIT:
        clippedValue = BaselineExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    case JITCode::FTLJIT:
        clippedValue = UpperTierExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        clippedValue = 0; // Make some compilers, and mhahnenberg, happy.
#endif
        break;
    }
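    // These three stores arm the baseline CodeBlock's execution counter. The
    // counter itself is set to -clippedValue, so it must count clippedValue
    // more executions up toward zero before optimization is considered again.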
    jit.store32(AssemblyHelpers::TrustedImm32(-clippedValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
    jit.store32(AssemblyHelpers::TrustedImm32(activeThreshold), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
    jit.store32(AssemblyHelpers::TrustedImm32(formattedTotalExecutionCount(clippedValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));

    doneAdjusting.link(&jit);
}

// FIXME: remove this when we fix https://bugs.webkit.org/show_bug.cgi?id=175145.
void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
{
    // FIXME: We shouldn't leave holes on the stack when performing an OSR exit
    // in the presence of inlined tail calls.
    // https://bugs.webkit.org/show_bug.cgi?id=147511
    ASSERT(jit.baselineCodeBlock()->jitType() == JITCode::BaselineJIT);
    jit.storePtr(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)CallFrameSlot::codeBlock));

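    // Walk the inline stack from the exit origin outward, materializing what
    // the baseline JIT expects to find in a real call frame. Per frame,
    // roughly (an illustrative sketch, not real helpers):
    //
    //     frame.codeBlock   = baseline CodeBlock for the inlined function;
    //     frame.returnPC    = return address at the call site in the caller;
    //     frame.callerFrame = the caller's reified frame;
    //     frame.argumentCount and frame.callee = known statically, unless
    //         the frame is a varargs or closure call;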
    const CodeOrigin* codeOrigin;
    for (codeOrigin = &exit.m_codeOrigin; codeOrigin && codeOrigin->inlineCallFrame; codeOrigin = codeOrigin->inlineCallFrame->getCallerSkippingTailCalls()) {
        InlineCallFrame* inlineCallFrame = codeOrigin->inlineCallFrame;
        CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(*codeOrigin);
        InlineCallFrame::Kind trueCallerCallKind;
        CodeOrigin* trueCaller = inlineCallFrame->getCallerSkippingTailCalls(&trueCallerCallKind);
        GPRReg callerFrameGPR = GPRInfo::callFrameRegister;

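        // No true caller means everything between this inlined frame and the
        // machine frame was entered by tail calls, so the reified frame reuses
        // the machine frame's own return PC and caller frame pointer.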
        if (!trueCaller) {
            ASSERT(inlineCallFrame->isTail());
            jit.loadPtr(AssemblyHelpers::Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT3);
            jit.storePtr(GPRInfo::regT3, AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
            jit.loadPtr(AssemblyHelpers::Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset()), GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else {
            CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(*trueCaller);
            unsigned callBytecodeIndex = trueCaller->bytecodeIndex;
            void* jumpTarget = nullptr;

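            // The reified return PC must point back into the caller's baseline
            // code: ordinary calls return to the location recorded in the
            // caller's CallLinkInfo, while getter/setter calls return to the
            // "done" location of the caller's inline cache.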
            switch (trueCallerCallKind) {
            case InlineCallFrame::Call:
            case InlineCallFrame::Construct:
            case InlineCallFrame::CallVarargs:
            case InlineCallFrame::ConstructVarargs:
            case InlineCallFrame::TailCall:
            case InlineCallFrame::TailCallVarargs: {
                CallLinkInfo* callLinkInfo =
                    baselineCodeBlockForCaller->getCallLinkInfoForBytecodeIndex(callBytecodeIndex);
                RELEASE_ASSERT(callLinkInfo);

                jumpTarget = callLinkInfo->callReturnLocation().executableAddress();
                break;
            }

            case InlineCallFrame::GetterCall:
            case InlineCallFrame::SetterCall: {
                StructureStubInfo* stubInfo =
                    baselineCodeBlockForCaller->findStubInfo(CodeOrigin(callBytecodeIndex));
                RELEASE_ASSERT(stubInfo);

                jumpTarget = stubInfo->doneLocation().executableAddress();
                break;
            }

            default:
                RELEASE_ASSERT_NOT_REACHED();
            }

            if (trueCaller->inlineCallFrame) {
                jit.addPtr(
                    AssemblyHelpers::TrustedImm32(trueCaller->inlineCallFrame->stackOffset * sizeof(EncodedJSValue)),
                    GPRInfo::callFrameRegister,
                    GPRInfo::regT3);
                callerFrameGPR = GPRInfo::regT3;
            }

            jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
        }

        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::codeBlock)));

        // Restore the inline call frame's callee save registers.
        // If this inlined frame is a tail call that will return back to the original caller, we need to
        // copy the prior contents of the tag registers already saved for the outer frame to this frame.
        jit.emitSaveOrCopyCalleeSavesFor(
            baselineCodeBlock,
            static_cast<VirtualRegister>(inlineCallFrame->stackOffset),
            trueCaller ? AssemblyHelpers::UseExistingTagRegisterContents : AssemblyHelpers::CopyBaselineCalleeSavedRegistersFromBaseFrame,
            GPRInfo::regT2);

        if (!inlineCallFrame->isVarargs())
            jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->argumentCountIncludingThis), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount)));
#if USE(JSVALUE64)
        jit.store64(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        uint32_t locationBits = CallSiteIndex(codeOrigin->bytecodeIndex).bits();
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount)));
        if (!inlineCallFrame->isClosureCall)
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::callee)));
#else // USE(JSVALUE64) // so this is the 32-bit part
        jit.storePtr(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        Instruction* instruction = baselineCodeBlock->instructions().begin() + codeOrigin->bytecodeIndex;
        uint32_t locationBits = CallSiteIndex(instruction).bits();
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::callee)));
        if (!inlineCallFrame->isClosureCall)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::callee)));
#endif // USE(JSVALUE64) // ending the #else part, so directly above is the 32-bit part
    }

    // We don't need to set the toplevel code origin if we only did inline tail calls.
    if (codeOrigin) {
#if USE(JSVALUE64)
        uint32_t locationBits = CallSiteIndex(codeOrigin->bytecodeIndex).bits();
#else
        Instruction* instruction = jit.baselineCodeBlock()->instructions().begin() + codeOrigin->bytecodeIndex;
        uint32_t locationBits = CallSiteIndex(instruction).bits();
#endif
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(CallFrameSlot::argumentCount)));
    }
}

// FIXME: remove this when we fix https://bugs.webkit.org/show_bug.cgi?id=175145.
static void osrWriteBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch)
{
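    // This is the generational barrier's fast path: if the owner cell is
    // already in the remembered set or still in eden, the GC will visit it
    // anyway, so we can skip the slow call to operationOSRWriteBarrier.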
    AssemblyHelpers::Jump ownerIsRememberedOrInEden = jit.barrierBranchWithoutFence(owner);

    // We need these extra slots because setupArgumentsWithExecState will use poke on x86.
#if CPU(X86)
    jit.subPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif

    jit.setupArgumentsWithExecState(owner);
    jit.move(MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(operationOSRWriteBarrier)), scratch);
    jit.call(scratch);

#if CPU(X86)
    jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif

    ownerIsRememberedOrInEden.link(&jit);
}

// FIXME: remove this when we fix https://bugs.webkit.org/show_bug.cgi?id=175145.
void adjustAndJumpToTarget(VM& vm, CCallHelpers& jit, const OSRExitBase& exit)
{
    jit.memoryFence();

    jit.move(
        AssemblyHelpers::TrustedImmPtr(
            jit.codeBlock()->baselineAlternative()), GPRInfo::argumentGPR1);
    osrWriteBarrier(jit, GPRInfo::argumentGPR1, GPRInfo::nonArgGPR0);

    // We barrier all inlined frames -- and not just the current inline stack --
    // because we don't know which inlined function owns the value profile that
    // we'll update when we exit. In the case of "f() { a(); b(); }", if both
    // a and b are inlined, we might exit inside b due to a bad value loaded
    // from a.
    // FIXME: MethodOfGettingAValueProfile should remember which CodeBlock owns
    // the value profile.
    InlineCallFrameSet* inlineCallFrames = jit.codeBlock()->jitCode()->dfgCommon()->inlineCallFrames.get();
    if (inlineCallFrames) {
        for (InlineCallFrame* inlineCallFrame : *inlineCallFrames) {
            jit.move(
                AssemblyHelpers::TrustedImmPtr(
                    inlineCallFrame->baselineCodeBlock.get()), GPRInfo::argumentGPR1);
            osrWriteBarrier(jit, GPRInfo::argumentGPR1, GPRInfo::nonArgGPR0);
        }
    }

    if (exit.m_codeOrigin.inlineCallFrame)
        jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

    CodeBlock* codeBlockForExit = jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = jit.decodedCodeMapFor(codeBlockForExit);

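    // The decoded code map is sorted by bytecode index, so a binary search
    // maps the bytecode index we are exiting to onto the offset of the
    // corresponding baseline machine code.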
    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);

    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);

    void* jumpTarget = codeBlockForExit->jitCode()->executableAddressAtOffset(mapping->m_machineCodeOffset);

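    // Point the stack pointer where the baseline JIT expects it for the frame
    // we are exiting into, then jump into the baseline machine code.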
    jit.addPtr(AssemblyHelpers::TrustedImm32(JIT::stackPointerOffsetFor(codeBlockForExit) * sizeof(Register)), GPRInfo::callFrameRegister, AssemblyHelpers::stackPointerRegister);
    if (exit.isExceptionHandler()) {
        // Since we're jumping to op_catch, we need to set callFrameForCatch.
        jit.storePtr(GPRInfo::callFrameRegister, vm.addressOfCallFrameForCatch());
    }

    jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
    jit.jump(GPRInfo::regT2);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)