/*
 * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
25
26 #include "config.h"
27 #include "DFGOSRExitCompilerCommon.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "DFGJITCode.h"
32 #include "DFGOperations.h"
33 #include "JIT.h"
34 #include "JSCJSValueInlines.h"
35 #include "JSCInlines.h"
36 #include "StructureStubInfo.h"
37
38 namespace JSC { namespace DFG {
39
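// Emitted on every OSR exit whose kind may jettison the optimized code block.
// Bumps the per-exit-site and per-CodeBlock exit counters, then either calls
// into the runtime to trigger reoptimization right away, or backs off by
// re-arming the baseline execution counter so we do not tier up again too soon.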
void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit)
{
    if (!exitKindMayJettison(exit.m_kind)) {
        // FIXME: We may want to notice that we're frequently exiting
        // at an op_catch that we didn't compile an entrypoint for, and
        // then trigger a reoptimization of this CodeBlock:
        // https://bugs.webkit.org/show_bug.cgi?id=175842
        return;
    }

    jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));

    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::regT0);

    AssemblyHelpers::Jump tooFewFails;

    jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()), GPRInfo::regT2);
    jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
    jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()));

    jit.move(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), GPRInfo::regT0);
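    // The baseline execution counter counts up from a negative value toward zero;
    // a non-negative value means the baseline block has already reached its
    // tier-up trigger, so skip the threshold check and reoptimize immediately.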
    AssemblyHelpers::Jump reoptimizeNow = jit.branch32(
        AssemblyHelpers::GreaterThanOrEqual,
        AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()),
        AssemblyHelpers::TrustedImm32(0));

    // We want to figure out if there's a possibility that we're in a loop. For the outermost
    // code block in the inline stack, we handle this appropriately by having the loop OSR trigger
    // check the exit count of the replacement of the CodeBlock from which we are OSRing. The
    // problem is the inlined functions, which might also have loops, but whose baseline versions
    // don't know where to look for the exit count. Figure out if those loops are severe enough
    // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger.
    // Otherwise, we should use the normal reoptimization trigger.

    AssemblyHelpers::JumpList loopThreshold;

    for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame) {
        loopThreshold.append(
            jit.branchTest8(
                AssemblyHelpers::NonZero,
                AssemblyHelpers::AbsoluteAddress(
                    inlineCallFrame->baselineCodeBlock->ownerScriptExecutable()->addressOfDidTryToEnterInLoop())));
    }

    jit.move(
        AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization()),
        GPRInfo::regT1);

    if (!loopThreshold.empty()) {
        AssemblyHelpers::Jump done = jit.jump();

        loopThreshold.link(&jit);
        jit.move(
            AssemblyHelpers::TrustedImm32(
                jit.codeBlock()->exitCountThresholdForReoptimizationFromLoop()),
            GPRInfo::regT1);

        done.link(&jit);
    }

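    // At this point regT2 holds the incremented exit count and regT1 holds the
    // applicable threshold. Take the back-off path while the count is still at
    // or below the threshold.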
    tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1);

    reoptimizeNow.link(&jit);

    // Reoptimize as soon as possible.
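    // Call triggerReoptimizationNow with the baseline CodeBlock (still in regT0
    // from above) and the exit: on targets with no argument registers the
    // arguments are poked onto the stack; otherwise they go in argument GPRs.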
#if !NUMBER_OF_ARGUMENT_REGISTERS
    jit.poke(GPRInfo::regT0);
    jit.poke(AssemblyHelpers::TrustedImmPtr(&exit), 1);
#else
    jit.move(GPRInfo::regT0, GPRInfo::argumentGPR0);
    jit.move(AssemblyHelpers::TrustedImmPtr(&exit), GPRInfo::argumentGPR1);
#endif
    jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(triggerReoptimizationNow)), GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    AssemblyHelpers::Jump doneAdjusting = jit.jump();

    tooFewFails.link(&jit);

    // Adjust the execution counter such that the target is to only optimize after a while.
    int32_t activeThreshold =
        jit.baselineCodeBlock()->adjustedCounterValue(
            Options::thresholdForOptimizeAfterLongWarmUp());
    int32_t targetValue = applyMemoryUsageHeuristicsAndConvertToInt(
        activeThreshold, jit.baselineCodeBlock());
    int32_t clippedValue;
    switch (jit.codeBlock()->jitType()) {
    case JITCode::DFGJIT:
        clippedValue = BaselineExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    case JITCode::FTLJIT:
        clippedValue = UpperTierExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        clippedValue = 0; // Make some compilers, and mhahnenberg, happy.
#endif
        break;
    }
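    // Re-arm the baseline counter: storing -clippedValue means the baseline block
    // must execute roughly clippedValue more times before it attempts to tier up
    // again, giving the newly gathered profiling data time to settle.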
    jit.store32(AssemblyHelpers::TrustedImm32(-clippedValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
    jit.store32(AssemblyHelpers::TrustedImm32(activeThreshold), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
    jit.store32(AssemblyHelpers::TrustedImm32(formattedTotalExecutionCount(clippedValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));

    doneAdjusting.link(&jit);
}

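// Rebuilds, on the stack, the chain of baseline-style call frames for every
// function inlined at the exit's code origin: each reified frame gets its
// CodeBlock, return PC, caller frame pointer, call-site index, argument count,
// callee, and callee save registers, exactly as the baseline JIT expects them.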
void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
{
    // FIXME: We shouldn't leave holes on the stack when performing an OSR exit
    // in the presence of inlined tail calls.
    // https://bugs.webkit.org/show_bug.cgi?id=147511
    ASSERT(jit.baselineCodeBlock()->jitType() == JITCode::BaselineJIT);
    jit.storePtr(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)CallFrameSlot::codeBlock));

    const CodeOrigin* codeOrigin;
    for (codeOrigin = &exit.m_codeOrigin; codeOrigin && codeOrigin->inlineCallFrame; codeOrigin = codeOrigin->inlineCallFrame->getCallerSkippingTailCalls()) {
        InlineCallFrame* inlineCallFrame = codeOrigin->inlineCallFrame;
        CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(*codeOrigin);
        InlineCallFrame::Kind trueCallerCallKind;
        CodeOrigin* trueCaller = inlineCallFrame->getCallerSkippingTailCalls(&trueCallerCallKind);
        GPRReg callerFrameGPR = GPRInfo::callFrameRegister;

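        // No true caller means everything between this frame and the machine frame
        // was entered via tail calls, so the reified frame returns straight to the
        // machine frame's caller: reuse the machine return PC and caller-frame pointer.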
        if (!trueCaller) {
            ASSERT(inlineCallFrame->isTail());
            jit.loadPtr(AssemblyHelpers::Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT3);
            jit.storePtr(GPRInfo::regT3, AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
            jit.loadPtr(AssemblyHelpers::Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset()), GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else {
            CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(*trueCaller);
            unsigned callBytecodeIndex = trueCaller->bytecodeIndex;
            void* jumpTarget = nullptr;

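            // The reified return PC must point just past the call in the caller's
            // baseline code: for ordinary and varargs calls that is the CallLinkInfo's
            // return location; for getter/setter calls inlined under a property access
            // it is the done location of the corresponding structure stub.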
            switch (trueCallerCallKind) {
            case InlineCallFrame::Call:
            case InlineCallFrame::Construct:
            case InlineCallFrame::CallVarargs:
            case InlineCallFrame::ConstructVarargs:
            case InlineCallFrame::TailCall:
            case InlineCallFrame::TailCallVarargs: {
                CallLinkInfo* callLinkInfo =
                    baselineCodeBlockForCaller->getCallLinkInfoForBytecodeIndex(callBytecodeIndex);
                RELEASE_ASSERT(callLinkInfo);

                jumpTarget = callLinkInfo->callReturnLocation().executableAddress();
                break;
            }

            case InlineCallFrame::GetterCall:
            case InlineCallFrame::SetterCall: {
                StructureStubInfo* stubInfo =
                    baselineCodeBlockForCaller->findStubInfo(CodeOrigin(callBytecodeIndex));
                RELEASE_ASSERT(stubInfo);

                jumpTarget = stubInfo->doneLocation().executableAddress();
                break;
            }

            default:
                RELEASE_ASSERT_NOT_REACHED();
            }

            if (trueCaller->inlineCallFrame) {
                jit.addPtr(
                    AssemblyHelpers::TrustedImm32(trueCaller->inlineCallFrame->stackOffset * sizeof(EncodedJSValue)),
                    GPRInfo::callFrameRegister,
                    GPRInfo::regT3);
                callerFrameGPR = GPRInfo::regT3;
            }

            jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
        }

        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::codeBlock)));

        // Restore the inline call frame's callee save registers.
        // If this inlined frame is a tail call that will return back to the original caller, we need to
        // copy the prior contents of the tag registers already saved for the outer frame to this frame.
        jit.emitSaveOrCopyCalleeSavesFor(
            baselineCodeBlock,
            static_cast<VirtualRegister>(inlineCallFrame->stackOffset),
            trueCaller ? AssemblyHelpers::UseExistingTagRegisterContents : AssemblyHelpers::CopyBaselineCalleeSavedRegistersFromBaseFrame,
            GPRInfo::regT2);

        if (!inlineCallFrame->isVarargs())
            jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->argumentCountIncludingThis), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount)));
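        // The tag word of the ArgumentCount slot doubles as the frame's call-site
        // index. The callee slot can be filled in statically unless this is a
        // closure call, in which case the callee is recovered elsewhere as part of
        // the exit's value recovery.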
#if USE(JSVALUE64)
        jit.store64(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        uint32_t locationBits = CallSiteIndex(codeOrigin->bytecodeIndex).bits();
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount)));
        if (!inlineCallFrame->isClosureCall)
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::callee)));
#else // USE(JSVALUE64), so this is the 32-bit part.
        jit.storePtr(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        Instruction* instruction = baselineCodeBlock->instructions().begin() + codeOrigin->bytecodeIndex;
        uint32_t locationBits = CallSiteIndex(instruction).bits();
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::callee)));
        if (!inlineCallFrame->isClosureCall)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::callee)));
#endif // USE(JSVALUE64), ending the 32-bit part.
    }

    // We don't need to set the top-level code origin if we only did inlined tail calls.
    if (codeOrigin) {
#if USE(JSVALUE64)
        uint32_t locationBits = CallSiteIndex(codeOrigin->bytecodeIndex).bits();
#else
        Instruction* instruction = jit.baselineCodeBlock()->instructions().begin() + codeOrigin->bytecodeIndex;
        uint32_t locationBits = CallSiteIndex(instruction).bits();
#endif
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(CallFrameSlot::argumentCount)));
    }
}

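// Emits a store barrier on |owner|: the slow-path call to operationOSRWriteBarrier
// is skipped when the owner is already in the remembered set or still in Eden.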
static void osrWriteBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch)
{
    AssemblyHelpers::Jump ownerIsRememberedOrInEden = jit.barrierBranchWithoutFence(owner);

    // We need these extra slots because setupArguments will use poke on x86.
#if CPU(X86)
    jit.subPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 4), MacroAssembler::stackPointerRegister);
#endif

    jit.setupArguments<decltype(operationOSRWriteBarrier)>(owner);
    jit.move(MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(operationOSRWriteBarrier)), scratch);
    jit.call(scratch);

#if CPU(X86)
    jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 4), MacroAssembler::stackPointerRegister);
#endif

    ownerIsRememberedOrInEden.link(&jit);
}

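// Final step of an OSR exit: execute write barriers on the code blocks whose
// profiling we may have mutated, roll the frame and stack pointers back to what
// the baseline JIT expects, and jump to the machine code for the exit's bytecode.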
void adjustAndJumpToTarget(VM& vm, CCallHelpers& jit, const OSRExitBase& exit)
{
    jit.memoryFence();

    jit.move(
        AssemblyHelpers::TrustedImmPtr(
            jit.codeBlock()->baselineAlternative()), GPRInfo::argumentGPR1);
    osrWriteBarrier(jit, GPRInfo::argumentGPR1, GPRInfo::nonArgGPR0);

    // We barrier all inlined frames -- and not just the current inline stack --
    // because we don't know which inlined function owns the value profile that
    // we'll update when we exit. In the case of "f() { a(); b(); }", if both
    // a and b are inlined, we might exit inside b due to a bad value loaded
    // from a.
    // FIXME: MethodOfGettingAValueProfile should remember which CodeBlock owns
    // the value profile.
    InlineCallFrameSet* inlineCallFrames = jit.codeBlock()->jitCode()->dfgCommon()->inlineCallFrames.get();
    if (inlineCallFrames) {
        for (InlineCallFrame* inlineCallFrame : *inlineCallFrames) {
            jit.move(
                AssemblyHelpers::TrustedImmPtr(
                    inlineCallFrame->baselineCodeBlock.get()), GPRInfo::argumentGPR1);
            osrWriteBarrier(jit, GPRInfo::argumentGPR1, GPRInfo::nonArgGPR0);
        }
    }

    if (exit.m_codeOrigin.inlineCallFrame)
        jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

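    // The decoded code map associates each bytecode index in the baseline block
    // with the offset of its machine code; binary-search it to find where in the
    // baseline JIT code execution should resume.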
    CodeBlock* codeBlockForExit = jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = jit.decodedCodeMapFor(codeBlockForExit);

    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);

    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);

    void* jumpTarget = codeBlockForExit->jitCode()->executableAddressAtOffset(mapping->m_machineCodeOffset);

    jit.addPtr(AssemblyHelpers::TrustedImm32(JIT::stackPointerOffsetFor(codeBlockForExit) * sizeof(Register)), GPRInfo::callFrameRegister, AssemblyHelpers::stackPointerRegister);
    if (exit.isExceptionHandler()) {
        // Since we're jumping to op_catch, we need to set callFrameForCatch.
        jit.storePtr(GPRInfo::callFrameRegister, vm.addressOfCallFrameForCatch());
    }

    jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
    jit.jump(GPRInfo::regT2);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)