/*
 * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGOSRExitCompilerCommon.h"

#if ENABLE(DFG_JIT)

#include "DFGJITCode.h"
#include "DFGOperations.h"
#include "JIT.h"
#include "JSCJSValueInlines.h"
#include "JSCInlines.h"
#include "StructureStubInfo.h"

namespace JSC { namespace DFG {

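// Emitted on every OSR exit whose kind may jettison the optimized code. Bumps the
// per-exit-site and per-CodeBlock exit counters, and either calls out to trigger
// reoptimization right away or backs off the baseline execution counter so we only
// try to tier up again after a long warm-up.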
void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit)
{
    if (!exitKindMayJettison(exit.m_kind)) {
        // FIXME: We may want to notice that we're frequently exiting
        // at an op_catch that we didn't compile an entrypoint for, and
        // then trigger a reoptimization of this CodeBlock:
        // https://bugs.webkit.org/show_bug.cgi?id=175842
        return;
    }

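    // The per-exit-site count (exit.m_count) records how often this particular exit
    // fired; the CodeBlock-wide OSR exit counter below is what drives the
    // reoptimization decision.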
    jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));

    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::regT0);

    AssemblyHelpers::Jump tooFewFails;

    jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()), GPRInfo::regT2);
    jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
    jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()));

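    // Execution counters count up from a negative value toward zero, so a non-negative
    // execute counter on the baseline code block means its reoptimization threshold has
    // already been crossed; in that case we reoptimize right away.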
    jit.move(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), GPRInfo::regT0);
    AssemblyHelpers::Jump reoptimizeNow = jit.branch32(
        AssemblyHelpers::GreaterThanOrEqual,
        AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()),
        AssemblyHelpers::TrustedImm32(0));

    // We want to figure out if there's a possibility that we're in a loop. For the outermost
    // code block in the inline stack, this is handled for us: the loop OSR entry trigger
    // checks the exit count of the replacement of the CodeBlock from which we are OSRing.
    // The problem is the inlined functions, which might also contain loops but whose baseline
    // versions don't know where to look for the exit count. Figure out if any of those loops
    // were hot enough that we tried to OSR enter them. If so, use the loop reoptimization
    // trigger; otherwise, use the normal reoptimization trigger.

    AssemblyHelpers::JumpList loopThreshold;

    for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame) {
        loopThreshold.append(
            jit.branchTest8(
                AssemblyHelpers::NonZero,
                AssemblyHelpers::AbsoluteAddress(
                    inlineCallFrame->baselineCodeBlock->ownerScriptExecutable()->addressOfDidTryToEnterInLoop())));
    }

    jit.move(
        AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization()),
        GPRInfo::regT1);

    if (!loopThreshold.empty()) {
        AssemblyHelpers::Jump done = jit.jump();

        loopThreshold.link(&jit);
        jit.move(
            AssemblyHelpers::TrustedImm32(
                jit.codeBlock()->exitCountThresholdForReoptimizationFromLoop()),
            GPRInfo::regT1);

        done.link(&jit);
    }

    tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1);

    reoptimizeNow.link(&jit);

    // Reoptimize as soon as possible.
#if !NUMBER_OF_ARGUMENT_REGISTERS
    jit.poke(GPRInfo::regT0);
    jit.poke(AssemblyHelpers::TrustedImmPtr(&exit), 1);
#else
    jit.move(GPRInfo::regT0, GPRInfo::argumentGPR0);
    jit.move(AssemblyHelpers::TrustedImmPtr(&exit), GPRInfo::argumentGPR1);
#endif
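    // On targets with no argument registers (e.g. 32-bit x86), the two arguments to
    // the reoptimization trigger are poked onto the stack; everywhere else they go in
    // the first two argument GPRs.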
    jit.move(AssemblyHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(triggerReoptimizationNow)), GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0, OperationPtrTag);
    AssemblyHelpers::Jump doneAdjusting = jit.jump();

    tooFewFails.link(&jit);

    // Too few failures to reoptimize now. Back off the baseline execution counter so
    // that we only consider optimizing this code block again after a long warm-up.
    int32_t activeThreshold =
        jit.baselineCodeBlock()->adjustedCounterValue(
            Options::thresholdForOptimizeAfterLongWarmUp());
    int32_t targetValue = applyMemoryUsageHeuristicsAndConvertToInt(
        activeThreshold, jit.baselineCodeBlock());
    int32_t clippedValue;
    switch (jit.codeBlock()->jitType()) {
    case JITCode::DFGJIT:
        clippedValue = BaselineExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    case JITCode::FTLJIT:
        clippedValue = UpperTierExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        clippedValue = 0; // Make some compilers, and mhahnenberg, happy.
#endif
        break;
    }
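    // The execute counter counts up from -clippedValue and trips when it reaches zero,
    // so these stores reschedule the next tier-up attempt; the active threshold and
    // total count are updated to keep the counter's bookkeeping consistent.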
    jit.store32(AssemblyHelpers::TrustedImm32(-clippedValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
    jit.store32(AssemblyHelpers::TrustedImm32(activeThreshold), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
    jit.store32(AssemblyHelpers::TrustedImm32(formattedTotalExecutionCount(clippedValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));

    doneAdjusting.link(&jit);
}

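// Walks the inline stack from the exit's code origin outward and materializes a
// baseline-style call frame for each inlined function: code block, return PC, caller
// frame pointer, call site index, argument count, and (for non-closure calls) the
// callee constant.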
void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
{
    // FIXME: We shouldn't leave holes on the stack when performing an OSR exit
    // in the presence of inlined tail calls.
    // https://bugs.webkit.org/show_bug.cgi?id=147511
    ASSERT(jit.baselineCodeBlock()->jitType() == JITCode::BaselineJIT);
    jit.storePtr(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)CallFrameSlot::codeBlock));

    const CodeOrigin* codeOrigin;
    for (codeOrigin = &exit.m_codeOrigin; codeOrigin && codeOrigin->inlineCallFrame; codeOrigin = codeOrigin->inlineCallFrame->getCallerSkippingTailCalls()) {
        InlineCallFrame* inlineCallFrame = codeOrigin->inlineCallFrame;
        CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(*codeOrigin);
        InlineCallFrame::Kind trueCallerCallKind;
        CodeOrigin* trueCaller = inlineCallFrame->getCallerSkippingTailCalls(&trueCallerCallKind);
        GPRReg callerFrameGPR = GPRInfo::callFrameRegister;

        if (!trueCaller) {
            ASSERT(inlineCallFrame->isTail());
            jit.loadPtr(AssemblyHelpers::Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT3);
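            // With pointer profiling (e.g. ARM64E pointer signing), the return PC we just
            // loaded is signed against its current frame slot; strip that signature and
            // re-sign it against the slot it will occupy in the reconstructed frame.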
#if USE(POINTER_PROFILING)
            jit.addPtr(AssemblyHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), GPRInfo::callFrameRegister, GPRInfo::regT2);
            jit.untagPtr(GPRInfo::regT3, GPRInfo::regT2);
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->returnPCOffset() + sizeof(void*)), GPRInfo::callFrameRegister, GPRInfo::regT2);
            jit.tagPtr(GPRInfo::regT3, GPRInfo::regT2);
#endif
            jit.storePtr(GPRInfo::regT3, AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
            jit.loadPtr(AssemblyHelpers::Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset()), GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else {
            CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(*trueCaller);
            unsigned callBytecodeIndex = trueCaller->bytecodeIndex;
            void* jumpTarget = nullptr;

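            // Recover the baseline return address for this call site: for ordinary and
            // varargs calls it is the return location recorded by the call's CallLinkInfo;
            // for getter/setter calls it is the done location of the property-access stub.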
            switch (trueCallerCallKind) {
            case InlineCallFrame::Call:
            case InlineCallFrame::Construct:
            case InlineCallFrame::CallVarargs:
            case InlineCallFrame::ConstructVarargs:
            case InlineCallFrame::TailCall:
            case InlineCallFrame::TailCallVarargs: {
                CallLinkInfo* callLinkInfo =
                    baselineCodeBlockForCaller->getCallLinkInfoForBytecodeIndex(callBytecodeIndex);
                RELEASE_ASSERT(callLinkInfo);

                jumpTarget = callLinkInfo->callReturnLocation().untaggedExecutableAddress();
                break;
            }

            case InlineCallFrame::GetterCall:
            case InlineCallFrame::SetterCall: {
                StructureStubInfo* stubInfo =
                    baselineCodeBlockForCaller->findStubInfo(CodeOrigin(callBytecodeIndex));
                RELEASE_ASSERT(stubInfo);

                jumpTarget = stubInfo->doneLocation().untaggedExecutableAddress();
                break;
            }

            default:
                RELEASE_ASSERT_NOT_REACHED();
            }

            if (trueCaller->inlineCallFrame) {
                jit.addPtr(
                    AssemblyHelpers::TrustedImm32(trueCaller->inlineCallFrame->stackOffset * sizeof(EncodedJSValue)),
                    GPRInfo::callFrameRegister,
                    GPRInfo::regT3);
                callerFrameGPR = GPRInfo::regT3;
            }

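            // As above: when pointer profiling is enabled, sign the return address
            // against the frame slot it is being stored into.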
#if USE(POINTER_PROFILING)
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->returnPCOffset() + sizeof(void*)), GPRInfo::callFrameRegister, GPRInfo::regT2);
            jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::nonArgGPR0);
            jit.tagPtr(GPRInfo::nonArgGPR0, GPRInfo::regT2);
            jit.storePtr(GPRInfo::nonArgGPR0, AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
#else
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
#endif
        }

        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::codeBlock)));

        // Restore the inline call frame's callee-save registers.
        // If this inlined frame is a tail call that will return back to the original caller, we need to
        // copy the prior contents of the tag registers already saved for the outer frame to this frame.
        jit.emitSaveOrCopyCalleeSavesFor(
            baselineCodeBlock,
            static_cast<VirtualRegister>(inlineCallFrame->stackOffset),
            trueCaller ? AssemblyHelpers::UseExistingTagRegisterContents : AssemblyHelpers::CopyBaselineCalleeSavedRegistersFromBaseFrame,
            GPRInfo::regT2);

        if (!inlineCallFrame->isVarargs())
            jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->argumentCountIncludingThis), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount)));
#if USE(JSVALUE64)
        jit.store64(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        uint32_t locationBits = CallSiteIndex(codeOrigin->bytecodeIndex).bits();
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount)));
        if (!inlineCallFrame->isClosureCall)
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::callee)));
#else // USE(JSVALUE64), so this is the 32-bit path.
        jit.storePtr(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        Instruction* instruction = &baselineCodeBlock->instructions()[codeOrigin->bytecodeIndex];
        uint32_t locationBits = CallSiteIndex(instruction).bits();
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::callee)));
        if (!inlineCallFrame->isClosureCall)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::callee)));
#endif // USE(JSVALUE64)
    }

    // We don't need to set the top-level code origin if we only did inline tail calls.
    if (codeOrigin) {
#if USE(JSVALUE64)
        uint32_t locationBits = CallSiteIndex(codeOrigin->bytecodeIndex).bits();
#else
        Instruction* instruction = &jit.baselineCodeBlock()->instructions()[codeOrigin->bytecodeIndex];
        uint32_t locationBits = CallSiteIndex(instruction).bits();
#endif
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(CallFrameSlot::argumentCount)));
    }
}

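// Emits a generational write barrier on the given owner cell, since the exit ramp is
// about to store into baseline profiling data the GC may be scanning. The barrier is
// skipped entirely when the cell is already remembered or in eden.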
static void osrWriteBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch)
{
    AssemblyHelpers::Jump ownerIsRememberedOrInEden = jit.barrierBranchWithoutFence(owner);

    // We need these extra stack slots because setupArguments() will use poke() on x86.
#if CPU(X86)
    jit.subPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 4), MacroAssembler::stackPointerRegister);
#endif

    jit.setupArguments<decltype(operationOSRWriteBarrier)>(owner);
    jit.move(MacroAssembler::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(operationOSRWriteBarrier)), scratch);
    jit.call(scratch, OperationPtrTag);

#if CPU(X86)
    jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 4), MacroAssembler::stackPointerRegister);
#endif

    ownerIsRememberedOrInEden.link(&jit);
}

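// Performs the final bookkeeping of an OSR exit and transfers control: write-barriers
// every baseline code block we might profile into, repoints the call frame and stack
// pointer at the baseline frame, and jumps to the corresponding bytecode in the
// baseline JIT code.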
void adjustAndJumpToTarget(VM& vm, CCallHelpers& jit, const OSRExitBase& exit)
{
    jit.memoryFence();
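    // One fence up front so that the fence-less barrier checks
    // (barrierBranchWithoutFence) in osrWriteBarrier() below are safe under the
    // concurrent collector.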

    jit.move(
        AssemblyHelpers::TrustedImmPtr(
            jit.codeBlock()->baselineAlternative()), GPRInfo::argumentGPR1);
    osrWriteBarrier(jit, GPRInfo::argumentGPR1, GPRInfo::nonArgGPR0);

    // We barrier all inlined frames -- and not just the current inline stack --
    // because we don't know which inlined function owns the value profile that
    // we'll update when we exit. In the case of "f() { a(); b(); }", if both
    // a and b are inlined, we might exit inside b due to a bad value loaded
    // from a.
    // FIXME: MethodOfGettingAValueProfile should remember which CodeBlock owns
    // the value profile.
    InlineCallFrameSet* inlineCallFrames = jit.codeBlock()->jitCode()->dfgCommon()->inlineCallFrames.get();
    if (inlineCallFrames) {
        for (InlineCallFrame* inlineCallFrame : *inlineCallFrames) {
            jit.move(
                AssemblyHelpers::TrustedImmPtr(
                    inlineCallFrame->baselineCodeBlock.get()), GPRInfo::argumentGPR1);
            osrWriteBarrier(jit, GPRInfo::argumentGPR1, GPRInfo::nonArgGPR0);
        }
    }

    if (exit.m_codeOrigin.inlineCallFrame)
        jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

    CodeBlock* codeBlockForExit = jit.baselineCodeBlockFor(exit.m_codeOrigin);
    ASSERT(codeBlockForExit == codeBlockForExit->baselineVersion());
    ASSERT(codeBlockForExit->jitType() == JITCode::BaselineJIT);
    CodeLocationLabel<JSEntryPtrTag> codeLocation = codeBlockForExit->jitCodeMap().find(exit.m_codeOrigin.bytecodeIndex);
    ASSERT(codeLocation);

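    // The jit code map hands back a JSEntryPtrTag-tagged label; retag it for the OSR
    // exit ramp's indirect jump, and move the stack pointer to where the baseline code
    // block expects it before (for exception exits) publishing the frame for op_catch.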
    void* jumpTarget = codeLocation.retagged<OSRExitPtrTag>().executableAddress();
    jit.addPtr(AssemblyHelpers::TrustedImm32(JIT::stackPointerOffsetFor(codeBlockForExit) * sizeof(Register)), GPRInfo::callFrameRegister, AssemblyHelpers::stackPointerRegister);
    if (exit.isExceptionHandler()) {
        // Since we're jumping to op_catch, we need to set callFrameForCatch.
        jit.storePtr(GPRInfo::callFrameRegister, vm.addressOfCallFrameForCatch());
    }

    jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
    jit.jump(GPRInfo::regT2, OSRExitPtrTag);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)