/*
 * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGJITCode.h"

#if ENABLE(DFG_JIT)

#include "CodeBlock.h"
#include "FTLForOSREntryJITCode.h"
#include "JSCInlines.h"
#include "TrackedReferences.h"

namespace JSC { namespace DFG {

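// DFG::JITCode is the DFGJIT flavor of JITCode. Besides the machine code it
// inherits from DirectJITCode, it owns the metadata the DFG needs at run
// time: OSR entry and exit records, speculation recoveries, the minified
// graph and variable event stream used to reconstruct bytecode state, and,
// when the FTL is enabled, the tier-up counter and OSR entry block that
// drive promotion to the FTL.
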
JITCode::JITCode()
    : DirectJITCode(DFGJIT)
#if ENABLE(FTL_JIT)
    , osrEntryRetry(0)
    , abandonOSREntry(false)
#endif // ENABLE(FTL_JIT)
{
}

JITCode::~JITCode()
{
}

CommonData* JITCode::dfgCommon()
{
    return &common;
}

JITCode* JITCode::dfg()
{
    return this;
}

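// Called once compilation has finished populating the metadata vectors;
// trims their slack capacity to save memory.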
void JITCode::shrinkToFit()
{
    common.shrinkToFit();
    osrEntry.shrinkToFit();
    osrExit.shrinkToFit();
    speculationRecovery.shrinkToFit();
    minifiedDFG.prepareAndShrink();
    variableEventStream.shrinkToFit();
}

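// Replays the variable event stream up to streamIndex to compute, for the
// given code origin, a ValueRecovery for each operand: a description of
// where that operand's value can be found (register, stack slot, constant,
// etc.).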
void JITCode::reconstruct(
    CodeBlock* codeBlock, CodeOrigin codeOrigin, unsigned streamIndex,
    Operands<ValueRecovery>& result)
{
    variableEventStream.reconstruct(
        codeBlock, codeOrigin, minifiedDFG, streamIndex, result);
}

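// As above, but goes one step further: executes each recovery against the
// live call frame to materialize the operand values themselves.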
void JITCode::reconstruct(
    ExecState* exec, CodeBlock* codeBlock, CodeOrigin codeOrigin, unsigned streamIndex,
    Operands<Optional<JSValue>>& result)
{
    Operands<ValueRecovery> recoveries;
    reconstruct(codeBlock, codeOrigin, streamIndex, recoveries);

    result = Operands<Optional<JSValue>>(OperandsLike, recoveries);
    for (size_t i = result.size(); i--;)
        result[i] = recoveries[i].recover(exec);
}

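// If the call site identified by callSiteIndex has an OSR exit acting as its
// exception handler, returns the set of registers holding live operand
// values at that exit; those registers must be preserved while the handler
// runs. Returns the empty set if no such exit exists.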
RegisterSet JITCode::liveRegistersToPreserveAtExceptionHandlingCallSite(CodeBlock* codeBlock, CallSiteIndex callSiteIndex)
{
    for (OSRExit& exit : osrExit) {
        if (exit.isExceptionHandler() && exit.m_exceptionHandlerCallSiteIndex.bits() == callSiteIndex.bits()) {
            Operands<ValueRecovery> valueRecoveries;
            reconstruct(codeBlock, exit.m_codeOrigin, exit.m_streamIndex, valueRecoveries);
            RegisterSet liveAtOSRExit;
            for (size_t index = 0; index < valueRecoveries.size(); ++index) {
                const ValueRecovery& recovery = valueRecoveries[index];
                if (recovery.isInRegisters()) {
                    if (recovery.isInGPR())
                        liveAtOSRExit.set(recovery.gpr());
                    else if (recovery.isInFPR())
                        liveAtOSRExit.set(recovery.fpr());
#if USE(JSVALUE32_64)
                    else if (recovery.isInJSValueRegs()) {
                        liveAtOSRExit.set(recovery.payloadGPR());
                        liveAtOSRExit.set(recovery.tagGPR());
                    }
#endif
                    else
                        RELEASE_ASSERT_NOT_REACHED();
                }
            }

            return liveAtOSRExit;
        }
    }

    return { };
}

#if ENABLE(FTL_JIT)
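// The functions below drive tier-up from the DFG to the FTL. They all act on
// tierUpCounter, the execution counter that decides when the DFG-compiled
// code should attempt an FTL compile.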
bool JITCode::checkIfOptimizationThresholdReached(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
    return tierUpCounter.checkIfThresholdCrossedAndSet(codeBlock);
}

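// Arms the counter so that the very next threshold check triggers tier-up.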
void JITCode::optimizeNextInvocation(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
    if (Options::verboseOSR())
        dataLog(*codeBlock, ": FTL-optimizing next invocation.\n");
    tierUpCounter.setNewThreshold(0, codeBlock);
}

void JITCode::dontOptimizeAnytimeSoon(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
    if (Options::verboseOSR())
        dataLog(*codeBlock, ": Not FTL-optimizing anytime soon.\n");
    tierUpCounter.deferIndefinitely();
}

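// Re-arms the counter with the standard warm-up (or, below, the shorter
// "soon") threshold. The threshold is derived from the baseline CodeBlock so
// it picks up the usual per-CodeBlock counter scaling.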
void JITCode::optimizeAfterWarmUp(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
    if (Options::verboseOSR())
        dataLog(*codeBlock, ": FTL-optimizing after warm-up.\n");
    CodeBlock* baseline = codeBlock->baselineVersion();
    tierUpCounter.setNewThreshold(
        baseline->adjustedCounterValue(Options::thresholdForFTLOptimizeAfterWarmUp()),
        baseline);
}

void JITCode::optimizeSoon(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
    if (Options::verboseOSR())
        dataLog(*codeBlock, ": FTL-optimizing soon.\n");
    CodeBlock* baseline = codeBlock->baselineVersion();
    tierUpCounter.setNewThreshold(
        baseline->adjustedCounterValue(Options::thresholdForFTLOptimizeSoon()),
        codeBlock);
}

void JITCode::forceOptimizationSlowPathConcurrently(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
    if (Options::verboseOSR())
        dataLog(*codeBlock, ": Forcing slow path concurrently for FTL entry.\n");
    tierUpCounter.forceSlowPathConcurrently();
}

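// Called once an FTL compile finishes: reschedules tier-up according to the
// compilation result and records success or failure on the baseline
// CodeBlock.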
void JITCode::setOptimizationThresholdBasedOnCompilationResult(
    CodeBlock* codeBlock, CompilationResult result)
{
    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
    switch (result) {
    case CompilationSuccessful:
        optimizeNextInvocation(codeBlock);
        codeBlock->baselineVersion()->m_hasBeenCompiledWithFTL = true;
        return;
    case CompilationFailed:
        dontOptimizeAnytimeSoon(codeBlock);
        codeBlock->baselineVersion()->m_didFailFTLCompilation = true;
        return;
    case CompilationDeferred:
        optimizeAfterWarmUp(codeBlock);
        return;
    case CompilationInvalidated:
        // This is weird - it will only happen in cases when the DFG code block
        // (i.e. the code block that this JITCode belongs to) is also invalidated,
        // so it doesn't really matter what we do here. But we do the right thing
        // anyway. Note that counting the reoptimization here means we might count
        // it twice; that's generally OK, since it's better to overcount
        // reoptimizations than to undercount them.
        codeBlock->baselineVersion()->countReoptimization();
        optimizeAfterWarmUp(codeBlock);
        return;
    }
    RELEASE_ASSERT_NOT_REACHED();
}

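// Installs the FTL code block that loop-based OSR entry will jump into.
// Note: m_osrEntryBlock is a write-barriered reference, so the store is made
// through the owning cell for GC purposes.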
void JITCode::setOSREntryBlock(VM& vm, const JSCell* owner, CodeBlock* osrEntryBlock)
{
    if (Options::verboseOSR()) {
        dataLog(RawPointer(this), ": Setting OSR entry block to ", RawPointer(osrEntryBlock), "\n");
        dataLog("OSR entries will go to ", osrEntryBlock->jitCode()->ftlForOSREntry()->addressForCall(ArityCheckNotRequired), "\n");
    }
    m_osrEntryBlock.set(vm, owner, osrEntryBlock);
}
#endif // ENABLE(FTL_JIT)

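// Debug validation: asserts that every heap reference reachable from this
// code's metadata (common data, OSR entry expected values, minified graph)
// is present in the given TrackedReferences set.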
void JITCode::validateReferences(const TrackedReferences& trackedReferences)
{
    common.validateReferences(trackedReferences);

    for (OSREntryData& entry : osrEntry) {
        for (unsigned i = entry.m_expectedValues.size(); i--;)
            entry.m_expectedValues[i].validateReferences(trackedReferences);
    }

    minifiedDFG.validateReferences(trackedReferences);
}

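// Returns the CodeOrigin for a machine PC if it falls inside one of this
// code's OSR exit stubs; returns nullopt otherwise.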
Optional<CodeOrigin> JITCode::findPC(CodeBlock*, void* pc)
{
    for (OSRExit& exit : osrExit) {
        if (ExecutableMemoryHandle* handle = exit.m_code.executableMemory()) {
            if (handle->start().untaggedPtr() <= pc && pc < handle->end().untaggedPtr())
                return Optional<CodeOrigin>(exit.m_codeOriginForExitProfile);
        }
    }

    return WTF::nullopt;
}

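// Sorts the OSR entry records by bytecode index, presumably so that entry
// lookup can binary-search them, then verifies the ordering in debug builds.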
void JITCode::finalizeOSREntrypoints()
{
    auto comparator = [] (const auto& a, const auto& b) {
        return a.m_bytecodeIndex < b.m_bytecodeIndex;
    };
    std::sort(osrEntry.begin(), osrEntry.end(), comparator);

#if !ASSERT_DISABLED
    auto verifyIsSorted = [&] (auto& osrVector) {
        for (unsigned i = 0; i + 1 < osrVector.size(); ++i)
            ASSERT(osrVector[i].m_bytecodeIndex <= osrVector[i + 1].m_bytecodeIndex);
    };
    verifyIsSorted(osrEntry);
#endif
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)