We should be able to inline getter/setter calls inside an inline cache even when...
[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGJITCode.cpp
1 /*
2  * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGJITCode.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "CodeBlock.h"
32 #include "JSCInlines.h"
33 #include "TrackedReferences.h"
34
35 namespace JSC { namespace DFG {
36
// Constructs an empty DFG JITCode. The DirectJITCode base is tagged with the
// DFGJIT tier so generic clients can tell this apart from baseline/FTL code.
JITCode::JITCode()
    : DirectJITCode(DFGJIT)
#if ENABLE(FTL_JIT)
    // FTL tier-up bookkeeping: no OSR-entry retries recorded yet, and OSR
    // entry into the FTL has not been abandoned.
    , osrEntryRetry(0)
    , abandonOSREntry(false)
#endif // ENABLE(FTL_JIT)
{
}
45
46 JITCode::~JITCode()
47 {
48 }
49
50 CommonData* JITCode::dfgCommon()
51 {
52     return &common;
53 }
54
55 JITCode* JITCode::dfg()
56 {
57     return this;
58 }
59
60 void JITCode::shrinkToFit()
61 {
62     common.shrinkToFit();
63     osrEntry.shrinkToFit();
64     osrExit.shrinkToFit();
65     speculationRecovery.shrinkToFit();
66     minifiedDFG.prepareAndShrink();
67     variableEventStream.shrinkToFit();
68 }
69
// Rebuilds the abstract ValueRecovery for every operand live at the given
// code origin, by replaying the variable event stream up to streamIndex
// against the minified DFG. Pure delegation; `result` is the out-parameter.
void JITCode::reconstruct(
    CodeBlock* codeBlock, CodeOrigin codeOrigin, unsigned streamIndex,
    Operands<ValueRecovery>& result)
{
    variableEventStream.reconstruct(
        codeBlock, codeOrigin, minifiedDFG, streamIndex, result);
}
77
78 void JITCode::reconstruct(
79     ExecState* exec, CodeBlock* codeBlock, CodeOrigin codeOrigin, unsigned streamIndex,
80     Operands<JSValue>& result)
81 {
82     Operands<ValueRecovery> recoveries;
83     reconstruct(codeBlock, codeOrigin, streamIndex, recoveries);
84     
85     result = Operands<JSValue>(OperandsLike, recoveries);
86     for (size_t i = result.size(); i--;)
87         result[i] = recoveries[i].recover(exec);
88 }
89
90 RegisterSet JITCode::liveRegistersToPreserveAtExceptionHandlingCallSite(CodeBlock* codeBlock, CallSiteIndex callSiteIndex)
91 {
92     for (OSRExit& exit : osrExit) {
93         if (exit.m_isExceptionHandler && exit.m_exceptionHandlerCallSiteIndex.bits() == callSiteIndex.bits()) {
94             Operands<ValueRecovery> valueRecoveries;
95             reconstruct(codeBlock, exit.m_codeOrigin, exit.m_streamIndex, valueRecoveries);
96             RegisterSet liveAtOSRExit;
97             for (size_t index = 0; index < valueRecoveries.size(); ++index) {
98                 const ValueRecovery& recovery = valueRecoveries[index];
99                 if (recovery.isInRegisters()) {
100                     if (recovery.isInGPR())
101                         liveAtOSRExit.set(recovery.gpr());
102                     else if (recovery.isInFPR())
103                         liveAtOSRExit.set(recovery.fpr());
104 #if USE(JSVALUE32_64)
105                     else if (recovery.isInJSValueRegs()) {
106                         liveAtOSRExit.set(recovery.payloadGPR());
107                         liveAtOSRExit.set(recovery.tagGPR());
108                     }
109 #endif
110                     else
111                         RELEASE_ASSERT_NOT_REACHED();
112                 }
113             }
114
115             return liveAtOSRExit;
116         }
117     }
118
119     return RegisterSet();
120 }
121
122 #if ENABLE(FTL_JIT)
123 bool JITCode::checkIfOptimizationThresholdReached(CodeBlock* codeBlock)
124 {
125     ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
126     return tierUpCounter.checkIfThresholdCrossedAndSet(codeBlock->baselineVersion());
127 }
128
129 void JITCode::optimizeNextInvocation(CodeBlock* codeBlock)
130 {
131     ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
132     if (Options::verboseOSR())
133         dataLog(*codeBlock, ": FTL-optimizing next invocation.\n");
134     tierUpCounter.setNewThreshold(0, codeBlock->baselineVersion());
135 }
136
137 void JITCode::dontOptimizeAnytimeSoon(CodeBlock* codeBlock)
138 {
139     ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
140     if (Options::verboseOSR())
141         dataLog(*codeBlock, ": Not FTL-optimizing anytime soon.\n");
142     tierUpCounter.deferIndefinitely();
143 }
144
145 void JITCode::optimizeAfterWarmUp(CodeBlock* codeBlock)
146 {
147     ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
148     if (Options::verboseOSR())
149         dataLog(*codeBlock, ": FTL-optimizing after warm-up.\n");
150     CodeBlock* baseline = codeBlock->baselineVersion();
151     tierUpCounter.setNewThreshold(
152         baseline->adjustedCounterValue(Options::thresholdForFTLOptimizeAfterWarmUp()),
153         baseline);
154 }
155
156 void JITCode::optimizeSoon(CodeBlock* codeBlock)
157 {
158     ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
159     if (Options::verboseOSR())
160         dataLog(*codeBlock, ": FTL-optimizing soon.\n");
161     CodeBlock* baseline = codeBlock->baselineVersion();
162     tierUpCounter.setNewThreshold(
163         baseline->adjustedCounterValue(Options::thresholdForFTLOptimizeSoon()),
164         baseline);
165 }
166
167 void JITCode::forceOptimizationSlowPathConcurrently(CodeBlock* codeBlock)
168 {
169     ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
170     if (Options::verboseOSR())
171         dataLog(*codeBlock, ": Forcing slow path concurrently for FTL entry.\n");
172     tierUpCounter.forceSlowPathConcurrently();
173 }
174
175 void JITCode::setOptimizationThresholdBasedOnCompilationResult(
176     CodeBlock* codeBlock, CompilationResult result)
177 {
178     ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
179     switch (result) {
180     case CompilationSuccessful:
181         optimizeNextInvocation(codeBlock);
182         codeBlock->baselineVersion()->m_hasBeenCompiledWithFTL = true;
183         return;
184     case CompilationFailed:
185         dontOptimizeAnytimeSoon(codeBlock);
186         codeBlock->baselineVersion()->m_didFailFTLCompilation = true;
187         return;
188     case CompilationDeferred:
189         optimizeAfterWarmUp(codeBlock);
190         return;
191     case CompilationInvalidated:
192         // This is weird - it will only happen in cases when the DFG code block (i.e.
193         // the code block that this JITCode belongs to) is also invalidated. So it
194         // doesn't really matter what we do. But, we do the right thing anyway. Note
195         // that us counting the reoptimization actually means that we might count it
196         // twice. But that's generally OK. It's better to overcount reoptimizations
197         // than it is to undercount them.
198         codeBlock->baselineVersion()->countReoptimization();
199         optimizeAfterWarmUp(codeBlock);
200         return;
201     }
202     RELEASE_ASSERT_NOT_REACHED();
203 }
204 #endif // ENABLE(FTL_JIT)
205
206 void JITCode::validateReferences(const TrackedReferences& trackedReferences)
207 {
208     common.validateReferences(trackedReferences);
209     
210     for (OSREntryData& entry : osrEntry) {
211         for (unsigned i = entry.m_expectedValues.size(); i--;)
212             entry.m_expectedValues[i].validateReferences(trackedReferences);
213     }
214     
215     minifiedDFG.validateReferences(trackedReferences);
216 }
217
218 } } // namespace JSC::DFG
219
220 #endif // ENABLE(DFG_JIT)