/*
 * Copyright (C) 2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#if ENABLE(JIT)

#include "ArithProfile.h"
#include "CCallHelpers.h"
#include "JITAddGenerator.h"
#include "JITMathICInlineResult.h"
#include "JITMulGenerator.h"
#include "LinkBuffer.h"
#include "Repatch.h"
#include "SnippetOperand.h"

namespace JSC {

class LinkBuffer;

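// Bookkeeping produced while emitting one inline math IC: the labels bounding the fast
// path, the slow path call and entry, the jumps that bail to the slow path, and whether
// the slow path should attempt to repatch this IC.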
struct MathICGenerationState {
    MacroAssembler::Label fastPathStart;
    MacroAssembler::Label fastPathEnd;
    MacroAssembler::Label slowPathStart;
    MacroAssembler::Call slowPathCall;
    MacroAssembler::JumpList slowPathJumps;
    bool shouldSlowPathRepatch;
};

#define ENABLE_MATH_IC_STATS 0

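// A math IC ties a snippet generator (e.g. JITAddGenerator or JITMulGenerator) to a
// patchable inline region. generateInline() emits the inline code (or just a patchable
// jump when the ArithProfile has not observed any operand types yet), finalizeInlineCode()
// records where that code ended up once the enclosing LinkBuffer is finalized, and
// generateOutOfLine() later regenerates the operation out of line and repatches the
// inline region to jump to it.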
template <typename GeneratorType>
class JITMathIC {
public:
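    // Locations inside the finalized inline region, computed from the offsets recorded
    // by finalizeInlineCode().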
    CodeLocationLabel doneLocation() { return m_inlineStart.labelAtOffset(m_inlineSize); }
    CodeLocationLabel slowPathStartLocation() { return m_inlineStart.labelAtOffset(m_deltaFromStartToSlowPathStart); }
    CodeLocationCall slowPathCallLocation() { return m_inlineStart.callAtOffset(m_deltaFromStartToSlowPathCallLocation); }

    bool isLeftOperandValidConstant() const { return m_generator.isLeftOperandValidConstant(); }
    bool isRightOperandValidConstant() const { return m_generator.isRightOperandValidConstant(); }

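    // Emit the inline fast path at the current position in the assembler. Returns true when
    // inline code (or, before any operand types have been observed, just a patchable jump
    // placeholder) was emitted; returns false when the generator declines to emit inline code.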
    bool generateInline(CCallHelpers& jit, MathICGenerationState& state, bool shouldEmitProfiling = true)
    {
        state.fastPathStart = jit.label();
        size_t startSize = jit.m_assembler.buffer().codeSize();

        if (ArithProfile* arithProfile = m_generator.arithProfile()) {
            if (arithProfile->lhsObservedType().isEmpty() || arithProfile->rhsObservedType().isEmpty()) {
                // It looks like the MathIC has yet to execute. We don't want to emit code in this
                // case for a couple of reasons. First, the operation may never execute, so if we
                // don't emit code, it's a win. Second, if the operation does execute, we can emit
                // better code once we have an idea about the types of lhs and rhs.
                state.slowPathJumps.append(jit.patchableJump());
                state.shouldSlowPathRepatch = true;
                state.fastPathEnd = jit.label();
                ASSERT(!m_generateFastPathOnRepatch); // We should have gathered some observed type info for lhs and rhs before trying to regenerate again.
                m_generateFastPathOnRepatch = true;
                size_t inlineSize = jit.m_assembler.buffer().codeSize() - startSize;
                ASSERT_UNUSED(inlineSize, static_cast<ptrdiff_t>(inlineSize) <= MacroAssembler::maxJumpReplacementSize());
                return true;
            }
        }

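        // The generator inspects the operands (and any observed type info) and reports
        // whether it emitted a compact fast path, wants the full snippet, or nothing.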
        JITMathICInlineResult result = m_generator.generateInline(jit, state);

        switch (result) {
        case JITMathICInlineResult::GeneratedFastPath: {
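            // Pad the inline sequence out to the jump replacement size so that
            // generateOutOfLine() can later overwrite it with a jump to an out-of-line stub.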
            size_t inlineSize = jit.m_assembler.buffer().codeSize() - startSize;
            if (static_cast<ptrdiff_t>(inlineSize) < MacroAssembler::maxJumpReplacementSize()) {
                size_t nopsToEmitInBytes = MacroAssembler::maxJumpReplacementSize() - inlineSize;
                jit.emitNops(nopsToEmitInBytes);
            }
            state.shouldSlowPathRepatch = true;
            state.fastPathEnd = jit.label();
            return true;
        }
        case JITMathICInlineResult::GenerateFullSnippet: {
            MacroAssembler::JumpList endJumpList;
            bool result = m_generator.generateFastPath(jit, endJumpList, state.slowPathJumps, shouldEmitProfiling);
            if (result) {
                state.fastPathEnd = jit.label();
                state.shouldSlowPathRepatch = false;
                endJumpList.link(&jit);
                return true;
            }
            return false;
        }
        case JITMathICInlineResult::DontGenerate: {
            return false;
        }
        default:
            ASSERT_NOT_REACHED();
        }

        return false;
    }

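    // Regenerate the math operation out of line, either as a compact fast path (once the
    // profile has observed types) or as the full snippet, then repatch the inline region to
    // jump to the new code and, where appropriate, the slow path call to callReplacement.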
    void generateOutOfLine(VM& vm, CodeBlock* codeBlock, FunctionPtr callReplacement)
    {
        auto linkJumpToOutOfLineSnippet = [&] () {
            CCallHelpers jit(&vm, codeBlock);
            auto jump = jit.jump();
            // We don't need a nop sled here because nobody should be jumping into the middle of an IC.
            bool needsBranchCompaction = false;
            RELEASE_ASSERT(jit.m_assembler.buffer().codeSize() <= static_cast<size_t>(m_inlineSize));
            LinkBuffer linkBuffer(jit, m_inlineStart.dataLocation(), jit.m_assembler.buffer().codeSize(), JITCompilationMustSucceed, needsBranchCompaction);
            RELEASE_ASSERT(linkBuffer.isValid());
            linkBuffer.link(jump, CodeLocationLabel(m_code.code()));
            FINALIZE_CODE(linkBuffer, ("JITMathIC: linking constant jump to out of line stub"));
        };

        auto replaceCall = [&] () {
            ftlThunkAwareRepatchCall(codeBlock, slowPathCallLocation(), callReplacement);
        };

        bool shouldEmitProfiling = !JITCode::isOptimizingJIT(codeBlock->jitType());

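        // The first repatch after the placeholder jump: the profile has now observed some
        // operand types, so try once more to generate a compact fast path, this time out of line.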
        if (m_generateFastPathOnRepatch) {
            CCallHelpers jit(&vm, codeBlock);
            MathICGenerationState generationState;
            bool generatedInline = generateInline(jit, generationState, shouldEmitProfiling);

            // We no longer want to try to regenerate the fast path.
            m_generateFastPathOnRepatch = false;

            if (generatedInline) {
                auto jumpToDone = jit.jump();

                LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationCanFail);
                if (!linkBuffer.didFailToAllocate()) {
                    linkBuffer.link(generationState.slowPathJumps, slowPathStartLocation());
                    linkBuffer.link(jumpToDone, doneLocation());

                    m_code = FINALIZE_CODE_FOR(
                        codeBlock, linkBuffer, ("JITMathIC: generating out of line fast IC snippet"));

                    if (!generationState.shouldSlowPathRepatch) {
                        // We won't need to regenerate, so we can wire the slow path call
                        // to a non-repatching variant.
                        replaceCall();
                    }

                    linkJumpToOutOfLineSnippet();

                    return;
                }
            }

            // We weren't able to generate an out-of-line fast path,
            // so we just generate the snippet in its full generality.
        }

        // We rewire to the alternate regardless of whether or not we can allocate the out-of-line
        // path, because if we fail to allocate the out-of-line path, we don't want to waste time
        // trying to allocate it in the future.
        replaceCall();

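        // Emit the full snippet out of line, link its exits back to the inline done and
        // slow path locations, and stash the finished code in m_code.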
        {
            CCallHelpers jit(&vm, codeBlock);

            MacroAssembler::JumpList endJumpList;
            MacroAssembler::JumpList slowPathJumpList;

            bool emittedFastPath = m_generator.generateFastPath(jit, endJumpList, slowPathJumpList, shouldEmitProfiling);
            if (!emittedFastPath)
                return;
            endJumpList.append(jit.jump());

            LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationCanFail);
            if (linkBuffer.didFailToAllocate())
                return;

            linkBuffer.link(endJumpList, doneLocation());
            linkBuffer.link(slowPathJumpList, slowPathStartLocation());

            m_code = FINALIZE_CODE_FOR(
                codeBlock, linkBuffer, ("JITMathIC: generating out of line IC snippet"));
        }

        linkJumpToOutOfLineSnippet();
    }

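    // Called when the enclosing code block's LinkBuffer is finalized: record where the
    // inline region starts and the offsets used to recover the done / slow path locations.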
    void finalizeInlineCode(const MathICGenerationState& state, LinkBuffer& linkBuffer)
    {
        CodeLocationLabel start = linkBuffer.locationOf(state.fastPathStart);
        m_inlineStart = start;

        m_inlineSize = MacroAssembler::differenceBetweenCodePtr(
            start, linkBuffer.locationOf(state.fastPathEnd));
        ASSERT(m_inlineSize > 0);

        m_deltaFromStartToSlowPathCallLocation = MacroAssembler::differenceBetweenCodePtr(
            start, linkBuffer.locationOf(state.slowPathCall));
        m_deltaFromStartToSlowPathStart = MacroAssembler::differenceBetweenCodePtr(
            start, linkBuffer.locationOf(state.slowPathStart));
    }

#if ENABLE(MATH_IC_STATS)
    size_t m_generatedCodeSize { 0 };
    size_t codeSize() const
    {
        size_t result = m_generatedCodeSize;
        if (m_code)
            result += m_code.size();
        return result;
    }
#endif

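    // The out-of-line stub (if any), the bounds of the inline region, and the offsets from
    // its start to the slow path call and slow path entry.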
    MacroAssemblerCodeRef m_code;
    CodeLocationLabel m_inlineStart;
    int32_t m_inlineSize;
    int32_t m_deltaFromStartToSlowPathCallLocation;
    int32_t m_deltaFromStartToSlowPathStart;
    bool m_generateFastPathOnRepatch { false };
    GeneratorType m_generator;
};

typedef JITMathIC<JITAddGenerator> JITAddIC;
typedef JITMathIC<JITMulGenerator> JITMulIC;

} // namespace JSC

#endif // ENABLE(JIT)