JITMathIC was misusing maxJumpReplacementSize
[WebKit-https.git] / Source / JavaScriptCore / jit / JITMathIC.h
1 /*
2  * Copyright (C) 2016 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #pragma once
27
28 #if ENABLE(JIT)
29
30 #include "ArithProfile.h"
31 #include "CCallHelpers.h"
32 #include "JITAddGenerator.h"
33 #include "JITMathICInlineResult.h"
34 #include "JITMulGenerator.h"
35 #include "JITSubGenerator.h"
36 #include "LinkBuffer.h"
37 #include "Repatch.h"
38 #include "SnippetOperand.h"
39
40 namespace JSC {
41
42 class LinkBuffer;
43
44 struct MathICGenerationState {
45     MacroAssembler::Label fastPathStart;
46     MacroAssembler::Label fastPathEnd;
47     MacroAssembler::Label slowPathStart;
48     MacroAssembler::Call slowPathCall;
49     MacroAssembler::JumpList slowPathJumps;
50     bool shouldSlowPathRepatch;
51 };
52
53 #define ENABLE_MATH_IC_STATS 0
54
template <typename GeneratorType>
class JITMathIC {
public:
    // Layout of a finalized IC: the inline region starts at m_inlineStart and is
    // m_inlineSize bytes long; the slow-path entry and the slow-path call site
    // sit at fixed byte deltas from that start (recorded by finalizeInlineCode()).
    CodeLocationLabel doneLocation() { return m_inlineStart.labelAtOffset(m_inlineSize); }
    CodeLocationLabel slowPathStartLocation() { return m_inlineStart.labelAtOffset(m_deltaFromStartToSlowPathStart); }
    CodeLocationCall slowPathCallLocation() { return m_inlineStart.callAtOffset(m_deltaFromStartToSlowPathCallLocation); }

    bool isLeftOperandValidConstant() const { return m_generator.isLeftOperandValidConstant(); }
    bool isRightOperandValidConstant() const { return m_generator.isRightOperandValidConstant(); }

    // Emits the inline portion of the IC into 'jit', filling 'state' with the
    // labels/jumps the caller must pass to finalizeInlineCode() after linking.
    // Returns false when nothing was emitted (caller falls back to the non-IC path).
    bool generateInline(CCallHelpers& jit, MathICGenerationState& state, bool shouldEmitProfiling = true)
    {
#if CPU(ARM_TRADITIONAL)
        // FIXME: Remove this workaround once the proper fixes are landed.
        // [ARM] Disable Inline Caching on ARMv7 traditional until proper fix
        // https://bugs.webkit.org/show_bug.cgi?id=159759
        return false;
#endif

        state.fastPathStart = jit.label();
        size_t startSize = jit.m_assembler.buffer().codeSize();

        if (ArithProfile* arithProfile = m_generator.arithProfile()) {
            if (arithProfile->lhsObservedType().isEmpty() || arithProfile->rhsObservedType().isEmpty()) {
                // It looks like the MathIC has yet to execute. We don't want to emit code in this
                // case for a couple reasons. First, the operation may never execute, so if we don't emit
                // code, it's a win. Second, if the operation does execute, we can emit better code
                // once we have an idea about the types of lhs and rhs.
                // Emit only a patchable jump as a placeholder; generateOutOfLine()
                // will regenerate a real fast path once type info is available.
                state.slowPathJumps.append(jit.patchableJump());
                size_t inlineSize = jit.m_assembler.buffer().codeSize() - startSize;
                ASSERT_UNUSED(inlineSize, static_cast<ptrdiff_t>(inlineSize) <= MacroAssembler::patchableJumpSize());
                state.shouldSlowPathRepatch = true;
                state.fastPathEnd = jit.label();
                ASSERT(!m_generateFastPathOnRepatch); // We should have gathered some observed type info for lhs and rhs before trying to regenerate again.
                m_generateFastPathOnRepatch = true;
                return true;
            }
        }

        JITMathICInlineResult result = m_generator.generateInline(jit, state);

        switch (result) {
        case JITMathICInlineResult::GeneratedFastPath: {
            // Pad the inline region with nops up to patchableJumpSize() so that
            // linkJumpToOutOfLineSnippet() can later overwrite it with a jump.
            size_t inlineSize = jit.m_assembler.buffer().codeSize() - startSize;
            if (static_cast<ptrdiff_t>(inlineSize) < MacroAssembler::patchableJumpSize()) {
                size_t nopsToEmitInBytes = MacroAssembler::patchableJumpSize() - inlineSize;
                jit.emitNops(nopsToEmitInBytes);
            }
            state.shouldSlowPathRepatch = true;
            state.fastPathEnd = jit.label();
            return true;
        }
        case JITMathICInlineResult::GenerateFullSnippet: {
            MacroAssembler::JumpList endJumpList;
            // NOTE(review): this 'result' shadows the JITMathICInlineResult
            // 'result' above; both uses are correct, but the shadowing is easy to misread.
            bool result = m_generator.generateFastPath(jit, endJumpList, state.slowPathJumps, shouldEmitProfiling);
            if (result) {
                state.fastPathEnd = jit.label();
                // The full snippet handles all cases inline, so the slow path
                // never needs to repatch this IC again.
                state.shouldSlowPathRepatch = false;
                endJumpList.link(&jit);
                return true;
            }
            return false;
        }
        case JITMathICInlineResult::DontGenerate: {
            return false;
        }
        default:
            ASSERT_NOT_REACHED();
        }

        return false;
    }

    // Called when the slow path decides to repatch: emits an out-of-line snippet,
    // patches the inline region to jump to it, and (when no further repatching is
    // possible) rewires the slow-path call to 'callReplacement'.
    void generateOutOfLine(VM& vm, CodeBlock* codeBlock, FunctionPtr callReplacement)
    {
        // Overwrites the start of the inline region with an unconditional jump
        // to m_code (the freshly generated out-of-line snippet).
        auto linkJumpToOutOfLineSnippet = [&] () {
            CCallHelpers jit(&vm, codeBlock);
            auto jump = jit.jump();
            // We don't need a nop sled here because nobody should be jumping into the middle of an IC.
            bool needsBranchCompaction = false;
            RELEASE_ASSERT(jit.m_assembler.buffer().codeSize() <= static_cast<size_t>(m_inlineSize));
            LinkBuffer linkBuffer(jit, m_inlineStart.dataLocation(), jit.m_assembler.buffer().codeSize(), JITCompilationMustSucceed, needsBranchCompaction);
            RELEASE_ASSERT(linkBuffer.isValid());
            linkBuffer.link(jump, CodeLocationLabel(m_code.code()));
            FINALIZE_CODE(linkBuffer, ("JITMathIC: linking constant jump to out of line stub"));
        };

        // Repoints the IC's slow-path call at 'callReplacement'.
        auto replaceCall = [&] () {
            ftlThunkAwareRepatchCall(codeBlock, slowPathCallLocation(), callReplacement);
        };

        bool shouldEmitProfiling = !JITCode::isOptimizingJIT(codeBlock->jitType());

        if (m_generateFastPathOnRepatch) {

            CCallHelpers jit(&vm, codeBlock);
            MathICGenerationState generationState;
            // Retry the profiled fast path now that lhs/rhs type info exists; it
            // is emitted out of line and reached via the patched inline jump.
            bool generatedInline = generateInline(jit, generationState, shouldEmitProfiling);

            // We no longer want to try to regenerate the fast path.
            m_generateFastPathOnRepatch = false;

            if (generatedInline) {
                auto jumpToDone = jit.jump();

                LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationCanFail);
                if (!linkBuffer.didFailToAllocate()) {
                    linkBuffer.link(generationState.slowPathJumps, slowPathStartLocation());
                    linkBuffer.link(jumpToDone, doneLocation());

                    m_code = FINALIZE_CODE_FOR(
                        codeBlock, linkBuffer, ("JITMathIC: generating out of line fast IC snippet"));

                    if (!generationState.shouldSlowPathRepatch) {
                        // We won't need to regenerate, so we can wire the slow path call
                        // to a non repatching variant.
                        replaceCall();
                    }

                    linkJumpToOutOfLineSnippet();

                    return;
                }
            }

            // We weren't able to generate an out of line fast path.
            // We just generate the snippet in its full generality.
        }

        // We rewire to the alternate regardless of whether or not we can allocate the out of line path
        // because if we fail allocating the out of line path, we don't want to waste time trying to
        // allocate it in the future.
        replaceCall();

        {
            CCallHelpers jit(&vm, codeBlock);

            MacroAssembler::JumpList endJumpList;
            MacroAssembler::JumpList slowPathJumpList;

            // Emit the fully general snippet out of line.
            bool emittedFastPath = m_generator.generateFastPath(jit, endJumpList, slowPathJumpList, shouldEmitProfiling);
            if (!emittedFastPath)
                return;
            endJumpList.append(jit.jump());

            LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationCanFail);
            if (linkBuffer.didFailToAllocate())
                return;

            linkBuffer.link(endJumpList, doneLocation());
            linkBuffer.link(slowPathJumpList, slowPathStartLocation());

            m_code = FINALIZE_CODE_FOR(
                codeBlock, linkBuffer, ("JITMathIC: generating out of line IC snippet"));
        }

        linkJumpToOutOfLineSnippet();
    }

    // Records where the inline IC landed after linking: the start address plus
    // byte offsets (relative to fastPathStart) of the fast-path end, the
    // slow-path call site, and the slow-path entry.
    void finalizeInlineCode(const MathICGenerationState& state, LinkBuffer& linkBuffer)
    {
        CodeLocationLabel start = linkBuffer.locationOf(state.fastPathStart);
        m_inlineStart = start;

        m_inlineSize = MacroAssembler::differenceBetweenCodePtr(
            start, linkBuffer.locationOf(state.fastPathEnd));
        ASSERT(m_inlineSize > 0);

        m_deltaFromStartToSlowPathCallLocation = MacroAssembler::differenceBetweenCodePtr(
            start, linkBuffer.locationOf(state.slowPathCall));
        m_deltaFromStartToSlowPathStart = MacroAssembler::differenceBetweenCodePtr(
            start, linkBuffer.locationOf(state.slowPathStart));
    }

#if ENABLE(MATH_IC_STATS)
    size_t m_generatedCodeSize { 0 };
    size_t codeSize() const
    {
        size_t result = m_generatedCodeSize;
        if (m_code)
            result += m_code.size();
        return result;
    }
#endif

    MacroAssemblerCodeRef m_code; // Out-of-line snippet, if one has been generated.
    CodeLocationLabel m_inlineStart; // Start of the inline region in the host code block.
    int32_t m_inlineSize; // Byte length of the inline region (fastPathStart to fastPathEnd).
    int32_t m_deltaFromStartToSlowPathCallLocation;
    int32_t m_deltaFromStartToSlowPathStart;
    bool m_generateFastPathOnRepatch { false }; // Set when only a placeholder jump was emitted inline.
    GeneratorType m_generator;
};
248
249 typedef JITMathIC<JITAddGenerator> JITAddIC;
250 typedef JITMathIC<JITMulGenerator> JITMulIC;
251 typedef JITMathIC<JITSubGenerator> JITSubIC;
252
253 } // namespace JSC
254
255 #endif // ENABLE(JIT)