2 * Copyright (C) 2016 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include "ArithProfile.h"
31 #include "CCallHelpers.h"
32 #include "JITAddGenerator.h"
33 #include "JITMathICInlineResult.h"
34 #include "JITMulGenerator.h"
35 #include "JITSubGenerator.h"
36 #include "LinkBuffer.h"
38 #include "SnippetOperand.h"
// Bookkeeping captured while a math IC's inline code is being emitted.
// finalizeInlineCode() later converts these labels into the start address
// and byte offsets the IC stores. (NOTE(review): the struct's closing brace
// falls in a gap of this excerpt.)
44 struct MathICGenerationState {
// Start of the inline region; base for all offset computations.
45     MacroAssembler::Label fastPathStart;
// End of the inline region (one past the last inline instruction).
46     MacroAssembler::Label fastPathEnd;
// Start of the out-of-line slow-path sequence for this IC.
47     MacroAssembler::Label slowPathStart;
// The call into the slow-path operation; repatched later by generateOutOfLine().
48     MacroAssembler::Call slowPathCall;
// Jumps emitted on the fast path that must be linked to slowPathStart.
49     MacroAssembler::JumpList slowPathJumps;
// True when the slow path should call a repatching thunk (so the IC can be
// regenerated once type information has been observed).
50     bool shouldSlowPathRepatch;
// Compile-time switch: set to 1 to collect code-size statistics for math ICs
// (see the ENABLE(MATH_IC_STATS) members further down).
53 #define ENABLE_MATH_IC_STATS 0
55 template <typename GeneratorType>
58 CodeLocationLabel doneLocation() { return m_inlineStart.labelAtOffset(m_inlineSize); }
59 CodeLocationLabel slowPathStartLocation() { return m_inlineStart.labelAtOffset(m_deltaFromStartToSlowPathStart); }
60 CodeLocationCall slowPathCallLocation() { return m_inlineStart.callAtOffset(m_deltaFromStartToSlowPathCallLocation); }
62 bool isLeftOperandValidConstant() const { return m_generator.isLeftOperandValidConstant(); }
63 bool isRightOperandValidConstant() const { return m_generator.isRightOperandValidConstant(); }
// Tries to emit this IC's inline (fast-path) code at the current position in
// 'jit'. Labels and patch points are recorded into 'state' so that
// finalizeInlineCode() can compute offsets once the enclosing code is linked.
// When 'shouldEmitProfiling' is set, full-snippet generation also emits type
// profiling. NOTE(review): the function's braces, the switch header, and the
// return statements fall in gaps of this excerpt — presumably returns true on
// the generated paths and false for DontGenerate; confirm against the full file.
65 bool generateInline(CCallHelpers& jit, MathICGenerationState& state, bool shouldEmitProfiling = true)
67 #if CPU(ARM_TRADITIONAL)
68 // FIXME: Remove this workaround once the proper fixes are landed.
69 // [ARM] Disable Inline Caching on ARMv7 traditional until proper fix
70 // https://bugs.webkit.org/show_bug.cgi?id=159759
// Record where the inline region begins, both as a label and as a byte
// offset into the assembler buffer (used below to measure emitted size).
74 state.fastPathStart = jit.label();
75 size_t startSize = jit.m_assembler.buffer().codeSize();
77 if (ArithProfile* arithProfile = m_generator.arithProfile()) {
78 if (arithProfile->lhsObservedType().isEmpty() || arithProfile->rhsObservedType().isEmpty()) {
79 // It looks like the MathIC has yet to execute. We don't want to emit code in this
80 // case for a couple reasons. First, the operation may never execute, so if we don't emit
81 // code, it's a win. Second, if the operation does execute, we can emit better code
82 // once we have an idea about the types of lhs and rhs.
// Emit only a patchable jump straight to the slow path, and request that
// the slow path repatch us once types have been observed.
83 state.slowPathJumps.append(jit.patchableJump());
84 state.shouldSlowPathRepatch = true;
85 state.fastPathEnd = jit.label();
86 ASSERT(!m_generateFastPathOnRepatch); // We should have gathered some observed type info for lhs and rhs before trying to regenerate again.
87 m_generateFastPathOnRepatch = true;
// The placeholder must fit within the region a jump replacement may occupy.
88 size_t inlineSize = jit.m_assembler.buffer().codeSize() - startSize;
89 ASSERT_UNUSED(inlineSize, static_cast<ptrdiff_t>(inlineSize) <= MacroAssembler::maxJumpReplacementSize());
// Ask the generator for an inline sequence; its result is switched on below
// (the switch statement itself is not visible in this excerpt).
94 JITMathICInlineResult result = m_generator.generateInline(jit, state);
97 case JITMathICInlineResult::GeneratedFastPath: {
// Pad with nops up to maxJumpReplacementSize() so the inline code can later
// be overwritten in place by a jump to an out-of-line stub.
98 size_t inlineSize = jit.m_assembler.buffer().codeSize() - startSize;
99 if (static_cast<ptrdiff_t>(inlineSize) < MacroAssembler::maxJumpReplacementSize()) {
100 size_t nopsToEmitInBytes = MacroAssembler::maxJumpReplacementSize() - inlineSize;
101 jit.emitNops(nopsToEmitInBytes);
103 state.shouldSlowPathRepatch = true;
104 state.fastPathEnd = jit.label();
107 case JITMathICInlineResult::GenerateFullSnippet: {
// The generator emits its full snippet inline; fast-path completions are
// collected in endJumpList and linked to fall through past the snippet.
108 MacroAssembler::JumpList endJumpList;
109 bool result = m_generator.generateFastPath(jit, endJumpList, state.slowPathJumps, shouldEmitProfiling);
111 state.fastPathEnd = jit.label();
// The full snippet handles all observed cases, so no repatching is needed.
112 state.shouldSlowPathRepatch = false;
113 endJumpList.link(&jit);
118 case JITMathICInlineResult::DontGenerate: {
122 ASSERT_NOT_REACHED();
// Generates (or regenerates) the out-of-line code for this IC after the slow
// path has been taken, then rewires the reserved inline region to jump to it.
// 'callReplacement' is the slow-path function to repatch the inline call to
// once no further regeneration is expected. NOTE(review): several statements
// (returns, some call-replacement sites, closing braces) fall in gaps of this
// excerpt; comments below describe only the visible lines.
128 void generateOutOfLine(VM& vm, CodeBlock* codeBlock, FunctionPtr callReplacement)
// Overwrites the inline region with a single jump to m_code (the stub).
130 auto linkJumpToOutOfLineSnippet = [&] () {
131 CCallHelpers jit(&vm, codeBlock);
132 auto jump = jit.jump();
133 // We don't need a nop sled here because nobody should be jumping into the middle of an IC.
// Branch compaction is disabled so the emitted jump keeps a predictable size
// when written over the existing inline code.
134 bool needsBranchCompaction = false;
// The replacement jump must fit inside the region reserved at inline time.
135 RELEASE_ASSERT(jit.m_assembler.buffer().codeSize() <= static_cast<size_t>(m_inlineSize));
136 LinkBuffer linkBuffer(jit, m_inlineStart.dataLocation(), jit.m_assembler.buffer().codeSize(), JITCompilationMustSucceed, needsBranchCompaction);
137 RELEASE_ASSERT(linkBuffer.isValid());
138 linkBuffer.link(jump, CodeLocationLabel(m_code.code()));
139 FINALIZE_CODE(linkBuffer, ("JITMathIC: linking constant jump to out of line stub"));
// Repoints the inline slow-path call at 'callReplacement'.
142 auto replaceCall = [&] () {
143 ftlThunkAwareRepatchCall(codeBlock, slowPathCallLocation(), callReplacement);
// Only non-optimizing (baseline-style) code wants profiling emitted.
146 bool shouldEmitProfiling = !JITCode::isOptimizingJIT(codeBlock->jitType());
148 if (m_generateFastPathOnRepatch) {
// First repatch after the "not yet executed" placeholder: now that type info
// has been observed, try generating the real fast path out of line.
150 CCallHelpers jit(&vm, codeBlock);
151 MathICGenerationState generationState;
152 bool generatedInline = generateInline(jit, generationState, shouldEmitProfiling);
154 // We no longer want to try to regenerate the fast path.
155 m_generateFastPathOnRepatch = false;
157 if (generatedInline) {
158 auto jumpToDone = jit.jump();
160 LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationCanFail);
161 if (!linkBuffer.didFailToAllocate()) {
// The stub's failure jumps go to the inline slow path; its completion jump
// rejoins the main code just past the inline region.
162 linkBuffer.link(generationState.slowPathJumps, slowPathStartLocation());
163 linkBuffer.link(jumpToDone, doneLocation());
165 m_code = FINALIZE_CODE_FOR(
166 codeBlock, linkBuffer, ("JITMathIC: generating out of line fast IC snippet"));
168 if (!generationState.shouldSlowPathRepatch) {
169 // We won't need to regenerate, so we can wire the slow path call
170 // to a non repatching variant.
// (The replaceCall() invocation falls in a gap of this excerpt.)
174 linkJumpToOutOfLineSnippet();
180 // We weren't able to generate an out of line fast path.
181 // We just generate the snippet in its full generality.
184 // We rewire to the alternate regardless of whether or not we can allocate the out of line path
185 // because if we fail allocating the out of line path, we don't want to waste time trying to
186 // allocate it in the future.
190 CCallHelpers jit(&vm, codeBlock);
192 MacroAssembler::JumpList endJumpList;
193 MacroAssembler::JumpList slowPathJumpList;
195 bool emittedFastPath = m_generator.generateFastPath(jit, endJumpList, slowPathJumpList, shouldEmitProfiling);
196 if (!emittedFastPath)
// Fall-through completion also counts as reaching the end of the snippet.
198 endJumpList.append(jit.jump());
200 LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationCanFail);
201 if (linkBuffer.didFailToAllocate())
// Completions rejoin the main path; failures go to the inline slow path.
204 linkBuffer.link(endJumpList, doneLocation());
205 linkBuffer.link(slowPathJumpList, slowPathStartLocation());
207 m_code = FINALIZE_CODE_FOR(
208 codeBlock, linkBuffer, ("JITMathIC: generating out of line IC snippet"));
211 linkJumpToOutOfLineSnippet();
// Called once the enclosing code's LinkBuffer is available. Converts the
// labels recorded in 'state' during generateInline() into the absolute start
// location and the byte offsets that doneLocation()/slowPathStartLocation()/
// slowPathCallLocation() compute from. NOTE(review): the function's braces
// fall in gaps of this excerpt.
214 void finalizeInlineCode(const MathICGenerationState& state, LinkBuffer& linkBuffer)
216 CodeLocationLabel start = linkBuffer.locationOf(state.fastPathStart);
217 m_inlineStart = start;
// Byte length of the inline region (fastPathStart .. fastPathEnd).
219 m_inlineSize = MacroAssembler::differenceBetweenCodePtr(
220 start, linkBuffer.locationOf(state.fastPathEnd));
221 ASSERT(m_inlineSize > 0);
// Slow-path locations are stored as deltas from the inline start, which is
// why the accessors above rebuild them with labelAtOffset()/callAtOffset().
223 m_deltaFromStartToSlowPathCallLocation = MacroAssembler::differenceBetweenCodePtr(
224 start, linkBuffer.locationOf(state.slowPathCall));
225 m_deltaFromStartToSlowPathStart = MacroAssembler::differenceBetweenCodePtr(
226 start, linkBuffer.locationOf(state.slowPathStart));
// Statistics support, compiled in only when MATH_IC_STATS is enabled.
// NOTE(review): the matching #endif and the function's braces/return fall in
// gaps of this excerpt.
229 #if ENABLE(MATH_IC_STATS)
// Running total of bytes emitted for this IC across (re)generations.
230 size_t m_generatedCodeSize { 0 };
// Reports generated bytes plus the current out-of-line stub's size.
231 size_t codeSize() const
233 size_t result = m_generatedCodeSize;
235 result += m_code.size();
// Most recently generated out-of-line stub for this IC.
240 MacroAssemblerCodeRef m_code;
// Start of the reserved inline region inside the owning code block.
241 CodeLocationLabel m_inlineStart;
// Byte length of the inline region; the deltas below are relative to
// m_inlineStart (see finalizeInlineCode()).
242 int32_t m_inlineSize;
243 int32_t m_deltaFromStartToSlowPathCallLocation;
244 int32_t m_deltaFromStartToSlowPathStart;
// Set by generateInline() when only a placeholder jump was emitted (no type
// info observed yet); cleared by generateOutOfLine() after one regeneration.
245 bool m_generateFastPathOnRepatch { false };
// The snippet generator (add/mul/sub) that actually emits the math code.
246 GeneratorType m_generator;
249 typedef JITMathIC<JITAddGenerator> JITAddIC;
250 typedef JITMathIC<JITMulGenerator> JITMulIC;
251 typedef JITMathIC<JITSubGenerator> JITSubIC;
255 #endif // ENABLE(JIT)