/*
 * Copyright (C) 2016-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "InlineAccess.h"

#if ENABLE(JIT)

#include "CCallHelpers.h"
#include "JSArray.h"
#include "JSCellInlines.h"
#include "LinkBuffer.h"
#include "ScratchRegisterAllocator.h"
#include "Structure.h"
#include "StructureStubInfo.h"

namespace JSC {

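// Debugging aid: assembles each flavor of inline cache with dummy register and
// offset values, prints the resulting code size for this platform, and then
// crashes. The reported sizes are useful when tuning the per-IC inline size
// constants for a given architecture.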
void InlineAccess::dumpCacheSizesAndCrash()
{
    GPRReg base = GPRInfo::regT0;
    GPRReg value = GPRInfo::regT1;
#if USE(JSVALUE32_64)
    JSValueRegs regs(base, value);
#else
    JSValueRegs regs(base);
#endif

    {
        CCallHelpers jit;

        GPRReg scratchGPR = value;
        jit.load8(CCallHelpers::Address(base, JSCell::indexingTypeAndMiscOffset()), value);
        jit.and32(CCallHelpers::TrustedImm32(IsArray | IndexingShapeMask), value);
        jit.patchableBranch32(
            CCallHelpers::NotEqual, value, CCallHelpers::TrustedImm32(IsArray | ContiguousShape));
        jit.loadPtr(CCallHelpers::Address(base, JSObject::butterflyOffset()), value);
        jit.load32(CCallHelpers::Address(value, ArrayStorage::lengthOffset()), value);
        jit.boxInt32(scratchGPR, regs);

        dataLog("array length size: ", jit.m_assembler.buffer().codeSize(), "\n");
    }

    {
        CCallHelpers jit;

        jit.patchableBranch32(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(base, JSCell::structureIDOffset()),
            MacroAssembler::TrustedImm32(0x000ab21ca));
        jit.loadPtr(
            CCallHelpers::Address(base, JSObject::butterflyOffset()),
            value);
        GPRReg storageGPR = value;
        jit.loadValue(
            CCallHelpers::Address(storageGPR, 0x000ab21ca), regs);

        dataLog("out of line offset cache size: ", jit.m_assembler.buffer().codeSize(), "\n");
    }

    {
        CCallHelpers jit;

        jit.patchableBranch32(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(base, JSCell::structureIDOffset()),
            MacroAssembler::TrustedImm32(0x000ab21ca));
        jit.loadValue(
            MacroAssembler::Address(base, 0x000ab21ca), regs);

        dataLog("inline offset cache size: ", jit.m_assembler.buffer().codeSize(), "\n");
    }

    {
        CCallHelpers jit;

        jit.patchableBranch32(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(base, JSCell::structureIDOffset()),
            MacroAssembler::TrustedImm32(0x000ab21ca));

        jit.storeValue(
            regs, MacroAssembler::Address(base, 0x000ab21ca));

        dataLog("replace cache size: ", jit.m_assembler.buffer().codeSize(), "\n");
    }

    {
        CCallHelpers jit;

        jit.patchableBranch32(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(base, JSCell::structureIDOffset()),
            MacroAssembler::TrustedImm32(0x000ab21ca));

        jit.loadPtr(MacroAssembler::Address(base, JSObject::butterflyOffset()), value);
        jit.storeValue(
            regs,
            MacroAssembler::Address(base, 120342));

        dataLog("replace out of line cache size: ", jit.m_assembler.buffer().codeSize(), "\n");
    }

    CRASH();
}


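// Tries to finalize the code in |jit| into the IC's reserved inline slot.
// Returns true on success; returns false (without emitting anything) when the
// generated code is larger than stubInfo.patch.inlineSize, in which case the
// caller must fall back to an out-of-line stub.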
template <typename Function>
ALWAYS_INLINE static bool linkCodeInline(const char* name, CCallHelpers& jit, StructureStubInfo& stubInfo, const Function& function)
{
    if (jit.m_assembler.buffer().codeSize() <= stubInfo.patch.inlineSize) {
        bool needsBranchCompaction = false;
        LinkBuffer linkBuffer(jit, stubInfo.patch.start, stubInfo.patch.inlineSize, JITCompilationMustSucceed, needsBranchCompaction);
        ASSERT(linkBuffer.isValid());
        function(linkBuffer);
        FINALIZE_CODE(linkBuffer, NoPtrTag, "InlineAccessType: '%s'", name);
        return true;
    }

    // This is helpful when determining the size for inline ICs on various
    // platforms. You want to choose a size that usually succeeds, but sometimes
    // there may be variability in the length of the code we generate just because
    // of randomness. It's helpful to flip this on when running tests or browsing
    // the web just to see how often it fails. You don't want an IC size that always fails.
    const bool failIfCantInline = false;
    if (failIfCantInline) {
        dataLog("Failure for: ", name, "\n");
        dataLog("real size: ", jit.m_assembler.buffer().codeSize(), " inline size: ", stubInfo.patch.inlineSize, "\n");
        CRASH();
    }

    return false;
}

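// Emits a structure-checked load of a property from the base object. The
// generated code has roughly this shape (a sketch; actual instructions,
// registers, and encodings vary by platform):
//
//     cmp  [base + structureIDOffset], expectedStructureID  ; patchable check
//     jne  slowPathStart
//     mov  storage, [base + butterflyOffset]                ; out-of-line offsets only
//     mov  value, [storage + offsetRelativeToBase(offset)]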
bool InlineAccess::generateSelfPropertyAccess(StructureStubInfo& stubInfo, Structure* structure, PropertyOffset offset)
{
    CCallHelpers jit;

    GPRReg base = static_cast<GPRReg>(stubInfo.patch.baseGPR);
    JSValueRegs value = stubInfo.valueRegs();

    auto branchToSlowPath = jit.patchableBranch32(
        MacroAssembler::NotEqual,
        MacroAssembler::Address(base, JSCell::structureIDOffset()),
        MacroAssembler::TrustedImm32(bitwise_cast<uint32_t>(structure->id())));
    GPRReg storage;
    if (isInlineOffset(offset))
        storage = base;
    else {
        jit.loadPtr(CCallHelpers::Address(base, JSObject::butterflyOffset()), value.payloadGPR());
        storage = value.payloadGPR();
    }

    jit.loadValue(
        MacroAssembler::Address(storage, offsetRelativeToBase(offset)), value);

    bool linkedCodeInline = linkCodeInline("property access", jit, stubInfo, [&] (LinkBuffer& linkBuffer) {
        linkBuffer.link(branchToSlowPath, stubInfo.slowPathStartLocation());
    });
    return linkedCodeInline;
}

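// Returns a GPR that is free across the inline cache, or InvalidGPRReg if none
// is available. A register the allocator had to "reuse" would require spill and
// fill code around the access, which an inline cache cannot accommodate, so
// that case is also reported as no free register.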
ALWAYS_INLINE static GPRReg getScratchRegister(StructureStubInfo& stubInfo)
{
    ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
    allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseGPR));
    allocator.lock(static_cast<GPRReg>(stubInfo.patch.valueGPR));
#if USE(JSVALUE32_64)
    allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
    allocator.lock(static_cast<GPRReg>(stubInfo.patch.valueTagGPR));
#endif
    GPRReg scratch = allocator.allocateScratchGPR();
    if (allocator.didReuseRegisters())
        return InvalidGPRReg;
    return scratch;
}

ALWAYS_INLINE static bool hasFreeRegister(StructureStubInfo& stubInfo)
{
    return getScratchRegister(stubInfo) != InvalidGPRReg;
}

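// A replace of an inline-offset property stores straight through the base
// register and needs no scratch; an out-of-line offset needs a spare register
// to hold the butterfly pointer.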
bool InlineAccess::canGenerateSelfPropertyReplace(StructureStubInfo& stubInfo, PropertyOffset offset)
{
    if (isInlineOffset(offset))
        return true;

    return hasFreeRegister(stubInfo);
}

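// Emits a structure-checked store of a property to the base object. This
// mirrors generateSelfPropertyAccess, except the value registers hold the
// incoming value, so out-of-line offsets route the butterfly pointer through a
// scratch register instead.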
bool InlineAccess::generateSelfPropertyReplace(StructureStubInfo& stubInfo, Structure* structure, PropertyOffset offset)
{
    ASSERT(canGenerateSelfPropertyReplace(stubInfo, offset));

    CCallHelpers jit;

    GPRReg base = static_cast<GPRReg>(stubInfo.patch.baseGPR);
    JSValueRegs value = stubInfo.valueRegs();

    auto branchToSlowPath = jit.patchableBranch32(
        MacroAssembler::NotEqual,
        MacroAssembler::Address(base, JSCell::structureIDOffset()),
        MacroAssembler::TrustedImm32(bitwise_cast<uint32_t>(structure->id())));

    GPRReg storage;
    if (isInlineOffset(offset))
        storage = base;
    else {
        storage = getScratchRegister(stubInfo);
        ASSERT(storage != InvalidGPRReg);
        jit.loadPtr(CCallHelpers::Address(base, JSObject::butterflyOffset()), storage);
    }

    jit.storeValue(
        value, MacroAssembler::Address(storage, offsetRelativeToBase(offset)));

    bool linkedCodeInline = linkCodeInline("property replace", jit, stubInfo, [&] (LinkBuffer& linkBuffer) {
        linkBuffer.link(branchToSlowPath, stubInfo.slowPathStartLocation());
    });
    return linkedCodeInline;
}

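// Caching array length inline requires a free scratch register and one of the
// fast array indexing types (Int32, Double, or Contiguous), whose length can be
// read directly from the butterfly.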
bool InlineAccess::isCacheableArrayLength(StructureStubInfo& stubInfo, JSArray* array)
{
    ASSERT(array->indexingType() & IsArray);

    if (!hasFreeRegister(stubInfo))
        return false;

    return array->indexingType() == ArrayWithInt32
        || array->indexingType() == ArrayWithDouble
        || array->indexingType() == ArrayWithContiguous;
}

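// Emits an indexing-type-checked load of the array's length. A sketch of the
// generated code (actual instructions and registers are platform-dependent):
//
//     movzx scratch, byte [base + indexingTypeAndMiscOffset]
//     and   scratch, (IsArray | IndexingShapeMask)
//     cmp   scratch, expectedIndexingType        ; patchable check
//     jne   slowPathStart
//     mov   payload, [base + butterflyOffset]
//     mov   payload32, [payload + lengthOffset]
//     <box payload32 as an int32 JSValue>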
bool InlineAccess::generateArrayLength(StructureStubInfo& stubInfo, JSArray* array)
{
    ASSERT(isCacheableArrayLength(stubInfo, array));

    CCallHelpers jit;

    GPRReg base = static_cast<GPRReg>(stubInfo.patch.baseGPR);
    JSValueRegs value = stubInfo.valueRegs();
    GPRReg scratch = getScratchRegister(stubInfo);

    jit.load8(CCallHelpers::Address(base, JSCell::indexingTypeAndMiscOffset()), scratch);
    jit.and32(CCallHelpers::TrustedImm32(IsArray | IndexingShapeMask), scratch);
    auto branchToSlowPath = jit.patchableBranch32(
        CCallHelpers::NotEqual, scratch, CCallHelpers::TrustedImm32(array->indexingType()));
    jit.loadPtr(CCallHelpers::Address(base, JSObject::butterflyOffset()), value.payloadGPR());
    jit.load32(CCallHelpers::Address(value.payloadGPR(), ArrayStorage::lengthOffset()), value.payloadGPR());
    jit.boxInt32(value.payloadGPR(), value);

    bool linkedCodeInline = linkCodeInline("array length", jit, stubInfo, [&] (LinkBuffer& linkBuffer) {
        linkBuffer.link(branchToSlowPath, stubInfo.slowPathStartLocation());
    });
    return linkedCodeInline;
}

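// Overwrites the start of the inline cache with an unconditional jump to an
// out-of-line stub, used when the access can no longer be handled inline.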
void InlineAccess::rewireStubAsJump(StructureStubInfo& stubInfo, CodeLocationLabel<JITStubRoutinePtrTag> target)
{
    CCallHelpers jit;

    auto jump = jit.jump();

    // We don't need a nop sled here because nobody should be jumping into the middle of an IC.
    bool needsBranchCompaction = false;
    LinkBuffer linkBuffer(jit, stubInfo.patch.start, jit.m_assembler.buffer().codeSize(), JITCompilationMustSucceed, needsBranchCompaction);
    RELEASE_ASSERT(linkBuffer.isValid());
    linkBuffer.link(jump, target);

    FINALIZE_CODE(linkBuffer, NoPtrTag, "InlineAccess: linking constant jump");
}

} // namespace JSC

#endif // ENABLE(JIT)