/*
 * Copyright (C) 2016-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "InlineAccess.h"

#if ENABLE(JIT)

#include "CCallHelpers.h"
#include "JSArray.h"
#include "JSCellInlines.h"
#include "LinkBuffer.h"
#include "ScratchRegisterAllocator.h"
#include "Structure.h"
#include "StructureStubInfo.h"

namespace JSC {

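// dumpCacheSizesAndCrash() is a tuning aid, not production codegen: it assembles
// one representative example of each kind of inline cache this file can emit,
// prints the resulting machine-code size, and then crashes. The printed sizes
// are what you use when picking the per-platform inline-size constants that
// StructureStubInfo reserves. The magic immediates below (0x000ab21ca, 120342)
// are arbitrary stand-ins for a real structure ID and property offset; being
// large, they should exercise the widest immediate encodings a real cache could
// need on each target.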
void InlineAccess::dumpCacheSizesAndCrash()
{
    GPRReg base = GPRInfo::regT0;
    GPRReg value = GPRInfo::regT1;
#if USE(JSVALUE32_64)
    JSValueRegs regs(base, value);
#else
    JSValueRegs regs(base);
#endif
    {
        CCallHelpers jit;

        jit.patchableBranch8(
            CCallHelpers::NotEqual,
            CCallHelpers::Address(base, JSCell::typeInfoTypeOffset()),
            CCallHelpers::TrustedImm32(StringType));
        jit.load32(CCallHelpers::Address(base, JSString::offsetOfLength()), regs.payloadGPR());
        jit.boxInt32(regs.payloadGPR(), regs);

        dataLog("string length size: ", jit.m_assembler.buffer().codeSize(), "\n");
    }

    {
        CCallHelpers jit;

        GPRReg scratchGPR = value;
        jit.load8(CCallHelpers::Address(base, JSCell::indexingTypeAndMiscOffset()), scratchGPR);
        jit.and32(CCallHelpers::TrustedImm32(IsArray | IndexingShapeMask), scratchGPR);
        jit.patchableBranch32(
            CCallHelpers::NotEqual, scratchGPR, CCallHelpers::TrustedImm32(IsArray | ContiguousShape));
        jit.loadPtr(CCallHelpers::Address(base, JSObject::butterflyOffset()), scratchGPR);
        jit.load32(CCallHelpers::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR);
        jit.boxInt32(scratchGPR, regs);

        dataLog("array length size: ", jit.m_assembler.buffer().codeSize(), "\n");
    }

    {
        CCallHelpers jit;

        jit.patchableBranch32(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(base, JSCell::structureIDOffset()),
            MacroAssembler::TrustedImm32(0x000ab21ca));
        GPRReg storageGPR = value;
        jit.loadPtr(
            CCallHelpers::Address(base, JSObject::butterflyOffset()),
            storageGPR);
        jit.loadValue(
            CCallHelpers::Address(storageGPR, 0x000ab21ca), regs);

        dataLog("out of line offset cache size: ", jit.m_assembler.buffer().codeSize(), "\n");
    }

    {
        CCallHelpers jit;

        jit.patchableBranch32(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(base, JSCell::structureIDOffset()),
            MacroAssembler::TrustedImm32(0x000ab21ca));
        jit.loadValue(
            MacroAssembler::Address(base, 0x000ab21ca), regs);

        dataLog("inline offset cache size: ", jit.m_assembler.buffer().codeSize(), "\n");
    }

    {
        CCallHelpers jit;

        jit.patchableBranch32(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(base, JSCell::structureIDOffset()),
            MacroAssembler::TrustedImm32(0x000ab21ca));

        jit.storeValue(
            regs, MacroAssembler::Address(base, 0x000ab21ca));

        dataLog("replace cache size: ", jit.m_assembler.buffer().codeSize(), "\n");
    }

    {
        CCallHelpers jit;

        jit.patchableBranch32(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(base, JSCell::structureIDOffset()),
            MacroAssembler::TrustedImm32(0x000ab21ca));

        jit.loadPtr(MacroAssembler::Address(base, JSObject::butterflyOffset()), value);
        // The store goes through the just-loaded butterfly, not the base cell.
        jit.storeValue(
            regs,
            MacroAssembler::Address(value, 120342));

        dataLog("replace out of line cache size: ", jit.m_assembler.buffer().codeSize(), "\n");
    }

    CRASH();
}

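// Tries to copy the freshly assembled code into the inline region reserved at
// stubInfo.patch.start. If it does not fit within inlineSize, we return false so
// the caller can fall back to an out-of-line stub. Branch compaction is
// disabled: there is no benefit to shrinking code that must occupy a fixed-size
// slot, and the inline sizes were presumably tuned for uncompacted code.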
template <typename Function>
ALWAYS_INLINE static bool linkCodeInline(const char* name, CCallHelpers& jit, StructureStubInfo& stubInfo, const Function& function)
{
    if (jit.m_assembler.buffer().codeSize() <= stubInfo.patch.inlineSize) {
        bool needsBranchCompaction = false;
        LinkBuffer linkBuffer(jit, stubInfo.patch.start, stubInfo.patch.inlineSize, JITCompilationMustSucceed, needsBranchCompaction);
        ASSERT(linkBuffer.isValid());
        function(linkBuffer);
        FINALIZE_CODE(linkBuffer, NoPtrTag, "InlineAccessType: '%s'", name);
        return true;
    }

    // This is helpful when determining the size for inline ICs on various
    // platforms. You want to choose a size that usually succeeds, but the length
    // of the code we generate has some variability just because of randomness,
    // so occasional failures are expected. It's helpful to flip this on when
    // running tests or browsing the web just to see how often the chosen size
    // fails; you don't want one that always fails.
    const bool failIfCantInline = false;
    if (failIfCantInline) {
        dataLog("Failure for: ", name, "\n");
        dataLog("real size: ", jit.m_assembler.buffer().codeSize(), " inline size: ", stubInfo.patch.inlineSize, "\n");
        CRASH();
    }

    return false;
}

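// The self-access fast path looks roughly like this (x86-ish pseudo-assembly;
// the branch target is linked to the slow-path start below):
//
//     cmp  dword [base + structureIDOffset], structureID    ; structure guard
//     jne  slowPathStart                                    ; miss
//     mov  storage, [base + butterflyOffset]                ; out-of-line only
//     mov  value, [storage + offsetRelativeToBase(offset)]  ; load the property
//
// Reusing value.payloadGPR() for the butterfly pointer is safe for a load,
// since the final loadValue overwrites it anyway.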
bool InlineAccess::generateSelfPropertyAccess(StructureStubInfo& stubInfo, Structure* structure, PropertyOffset offset)
{
    CCallHelpers jit;

    GPRReg base = stubInfo.baseGPR();
    JSValueRegs value = stubInfo.valueRegs();

    auto branchToSlowPath = jit.patchableBranch32(
        MacroAssembler::NotEqual,
        MacroAssembler::Address(base, JSCell::structureIDOffset()),
        MacroAssembler::TrustedImm32(bitwise_cast<uint32_t>(structure->id())));
    GPRReg storage;
    if (isInlineOffset(offset))
        storage = base;
    else {
        jit.loadPtr(CCallHelpers::Address(base, JSObject::butterflyOffset()), value.payloadGPR());
        storage = value.payloadGPR();
    }

    jit.loadValue(
        MacroAssembler::Address(storage, offsetRelativeToBase(offset)), value);

    bool linkedCodeInline = linkCodeInline("property access", jit, stubInfo, [&] (LinkBuffer& linkBuffer) {
        linkBuffer.link(branchToSlowPath, stubInfo.slowPathStartLocation());
    });
    return linkedCodeInline;
}

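// A load can reuse value.payloadGPR() as scratch, but a store must keep the
// value registers intact, so replacing an out-of-line property needs a third
// register. This asks the allocator for a GPR that is free at the IC site; if
// allocating would force a spill (didReuseRegisters()), inline code cannot
// accommodate that, so we report InvalidGPRReg instead.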
ALWAYS_INLINE static GPRReg getScratchRegister(StructureStubInfo& stubInfo)
{
    ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
    allocator.lock(stubInfo.baseGPR());
    allocator.lock(stubInfo.patch.valueGPR);
#if USE(JSVALUE32_64)
    allocator.lock(stubInfo.patch.baseTagGPR);
    allocator.lock(stubInfo.patch.valueTagGPR);
#endif
    GPRReg scratch = allocator.allocateScratchGPR();
    if (allocator.didReuseRegisters())
        return InvalidGPRReg;
    return scratch;
}

ALWAYS_INLINE static bool hasFreeRegister(StructureStubInfo& stubInfo)
{
    return getScratchRegister(stubInfo) != InvalidGPRReg;
}

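// An inline-offset replace stores directly off the base register; only
// out-of-line offsets need a free scratch register to hold the butterfly.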
bool InlineAccess::canGenerateSelfPropertyReplace(StructureStubInfo& stubInfo, PropertyOffset offset)
{
    if (isInlineOffset(offset))
        return true;

    return hasFreeRegister(stubInfo);
}

bool InlineAccess::generateSelfPropertyReplace(StructureStubInfo& stubInfo, Structure* structure, PropertyOffset offset)
{
    ASSERT(canGenerateSelfPropertyReplace(stubInfo, offset));

    CCallHelpers jit;

    GPRReg base = stubInfo.baseGPR();
    JSValueRegs value = stubInfo.valueRegs();

    auto branchToSlowPath = jit.patchableBranch32(
        MacroAssembler::NotEqual,
        MacroAssembler::Address(base, JSCell::structureIDOffset()),
        MacroAssembler::TrustedImm32(bitwise_cast<uint32_t>(structure->id())));

    GPRReg storage;
    if (isInlineOffset(offset))
        storage = base;
    else {
        storage = getScratchRegister(stubInfo);
        ASSERT(storage != InvalidGPRReg);
        jit.loadPtr(CCallHelpers::Address(base, JSObject::butterflyOffset()), storage);
    }

    jit.storeValue(
        value, MacroAssembler::Address(storage, offsetRelativeToBase(offset)));

    bool linkedCodeInline = linkCodeInline("property replace", jit, stubInfo, [&] (LinkBuffer& linkBuffer) {
        linkBuffer.link(branchToSlowPath, stubInfo.slowPathStartLocation());
    });
    return linkedCodeInline;
}

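// ArrayClass arrays haven't committed to an indexing shape yet, and
// ArrayStorage-backed arrays can go sparse, so we only inline-cache length for
// the simple contiguous-style shapes; everything else takes the slow path. We
// also need a free scratch register for the indexing-type check.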
bool InlineAccess::isCacheableArrayLength(StructureStubInfo& stubInfo, JSArray* array)
{
    ASSERT(array->indexingType() & IsArray);

    if (!hasFreeRegister(stubInfo))
        return false;

    return !hasAnyArrayStorage(array->indexingType()) && array->indexingType() != ArrayClass;
}

bool InlineAccess::generateArrayLength(StructureStubInfo& stubInfo, JSArray* array)
{
    ASSERT(isCacheableArrayLength(stubInfo, array));

    CCallHelpers jit;

    GPRReg base = stubInfo.baseGPR();
    JSValueRegs value = stubInfo.valueRegs();
    GPRReg scratch = getScratchRegister(stubInfo);

    jit.load8(CCallHelpers::Address(base, JSCell::indexingTypeAndMiscOffset()), scratch);
    jit.and32(CCallHelpers::TrustedImm32(IndexingTypeMask), scratch);
    auto branchToSlowPath = jit.patchableBranch32(
        CCallHelpers::NotEqual, scratch, CCallHelpers::TrustedImm32(array->indexingType()));
    // For the shapes we allow, the public length lives at a fixed offset from
    // the butterfly pointer.
    jit.loadPtr(CCallHelpers::Address(base, JSObject::butterflyOffset()), value.payloadGPR());
    jit.load32(CCallHelpers::Address(value.payloadGPR(), ArrayStorage::lengthOffset()), value.payloadGPR());
    jit.boxInt32(value.payloadGPR(), value);

    bool linkedCodeInline = linkCodeInline("array length", jit, stubInfo, [&] (LinkBuffer& linkBuffer) {
        linkBuffer.link(branchToSlowPath, stubInfo.slowPathStartLocation());
    });
    return linkedCodeInline;
}

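// Strings are recognized by their JSType rather than by structure: every
// JSString shares the same length slot, so a one-byte type check stands in for
// the structureID guard used by the property caches above.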
bool InlineAccess::generateStringLength(StructureStubInfo& stubInfo)
{
    CCallHelpers jit;

    GPRReg base = stubInfo.baseGPR();
    JSValueRegs value = stubInfo.valueRegs();

    auto branchToSlowPath = jit.patchableBranch8(
        CCallHelpers::NotEqual,
        CCallHelpers::Address(base, JSCell::typeInfoTypeOffset()),
        CCallHelpers::TrustedImm32(StringType));
    jit.load32(CCallHelpers::Address(base, JSString::offsetOfLength()), value.payloadGPR());
    jit.boxInt32(value.payloadGPR(), value);

    bool linkedCodeInline = linkCodeInline("string length", jit, stubInfo, [&] (LinkBuffer& linkBuffer) {
        linkBuffer.link(branchToSlowPath, stubInfo.slowPathStartLocation());
    });
    return linkedCodeInline;
}

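// For a cached "in" hit, the structure check alone proves the property is
// present, so the fast path just materializes the constant true.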
bool InlineAccess::generateSelfInAccess(StructureStubInfo& stubInfo, Structure* structure)
{
    CCallHelpers jit;

    GPRReg base = stubInfo.baseGPR();
    JSValueRegs value = stubInfo.valueRegs();

    auto branchToSlowPath = jit.patchableBranch32(
        MacroAssembler::NotEqual,
        MacroAssembler::Address(base, JSCell::structureIDOffset()),
        MacroAssembler::TrustedImm32(bitwise_cast<uint32_t>(structure->id())));
    jit.boxBoolean(true, value);

    bool linkedCodeInline = linkCodeInline("in access", jit, stubInfo, [&] (LinkBuffer& linkBuffer) {
        linkBuffer.link(branchToSlowPath, stubInfo.slowPathStartLocation());
    });
    return linkedCodeInline;
}

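// Repatches the inline region to a single unconditional jump to `target`, e.g.
// when the IC grows an out-of-line stub or gets reset. Any bytes left in the
// region after the jump are simply dead.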
void InlineAccess::rewireStubAsJump(StructureStubInfo& stubInfo, CodeLocationLabel<JITStubRoutinePtrTag> target)
{
    CCallHelpers jit;

    auto jump = jit.jump();

    // We don't need a nop sled here because nobody should be jumping into the middle of an IC.
    bool needsBranchCompaction = false;
    LinkBuffer linkBuffer(jit, stubInfo.patch.start, jit.m_assembler.buffer().codeSize(), JITCompilationMustSucceed, needsBranchCompaction);
    RELEASE_ASSERT(linkBuffer.isValid());
    linkBuffer.link(jump, target);

    FINALIZE_CODE(linkBuffer, NoPtrTag, "InlineAccess: linking constant jump");
}

} // namespace JSC

#endif // ENABLE(JIT)