/*
 * Copyright (C) 2016-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "InlineAccess.h"

#if ENABLE(JIT)

#include "CCallHelpers.h"
#include "JSArray.h"
#include "JSCellInlines.h"
#include "LinkBuffer.h"
#include "ScratchRegisterAllocator.h"
#include "Structure.h"
#include "StructureStubInfo.h"

namespace JSC {
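
// dumpCacheSizesAndCrash() hand-emits one example of each inline cache flavor into
// a throwaway assembler, prints the resulting code sizes, and then crashes. It is a
// developer aid for picking per-platform inline IC sizes, not something that runs
// as part of normal execution.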
void InlineAccess::dumpCacheSizesAndCrash()
{
    GPRReg base = GPRInfo::regT0;
    GPRReg value = GPRInfo::regT1;
#if USE(JSVALUE32_64)
    JSValueRegs regs(base, value);
#else
    JSValueRegs regs(base);
#endif
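
    // The immediate constants used below (0x000ab21ca, 120342) appear to be arbitrary
    // placeholders: each block only measures how many bytes that IC shape emits, the
    // sizes are printed, and the function crashes, so the constants never execute
    // against a real object.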

    {
        CCallHelpers jit;

        GPRReg scratchGPR = value;
        jit.load8(CCallHelpers::Address(base, JSCell::indexingTypeAndMiscOffset()), value);
        jit.and32(CCallHelpers::TrustedImm32(IsArray | IndexingShapeMask), value);
        jit.patchableBranch32(
            CCallHelpers::NotEqual, value, CCallHelpers::TrustedImm32(IsArray | ContiguousShape));
        jit.loadPtr(CCallHelpers::Address(base, JSObject::butterflyOffset()), value);
        jit.load32(CCallHelpers::Address(value, ArrayStorage::lengthOffset()), value);
        jit.boxInt32(scratchGPR, regs);

        dataLog("array length size: ", jit.m_assembler.buffer().codeSize(), "\n");
    }

    {
        CCallHelpers jit;

        jit.patchableBranch32(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(base, JSCell::structureIDOffset()),
            MacroAssembler::TrustedImm32(0x000ab21ca));
        jit.loadPtr(
            CCallHelpers::Address(base, JSObject::butterflyOffset()),
            value);
        GPRReg storageGPR = value;
        jit.loadValue(
            CCallHelpers::Address(storageGPR, 0x000ab21ca), regs);

        dataLog("out of line offset cache size: ", jit.m_assembler.buffer().codeSize(), "\n");
    }

    {
        CCallHelpers jit;

        jit.patchableBranch32(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(base, JSCell::structureIDOffset()),
            MacroAssembler::TrustedImm32(0x000ab21ca));
        jit.loadValue(
            MacroAssembler::Address(base, 0x000ab21ca), regs);

        dataLog("inline offset cache size: ", jit.m_assembler.buffer().codeSize(), "\n");
    }

    {
        CCallHelpers jit;

        jit.patchableBranch32(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(base, JSCell::structureIDOffset()),
            MacroAssembler::TrustedImm32(0x000ab21ca));

        jit.storeValue(
            regs, MacroAssembler::Address(base, 0x000ab21ca));

        dataLog("replace cache size: ", jit.m_assembler.buffer().codeSize(), "\n");
    }

    {
        CCallHelpers jit;

        jit.patchableBranch32(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(base, JSCell::structureIDOffset()),
            MacroAssembler::TrustedImm32(0x000ab21ca));

        jit.loadPtr(MacroAssembler::Address(base, JSObject::butterflyOffset()), value);
        jit.storeValue(
            regs,
            MacroAssembler::Address(base, 120342));

        dataLog("replace out of line cache size: ", jit.m_assembler.buffer().codeSize(), "\n");
    }

    CRASH();
}
template <typename Function>
ALWAYS_INLINE static bool linkCodeInline(const char* name, CCallHelpers& jit, StructureStubInfo& stubInfo, const Function& function)
{
    if (jit.m_assembler.buffer().codeSize() <= stubInfo.patch.inlineSize) {
        bool needsBranchCompaction = false;
        LinkBuffer linkBuffer(jit, stubInfo.patch.start, stubInfo.patch.inlineSize, JITCompilationMustSucceed, needsBranchCompaction);
        ASSERT(linkBuffer.isValid());
        function(linkBuffer);
        FINALIZE_CODE(linkBuffer, NoPtrTag, "InlineAccessType: '%s'", name);
        return true;
    }

    // This is helpful when determining the size for inline ICs on various
    // platforms. You want to choose a size that usually succeeds, but there may be
    // some variability in the length of the code we generate simply because of
    // randomness. It's helpful to flip this on when running tests or browsing
    // the web just to see how often it fails. You don't want an IC size that always fails.
    const bool failIfCantInline = false;
    if (failIfCantInline) {
        dataLog("Failure for: ", name, "\n");
        dataLog("real size: ", jit.m_assembler.buffer().codeSize(), " inline size: ", stubInfo.patch.inlineSize, "\n");
        CRASH();
    }

    return false;
}
bool InlineAccess::generateSelfPropertyAccess(StructureStubInfo& stubInfo, Structure* structure, PropertyOffset offset)
{
    CCallHelpers jit;

    GPRReg base = static_cast<GPRReg>(stubInfo.patch.baseGPR);
    JSValueRegs value = stubInfo.valueRegs();

    auto branchToSlowPath = jit.patchableBranch32(
        MacroAssembler::NotEqual,
        MacroAssembler::Address(base, JSCell::structureIDOffset()),
        MacroAssembler::TrustedImm32(bitwise_cast<uint32_t>(structure->id())));
    GPRReg storage;
    if (isInlineOffset(offset))
        storage = base;
    else {
        jit.loadPtr(CCallHelpers::Address(base, JSObject::butterflyOffset()), value.payloadGPR());
        storage = value.payloadGPR();
    }

    jit.loadValue(
        MacroAssembler::Address(storage, offsetRelativeToBase(offset)), value);

    bool linkedCodeInline = linkCodeInline("property access", jit, stubInfo, [&] (LinkBuffer& linkBuffer) {
        linkBuffer.link(branchToSlowPath, stubInfo.slowPathStartLocation());
    });
    return linkedCodeInline;
}
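
// getScratchRegister finds a GPR the inline cache can clobber without spilling.
// Inline ICs have nowhere to spill, so if the allocator would have to reuse a
// locked register we report InvalidGPRReg and the caller treats the scratch as
// unavailable.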
ALWAYS_INLINE static GPRReg getScratchRegister(StructureStubInfo& stubInfo)
{
    ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
    allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseGPR));
    allocator.lock(static_cast<GPRReg>(stubInfo.patch.valueGPR));
#if USE(JSVALUE32_64)
    allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
    allocator.lock(static_cast<GPRReg>(stubInfo.patch.valueTagGPR));
#endif
    GPRReg scratch = allocator.allocateScratchGPR();
    if (allocator.didReuseRegisters())
        return InvalidGPRReg;
    return scratch;
}

ALWAYS_INLINE static bool hasFreeRegister(StructureStubInfo& stubInfo)
{
    return getScratchRegister(stubInfo) != InvalidGPRReg;
}
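
// A self property replace can always be inlined when the property lives in inline
// storage (the store goes straight into the cell); for out-of-line storage we
// additionally need a free scratch register to hold the butterfly pointer.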
bool InlineAccess::canGenerateSelfPropertyReplace(StructureStubInfo& stubInfo, PropertyOffset offset)
{
    if (isInlineOffset(offset))
        return true;

    return hasFreeRegister(stubInfo);
}

bool InlineAccess::generateSelfPropertyReplace(StructureStubInfo& stubInfo, Structure* structure, PropertyOffset offset)
{
    ASSERT(canGenerateSelfPropertyReplace(stubInfo, offset));

    CCallHelpers jit;

    GPRReg base = static_cast<GPRReg>(stubInfo.patch.baseGPR);
    JSValueRegs value = stubInfo.valueRegs();

    auto branchToSlowPath = jit.patchableBranch32(
        MacroAssembler::NotEqual,
        MacroAssembler::Address(base, JSCell::structureIDOffset()),
        MacroAssembler::TrustedImm32(bitwise_cast<uint32_t>(structure->id())));

    GPRReg storage;
    if (isInlineOffset(offset))
        storage = base;
    else {
        storage = getScratchRegister(stubInfo);
        ASSERT(storage != InvalidGPRReg);
        jit.loadPtr(CCallHelpers::Address(base, JSObject::butterflyOffset()), storage);
    }

    jit.storeValue(
        value, MacroAssembler::Address(storage, offsetRelativeToBase(offset)));

    bool linkedCodeInline = linkCodeInline("property replace", jit, stubInfo, [&] (LinkBuffer& linkBuffer) {
        linkBuffer.link(branchToSlowPath, stubInfo.slowPathStartLocation());
    });
    return linkedCodeInline;
}
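
// Array length is only inlined for the simple indexing shapes checked below (Int32,
// Double, Contiguous); other shapes return false here and presumably go through the
// generic stub path. A free scratch register is also required for the
// indexing-type check.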
bool InlineAccess::isCacheableArrayLength(StructureStubInfo& stubInfo, JSArray* array)
{
    ASSERT(array->indexingType() & IsArray);

    if (!hasFreeRegister(stubInfo))
        return false;

    return array->indexingType() == ArrayWithInt32
        || array->indexingType() == ArrayWithDouble
        || array->indexingType() == ArrayWithContiguous;
}
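
// generateArrayLength emits the fast path for array.length. A sketch of the
// emitted check-and-load (not the exact assembly):
//
//     if ((base->indexingTypeAndMisc() & (IsArray | IndexingShapeMask)) != array->indexingType())
//         goto slowPath;
//     length = load32(base->butterfly() + ArrayStorage::lengthOffset());
//     result = boxInt32(length);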
bool InlineAccess::generateArrayLength(StructureStubInfo& stubInfo, JSArray* array)
{
    ASSERT(isCacheableArrayLength(stubInfo, array));

    CCallHelpers jit;

    GPRReg base = static_cast<GPRReg>(stubInfo.patch.baseGPR);
    JSValueRegs value = stubInfo.valueRegs();
    GPRReg scratch = getScratchRegister(stubInfo);

    jit.load8(CCallHelpers::Address(base, JSCell::indexingTypeAndMiscOffset()), scratch);
    jit.and32(CCallHelpers::TrustedImm32(IsArray | IndexingShapeMask), scratch);
    auto branchToSlowPath = jit.patchableBranch32(
        CCallHelpers::NotEqual, scratch, CCallHelpers::TrustedImm32(array->indexingType()));
    jit.loadPtr(CCallHelpers::Address(base, JSObject::butterflyOffset()), value.payloadGPR());
    jit.load32(CCallHelpers::Address(value.payloadGPR(), ArrayStorage::lengthOffset()), value.payloadGPR());
    jit.boxInt32(value.payloadGPR(), value);

    bool linkedCodeInline = linkCodeInline("array length", jit, stubInfo, [&] (LinkBuffer& linkBuffer) {
        linkBuffer.link(branchToSlowPath, stubInfo.slowPathStartLocation());
    });
    return linkedCodeInline;
}
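
// rewireStubAsJump overwrites the inline cache region with an unconditional jump to
// `target`, which is typically how the IC is repatched to dispatch to an
// out-of-line stub (or back to the slow path) once the inline fast path no longer
// applies.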
void InlineAccess::rewireStubAsJump(StructureStubInfo& stubInfo, CodeLocationLabel<JITStubRoutinePtrTag> target)
{
    CCallHelpers jit;

    auto jump = jit.jump();

    // We don't need a nop sled here because nobody should be jumping into the middle of an IC.
    bool needsBranchCompaction = false;
    LinkBuffer linkBuffer(jit, stubInfo.patch.start, jit.m_assembler.buffer().codeSize(), JITCompilationMustSucceed, needsBranchCompaction);
    RELEASE_ASSERT(linkBuffer.isValid());
    linkBuffer.link(jump, target);

    FINALIZE_CODE(linkBuffer, NoPtrTag, "InlineAccess: linking constant jump");
}

} // namespace JSC

#endif // ENABLE(JIT)