2 * Copyright (C) 2008 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "CodeBlock.h"
32 #include "JITInlineMethods.h"
34 #include "JSFunction.h"
35 #include "Interpreter.h"
36 #include "ResultType.h"
37 #include "SamplingTool.h"
47 #if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
49 void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, unsigned i, unsigned propertyAccessInstructionIndex)
// Non-optimized build (!ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)): the "hot path" for
// get_by_id just makes the generic C call, laid out with the same label/offset
// structure as the optimized slow case so repatching code can treat both uniformly.
51 // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be repatched.
52 // Additionally, for get_by_id we need to repatch the offset of the branch to the slow case (we repatch this to jump
53 // to array-length / prototype access trampolines, and finally we also use the property-map access offset as a label
54 // to jump back to if one of these trampolines finds a match.
// Load the base object's virtual register into eax.
56 emitGetVirtualRegister(baseVReg, X86::eax, i);
59 UNUSED_PARAM(propertyAccessInstructionIndex);
61 ASSERT(m_codeBlock->propertyAccessInstruction(propertyAccessInstructionIndex).bytecodeIndex == i);
// Label the start of the call sequence; the ASSERT below verifies the call site sits
// at the fixed distance repatchOffsetGetByIdSlowCaseCall from this label.
64 JmpDst coldPathBegin = __ label();
// Pass the base object (arg slot 0) and the property name (slot 4) to the stub.
66 emitPutCTIArg(X86::eax, 0);
67 emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 4);
68 JmpSrc call = emitCTICall(i, Interpreter::cti_op_get_by_id_generic);
69 ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
// Store the call's result into the destination virtual register.
70 emitPutVirtualRegister(resultVReg);
72 // Track the location of the call; this will be used to recover repatch information.
73 ASSERT(m_codeBlock->propertyAccessInstruction(propertyAccessInstructionIndex).bytecodeIndex == i);
74 m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
// With property-access optimization disabled the hot path above already made the
// generic call directly, so there is no slow case to compile for get_by_id.
// (Body not visible in this chunk; presumably ASSERT_NOT_REACHED() like
// compilePutByIdSlowCase below -- confirm against the full file.)
78 void JIT::compileGetByIdSlowCase(int, int, Identifier*, unsigned, Vector<SlowCaseEntry>::iterator&, unsigned)
83 void JIT::compilePutByIdHotPath(int baseVReg, Identifier* ident, int valueVReg, unsigned i, unsigned propertyAccessInstructionIndex)
// Non-optimized build: put_by_id compiles straight to the generic C call.
85 // In order to be able to repatch both the Structure, and the object offset, we store one pointer,
86 // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
87 // such that the Structure & offset are always at the same distance from this.
// Load the base object into eax and the value to store into edx.
89 emitGetVirtualRegisters(baseVReg, X86::eax, valueVReg, X86::edx, i);
// Stub arguments: base object (slot 0), property name (slot 4), value (slot 8).
91 emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 4);
92 emitPutCTIArg(X86::eax, 0);
93 emitPutCTIArg(X86::edx, 8);
94 JmpSrc call = emitCTICall(i, Interpreter::cti_op_put_by_id_generic);
96 // Track the location of the call; this will be used to recover repatch information.
97 ASSERT(m_codeBlock->propertyAccessInstruction(propertyAccessInstructionIndex).bytecodeIndex == i);
98 m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
101 void JIT::compilePutByIdSlowCase(int, Identifier*, int, unsigned, Vector<SlowCaseEntry>::iterator&, unsigned)
// The non-optimized hot path above never branches to a slow case, so reaching
// this is a compiler bug.
103 ASSERT_NOT_REACHED();
108 void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier*, unsigned i, unsigned propertyAccessInstructionIndex)
// Optimized (PIC) hot path: emit a Structure check against a default (dummy)
// Structure and a load from a default offset; both are repatched later once the
// access has been cached. The ASSERTs pin the exact byte offsets of each
// repatchable field relative to hotPathBegin.
110 // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be repatched.
111 // Additionally, for get_by_id we need to repatch the offset of the branch to the slow case (we repatch this to jump
112 // to array-length / prototype access trampolines, and finally we also use the property-map access offset as a label
113 // to jump back to if one of these trampolines finds a match.
115 emitGetVirtualRegister(baseVReg, X86::eax, i);
117 ASSERT(m_codeBlock->propertyAccessInstruction(propertyAccessInstructionIndex).bytecodeIndex == i);
// Immediates (non-cells) always take the slow path.
119 emitJumpSlowCaseIfNotJSCell(X86::eax, i, baseVReg);
// All repatch offsets below are measured from this label.
121 JmpDst hotPathBegin = __ label();
122 m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
// Compare the cell's Structure against a placeholder 32-bit immediate (repatched later).
124 __ cmpl_i32m(repatchGetByIdDefaultStructure, FIELD_OFFSET(JSCell, m_structure), X86::eax);
125 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetGetByIdStructure);
126 m_slowCases.append(SlowCaseEntry(__ jne(), i));
127 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetGetByIdBranchToSlowCase);
// Load the property storage, then the cached slot (placeholder offset, repatched later).
129 __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
130 __ movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::eax);
131 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetGetByIdPropertyMapOffset);
132 emitPutVirtualRegister(resultVReg);
136 void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, unsigned i, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex)
138 // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
139 // so that we only need track one pointer into the slow case code - we track a pointer to the location
140 // of the call (which we can use to look up the repatch information), but should an array-length or
141 // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
142 // the distance from the call to the head of the slow case.
// Link the not-a-cell check (if emitted) and the Structure-mismatch branch here.
144 if (linkSlowCaseIfNotJSCell(iter, baseVReg))
146 __ link(iter->from, __ label());
// The call below must land exactly repatchOffsetGetByIdSlowCaseCall bytes after
// this label, so stubs can compute the slow-case entry from the return address.
149 JmpDst coldPathBegin = __ label();
151 emitPutCTIArg(X86::eax, 0);
152 emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 4);
153 JmpSrc call = emitCTICall(i, Interpreter::cti_op_get_by_id);
154 ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
155 emitPutVirtualRegister(resultVReg);
157 // Track the location of the call; this will be used to recover repatch information.
158 ASSERT(m_codeBlock->propertyAccessInstruction(propertyAccessInstructionIndex).bytecodeIndex == i);
159 m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
162 void JIT::compilePutByIdHotPath(int baseVReg, Identifier*, int valueVReg, unsigned i, unsigned propertyAccessInstructionIndex)
// Optimized (PIC) hot path for put_by_id: mirror of the get_by_id hot path, but
// the final instruction is a store of edx into the (repatchable) cached slot.
164 // In order to be able to repatch both the Structure, and the object offset, we store one pointer,
165 // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
166 // such that the Structure & offset are always at the same distance from this.
168 emitGetVirtualRegisters(baseVReg, X86::eax, valueVReg, X86::edx, i);
170 ASSERT(m_codeBlock->propertyAccessInstruction(propertyAccessInstructionIndex).bytecodeIndex == i);
172 // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
173 emitJumpSlowCaseIfNotJSCell(X86::eax, i, baseVReg);
// All repatch offsets below are measured from this label.
175 JmpDst hotPathBegin = __ label();
176 m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
178 // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
179 __ cmpl_i32m(repatchGetByIdDefaultStructure, FIELD_OFFSET(JSCell, m_structure), X86::eax);
180 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetPutByIdStructure);
181 m_slowCases.append(SlowCaseEntry(__ jne(), i));
183 // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
184 __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
185 __ movl_rm(X86::edx, repatchGetByIdDefaultOffset, X86::eax);
186 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetPutByIdPropertyMapOffset);
189 void JIT::compilePutByIdSlowCase(int baseVReg, Identifier* ident, int, unsigned i, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex)
// Slow case for the optimized put_by_id hot path: link the failure branches here
// and fall back to the cti_op_put_by_id stub.
191 if (linkSlowCaseIfNotJSCell(iter, baseVReg))
193 __ link(iter->from, __ label());
// Stub arguments: base object (slot 0), property name (slot 4), value (slot 8).
195 emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 4);
196 emitPutCTIArg(X86::eax, 0);
197 emitPutCTIArg(X86::edx, 8);
198 JmpSrc call = emitCTICall(i, Interpreter::cti_op_put_by_id);
200 // Track the location of the call; this will be used to recover repatch information.
201 ASSERT(m_codeBlock->propertyAccessInstruction(propertyAccessInstructionIndex).bytecodeIndex == i);
202 m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
207 static JSObject* resizePropertyStorage(JSObject* baseObject, size_t oldSize, size_t newSize)
209 baseObject->allocatePropertyStorageInline(oldSize, newSize);
213 static inline bool transitionWillNeedStorageRealloc(Structure* oldStructure, Structure* newStructure)
215 return oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
// Compile a one-off stub for a put_by_id that transitions the object from
// oldStructure to newStructure: verify the base object's Structure, walk the
// prototype chain checking each link is unchanged, optionally call out to
// resizePropertyStorage, swap in the new Structure (adjusting refcounts), and
// store the value. NOTE(review): several lines of this function are missing
// from this chunk (declarations of callTarget/failureJump, some pops/links) --
// comments below flag each gap; confirm against the full file.
218 void JIT::privateCompilePutByIdTransition(Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, void* returnAddress)
220 Vector<JmpSrc, 16> failureCases;
221 // Check eax is an object of the right Structure.
222 __ testl_i32r(JSImmediate::TagMask, X86::eax);
223 failureCases.append(__ jne());
224 __ cmpl_i32m(reinterpret_cast<uint32_t>(oldStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
225 failureCases.append(__ jne());
226 Vector<JmpSrc> successCases;
229 __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::eax, X86::ecx);
230 // proto(ecx) = baseObject->structure()->prototype()
// Require the prototype to be an object (type check on the Structure's TypeInfo).
231 __ cmpl_i32m(ObjectType, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type), X86::ecx);
232 failureCases.append(__ jne());
233 __ movl_mr(FIELD_OFFSET(Structure, m_prototype), X86::ecx, X86::ecx);
235 // ecx = baseObject->m_structure
// Walk the structure chain, verifying each prototype's Structure is unchanged.
236 for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
237 // null check the prototype
238 __ cmpl_i32r(asInteger(jsNull()), X86::ecx);
239 successCases.append(__ je());
241 // Check the structure id
242 __ cmpl_i32m(reinterpret_cast<uint32_t>(it->get()), FIELD_OFFSET(JSCell, m_structure), X86::ecx);
243 failureCases.append(__ jne());
// Advance to the next prototype's Structure, verifying it is an object.
245 __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::ecx, X86::ecx);
246 __ cmpl_i32m(ObjectType, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type), X86::ecx);
247 failureCases.append(__ jne());
248 __ movl_mr(FIELD_OFFSET(Structure, m_prototype), X86::ecx, X86::ecx);
// NOTE(review): the compare this jne pairs with (and the loop's closing brace)
// is not visible in this chunk -- confirm against the full file.
251 failureCases.append(__ jne());
252 for (unsigned i = 0; i < successCases.size(); ++i)
253 __ link(successCases[i], __ label());
// NOTE(review): the declaration of callTarget (JmpSrc) is not visible here.
257 // emit a call only if storage realloc is needed
258 if (transitionWillNeedStorageRealloc(oldStructure, newStructure)) {
// Preserve edx (the value to store) across the call, then push the three
// cdecl arguments for resizePropertyStorage: newSize, oldSize, baseObject.
259 __ pushl_r(X86::edx);
260 __ pushl_i32(newStructure->propertyStorageCapacity());
261 __ pushl_i32(oldStructure->propertyStorageCapacity());
262 __ pushl_r(X86::eax);
263 callTarget = __ call();
// Pop the three arguments; the matching popl of edx is not visible in this chunk.
264 __ addl_i32r(3 * sizeof(void*), X86::esp);
268 // Assumes m_refCount can be decremented easily, refcount decrement is safe as
269 // codeblock should ensure oldStructure->m_refCount > 0
270 __ subl_i8m(1, reinterpret_cast<void*>(oldStructure));
271 __ addl_i8m(1, reinterpret_cast<void*>(newStructure));
// Install the new Structure on the object.
272 __ movl_i32m(reinterpret_cast<uint32_t>(newStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
// Write the value (edx) into the cached property slot.
275 __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
276 __ movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
// NOTE(review): the declaration of failureJump (JmpSrc) and the success-path
// ret are not visible in this chunk.
281 if (failureCases.size()) {
282 for (unsigned i = 0; i < failureCases.size(); ++i)
283 __ link(failureCases[i], __ label());
284 restoreArgumentReferenceForTrampoline();
285 failureJump = __ jmp();
288 void* code = __ executableCopy();
290 if (failureCases.size())
291 X86Assembler::link(code, failureJump, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));
293 if (transitionWillNeedStorageRealloc(oldStructure, newStructure))
294 X86Assembler::link(code, callTarget, reinterpret_cast<void*>(resizePropertyStorage));
// Record the stub for later cleanup, then redirect the original call site to it.
296 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
298 ctiRepatchCallByReturnAddress(returnAddress, code);
// Repatch an already-compiled get_by_id hot path in place for a self (own
// property) access: point the cached Structure and property-map offset at the
// observed values.
301 void JIT::patchGetByIdSelf(CodeBlock* codeBlock, Structure* structure, size_t cachedOffset, void* returnAddress)
303 StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
305 // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
306 // Should probably go to Interpreter::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
307 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
309 // Repatch the offset into the property map to load from, then repatch the Structure to look for.
310 X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
311 X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdStructure, reinterpret_cast<uint32_t>(structure));
// Repatch an already-compiled put_by_id hot path in place for a replace (store
// to an existing own property): install the observed Structure and slot offset.
314 void JIT::patchPutByIdReplace(CodeBlock* codeBlock, Structure* structure, size_t cachedOffset, void* returnAddress)
316 StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
318 // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
319 // Should probably go to Interpreter::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
320 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_generic));
322 // Repatch the offset into the property map to load from, then repatch the Structure to look for.
323 X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
324 X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdStructure, reinterpret_cast<uint32_t>(structure));
// Compile a stub for get_by_id "length" on a JSArray: vtable-check the object,
// load the length from array storage, encode it as an immediate integer, and
// splice the stub into the existing hot path via the branch-to-slow-case slot.
327 void JIT::privateCompilePatchGetArrayLength(void* returnAddress)
329 StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
331 // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
332 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail));
334 // Check eax is an array
335 __ cmpl_i32m(reinterpret_cast<unsigned>(m_interpreter->m_jsArrayVptr), X86::eax);
336 JmpSrc failureCases1 = __ jne();
338 // Checks out okay! - get the length from the storage
339 __ movl_mr(FIELD_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
340 __ movl_mr(FIELD_OFFSET(ArrayStorage, m_length), X86::ecx, X86::ecx);
// Lengths too large to fit in an immediate integer take the slow path.
342 __ cmpl_i32r(JSImmediate::maxImmediateInt, X86::ecx);
343 JmpSrc failureCases2 = __ ja();
// Encode the length as an immediate number: (length << 1) | 1, via ecx+ecx then +1.
345 __ addl_rr(X86::ecx, X86::ecx);
346 __ addl_i8r(1, X86::ecx);
347 __ movl_rr(X86::ecx, X86::eax);
348 JmpSrc success = __ jmp();
350 void* code = __ executableCopy();
352 // Use the repatch information to link the failure cases back to the original slow case routine.
353 void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
354 X86Assembler::link(code, failureCases1, slowCaseBegin);
355 X86Assembler::link(code, failureCases2, slowCaseBegin);
357 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
358 intptr_t successDest = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
359 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
361 // Track the stub we have created so that it will be deleted later.
362 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
364 // Finally repatch the jump to slow case back in the hot path to jump here instead.
365 intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
366 X86Assembler::repatchBranchOffset(jmpLocation, code);
// Compile a standalone stub for a self (own property) get_by_id: check the
// Structure, load the cached slot directly, and redirect the original call
// site to this stub; failures fall back to cti_op_get_by_id_self_fail.
369 void JIT::privateCompileGetByIdSelf(Structure* structure, size_t cachedOffset, void* returnAddress)
371 // Check eax is an object of the right Structure.
372 __ testl_i32r(JSImmediate::TagMask, X86::eax);
373 JmpSrc failureCases1 = __ jne();
374 JmpSrc failureCases2 = checkStructure(X86::eax, structure);
376 // Checks out okay! - getDirectOffset
377 __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
378 __ movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
381 void* code = __ executableCopy();
383 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
384 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
// Record the stub for later cleanup, then redirect the original call site to it.
386 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
388 ctiRepatchCallByReturnAddress(returnAddress, code);
// Compile a stub for a get_by_id that finds the property on the direct
// prototype. Two variants: with CTI_REPATCH_PIC the stub is spliced into the
// hot path (failures re-enter the original slow case); without it the stub is
// standalone and failures go to cti_op_get_by_id_proto_fail.
// NOTE(review): the #else and #endif directives separating the two variants
// are not visible in this chunk (which is why protoObject etc. appear to be
// declared twice below) -- confirm against the full file.
391 void JIT::privateCompileGetByIdProto(Structure* structure, Structure* prototypeStructure, size_t cachedOffset, void* returnAddress, CallFrame* callFrame)
393 #if USE(CTI_REPATCH_PIC)
394 StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
396 // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
397 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));
399 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
400 // referencing the prototype object - let's speculatively load it's table nice and early!)
401 JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
402 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
403 __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
405 // Check eax is an object of the right Structure.
406 JmpSrc failureCases1 = checkStructure(X86::eax, structure);
408 // Check the prototype object's Structure had not changed.
409 Structure** prototypeStructureAddress = &(protoObject->m_structure);
410 __ cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructure), prototypeStructureAddress);
411 JmpSrc failureCases2 = __ jne();
413 // Checks out okay! - getDirectOffset
414 __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
416 JmpSrc success = __ jmp();
418 void* code = __ executableCopy();
420 // Use the repatch information to link the failure cases back to the original slow case routine.
421 void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
422 X86Assembler::link(code, failureCases1, slowCaseBegin);
423 X86Assembler::link(code, failureCases2, slowCaseBegin);
425 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
426 intptr_t successDest = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
427 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
429 // Track the stub we have created so that it will be deleted later.
430 info.stubRoutine = code;
432 // Finally repatch the jump to slow case back in the hot path to jump here instead.
433 intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
434 X86Assembler::repatchBranchOffset(jmpLocation, code);
// --- non-CTI_REPATCH_PIC variant follows (the #else directive is not visible here) ---
436 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
437 // referencing the prototype object - let's speculatively load it's table nice and early!)
438 JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
439 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
440 __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
442 // Check eax is an object of the right Structure.
443 __ testl_i32r(JSImmediate::TagMask, X86::eax);
444 JmpSrc failureCases1 = __ jne();
445 JmpSrc failureCases2 = checkStructure(X86::eax, structure);
447 // Check the prototype object's Structure had not changed.
448 Structure** prototypeStructureAddress = &(protoObject->m_structure);
449 __ cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructure), prototypeStructureAddress);
450 JmpSrc failureCases3 = __ jne();
452 // Checks out okay! - getDirectOffset
453 __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
457 void* code = __ executableCopy();
459 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
460 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
461 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
463 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
465 ctiRepatchCallByReturnAddress(returnAddress, code);
469 #if USE(CTI_REPATCH_PIC)
// Add one self-access case to a polymorphic get_by_id stub list: a fresh stub
// whose failure path falls through to the previous stub in the list (or to the
// original slow case for the first entry).
470 void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
472 JmpSrc failureCase = checkStructure(X86::eax, structure);
473 __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
474 __ movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
475 JmpSrc success = __ jmp();
477 void* code = __ executableCopy();
480 // Use the repatch information to link the failure cases back to the original slow case routine.
481 void* lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
// NOTE(review): the conditional guarding this fallback assignment (used when the
// previous entry has no stub routine) is not visible in this chunk -- confirm.
483 lastProtoBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
485 X86Assembler::link(code, failureCase, lastProtoBegin);
487 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
488 intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
489 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
// Record this stub in the polymorphic list (takes ownership for later cleanup).
492 polymorphicStructures->list[currentIndex].set(cachedOffset, code, structure);
494 // Finally repatch the jump to slow case back in the hot path to jump here instead.
495 intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
496 X86Assembler::repatchBranchOffset(jmpLocation, code);
// Add one direct-prototype case to a polymorphic get_by_id stub list; the new
// stub's failure path jumps to the previous stub in the list.
499 void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
501 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
502 // referencing the prototype object - let's speculatively load it's table nice and early!)
503 JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
504 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
505 __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
507 // Check eax is an object of the right Structure.
508 JmpSrc failureCases1 = checkStructure(X86::eax, structure);
510 // Check the prototype object's Structure had not changed.
511 Structure** prototypeStructureAddress = &(protoObject->m_structure);
512 __ cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructure), prototypeStructureAddress);
513 JmpSrc failureCases2 = __ jne();
515 // Checks out okay! - getDirectOffset
516 __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
518 JmpSrc success = __ jmp();
520 void* code = __ executableCopy();
522 // Use the repatch information to link the failure cases back to the original slow case routine.
523 void* lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
524 X86Assembler::link(code, failureCases1, lastProtoBegin);
525 X86Assembler::link(code, failureCases2, lastProtoBegin);
527 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
528 intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
529 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
// Keep the prototype Structure alive while the list entry references it.
// NOTE(review): the matching ref of `structure` is not visible in this chunk -- confirm.
532 prototypeStructure->ref();
533 prototypeStructures->list[currentIndex].set(cachedOffset, code, structure, prototypeStructure);
535 // Finally repatch the jump to slow case back in the hot path to jump here instead.
536 intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
537 X86Assembler::repatchBranchOffset(jmpLocation, code);
// Add one prototype-chain case (property found `count` links up the chain) to a
// polymorphic get_by_id stub list: check the base Structure, then each
// prototype Structure along the chain, and finally load the slot from the last
// prototype's storage.
540 void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
544 Vector<JmpSrc> bucketsOfFail;
546 // Check eax is an object of the right Structure.
547 bucketsOfFail.append(checkStructure(X86::eax, structure));
549 Structure* currStructure = structure;
550 RefPtr<Structure>* chainEntries = chain->head();
551 JSObject* protoObject = 0;
552 for (unsigned i = 0; i < count; ++i) {
553 protoObject = asObject(currStructure->prototypeForLookup(callFrame));
554 currStructure = chainEntries[i].get();
556 // Check the prototype object's Structure had not changed.
557 Structure** prototypeStructureAddress = &(protoObject->m_structure);
558 __ cmpl_i32m(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
559 bucketsOfFail.append(__ jne());
// All Structures checked out; load the slot from the final prototype's storage.
563 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
564 __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
565 __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
566 JmpSrc success = __ jmp();
568 void* code = __ executableCopy();
570 // Use the repatch information to link the failure cases back to the original slow case routine.
571 void* lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
573 for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
574 X86Assembler::link(code, bucketsOfFail[i], lastProtoBegin);
576 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
577 intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
578 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
580 // Track the stub we have created so that it will be deleted later.
// NOTE(review): ref() calls for the retained Structures are not visible in this chunk.
583 prototypeStructures->list[currentIndex].set(cachedOffset, code, structure, chain);
585 // Finally repatch the jump to slow case back in the hot path to jump here instead.
586 intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
587 X86Assembler::repatchBranchOffset(jmpLocation, code);
// Compile a stub for a get_by_id resolved `count` links up the prototype chain.
// With CTI_REPATCH_PIC the stub is spliced into the hot path; otherwise it is a
// standalone stub whose failures go to cti_op_get_by_id_proto_fail.
// NOTE(review): the #else and #endif directives separating the two variants are
// not visible in this chunk (hence the apparent redeclarations below) -- confirm
// against the full file.
591 void JIT::privateCompileGetByIdChain(Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, void* returnAddress, CallFrame* callFrame)
593 #if USE(CTI_REPATCH_PIC)
594 StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
596 // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
597 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));
601 Vector<JmpSrc> bucketsOfFail;
603 // Check eax is an object of the right Structure.
604 bucketsOfFail.append(checkStructure(X86::eax, structure));
606 Structure* currStructure = structure;
607 RefPtr<Structure>* chainEntries = chain->head();
608 JSObject* protoObject = 0;
609 for (unsigned i = 0; i < count; ++i) {
610 protoObject = asObject(currStructure->prototypeForLookup(callFrame));
611 currStructure = chainEntries[i].get();
613 // Check the prototype object's Structure had not changed.
614 Structure** prototypeStructureAddress = &(protoObject->m_structure);
615 __ cmpl_i32m(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
616 bucketsOfFail.append(__ jne());
// All Structures checked out; load the slot from the final prototype's storage.
620 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
621 __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
622 __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
623 JmpSrc success = __ jmp();
625 void* code = __ executableCopy();
627 // Use the repatch information to link the failure cases back to the original slow case routine.
628 void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
630 for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
631 X86Assembler::link(code, bucketsOfFail[i], slowCaseBegin);
633 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
634 intptr_t successDest = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
635 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
637 // Track the stub we have created so that it will be deleted later.
638 info.stubRoutine = code;
640 // Finally repatch the jump to slow case back in the hot path to jump here instead.
641 intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
642 X86Assembler::repatchBranchOffset(jmpLocation, code);
// --- non-CTI_REPATCH_PIC variant follows (the #else directive is not visible here) ---
646 Vector<JmpSrc> bucketsOfFail;
648 // Check eax is an object of the right Structure.
649 __ testl_i32r(JSImmediate::TagMask, X86::eax);
650 bucketsOfFail.append(__ jne());
651 bucketsOfFail.append(checkStructure(X86::eax, structure));
653 Structure* currStructure = structure;
654 RefPtr<Structure>* chainEntries = chain->head();
655 JSObject* protoObject = 0;
656 for (unsigned i = 0; i < count; ++i) {
657 protoObject = asObject(currStructure->prototypeForLookup(callFrame));
658 currStructure = chainEntries[i].get();
660 // Check the prototype object's Structure had not changed.
661 Structure** prototypeStructureAddress = &(protoObject->m_structure);
662 __ cmpl_i32m(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
663 bucketsOfFail.append(__ jne());
// All Structures checked out; load the slot from the final prototype's storage.
667 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
668 __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
669 __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
672 void* code = __ executableCopy();
674 for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
675 X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
// Record the stub for later cleanup, then redirect the original call site to it.
677 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
679 ctiRepatchCallByReturnAddress(returnAddress, code);
// Compile a standalone stub for a put_by_id replace (store to an existing own
// property): check the Structure, store edx into the cached slot, and redirect
// the original call site; failures fall back to cti_op_put_by_id_fail.
683 void JIT::privateCompilePutByIdReplace(Structure* structure, size_t cachedOffset, void* returnAddress)
685 // Check eax is an object of the right Structure.
686 __ testl_i32r(JSImmediate::TagMask, X86::eax);
687 JmpSrc failureCases1 = __ jne();
688 JmpSrc failureCases2 = checkStructure(X86::eax, structure);
690 // checks out okay! - putDirectOffset
691 __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
692 __ movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
695 void* code = __ executableCopy();
697 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));
698 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));
// Record the stub for later cleanup, then redirect the original call site to it.
700 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
702 ctiRepatchCallByReturnAddress(returnAddress, code);
707 #endif // ENABLE(JIT)