b6827855a14fce868aa6c2ae882f2dbf29e50a92
Source/JavaScriptCore/jit/JITPropertyAccess.cpp
1 /*
2  * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27
28 #if ENABLE(JIT)
29 #include "JIT.h"
30
31 #include "CodeBlock.h"
32 #include "GetterSetter.h"
33 #include "Interpreter.h"
34 #include "JITInlineMethods.h"
35 #include "JITStubCall.h"
36 #include "JSArray.h"
37 #include "JSFunction.h"
38 #include "JSPropertyNameIterator.h"
39 #include "JSVariableObject.h"
40 #include "LinkBuffer.h"
41 #include "RepatchBuffer.h"
42 #include "ResultType.h"
43 #include "SamplingTool.h"
44
45 #ifndef NDEBUG
46 #include <stdio.h>
47 #endif
48
49 using namespace std;
50
51 namespace JSC {
52 #if USE(JSVALUE64)
53
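// Shared thunk used by get_by_val when the base is a JSString.  On entry regT0 holds the base cell and
// regT1 the int32 index.  It bails out (returning 0 in regT0) if the cell is not a string, the string's
// impl pointer is null, the index is out of bounds, or the character is outside the Latin-1 range covered
// by the single-character cache; otherwise it returns the matching entry from the small strings table.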
54 JIT::CodeRef JIT::stringGetByValStubGenerator(JSGlobalData* globalData)
55 {
56     JSInterfaceJIT jit;
57     JumpList failures;
58     failures.append(jit.branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info)));
59
60     // Load string length to regT2, and start the process of loading the data pointer into regT0
61     jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
62     jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
63     failures.append(jit.branchTest32(Zero, regT0));
64
65     // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
66     failures.append(jit.branch32(AboveOrEqual, regT1, regT2));
67     
68     // Load the character
69     JumpList is16Bit;
70     JumpList cont8Bit;
71     // Load the string flags
72     jit.loadPtr(Address(regT0, ThunkHelpers::stringImplFlagsOffset()), regT2);
73     jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);
74     is16Bit.append(jit.branchTest32(Zero, regT2, TrustedImm32(ThunkHelpers::stringImpl8BitFlag())));
75     jit.load8(BaseIndex(regT0, regT1, TimesOne, 0), regT0);
76     cont8Bit.append(jit.jump());
77     is16Bit.link(&jit);
78     jit.load16(BaseIndex(regT0, regT1, TimesTwo, 0), regT0);
79     cont8Bit.link(&jit);
80
81     failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
82     jit.move(TrustedImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
83     jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
84     jit.ret();
85     
86     failures.link(&jit);
87     jit.move(TrustedImm32(0), regT0);
88     jit.ret();
89     
90     LinkBuffer patchBuffer(*globalData, &jit, GLOBAL_THUNK_ID);
91     return FINALIZE_CODE(patchBuffer, ("String get_by_val stub"));
92 }
93
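// Fast path for get_by_val: requires an int32 property and a JSArray base, with the index inside the
// vector length and a non-hole value in the storage vector; anything else falls through to the slow case.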
94 void JIT::emit_op_get_by_val(Instruction* currentInstruction)
95 {
96     unsigned dst = currentInstruction[1].u.operand;
97     unsigned base = currentInstruction[2].u.operand;
98     unsigned property = currentInstruction[3].u.operand;
99
100     emitGetVirtualRegisters(base, regT0, property, regT1);
101     emitJumpSlowCaseIfNotImmediateInteger(regT1);
102
103     // This is technically incorrect - we're zero-extending an int32.  On the hot path this doesn't matter.
104     // We check the value as if it were a uint32 against the m_vectorLength - which will always fail if the
105     // number was negative, since m_vectorLength is always less than INT_MAX (the total allocation
106     // size is always less than 4GB).  As such, zero-extending will have been correct (and extending the value
107     // to 64 bits is necessary since it's used in the address calculation).  We zero-extend rather than sign-extend
108     // since it makes it easier to re-tag the value in the slow case.
109     zeroExtend32ToPtr(regT1, regT1);
110
111     emitJumpSlowCaseIfNotJSCell(regT0, base);
112     addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSArray::s_info)));
113
114     loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
115     addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, JSArray::vectorLengthOffset())));
116
117     loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
118     addSlowCase(branchTestPtr(Zero, regT0));
119
120     emitValueProfilingSite();
121     emitPutVirtualRegister(dst);
122 }
123
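// Slow path for get_by_val: a cell base that is not an array is first tried against the string stub
// above; if that fails, or any of the other fast-path checks failed, we fall back to cti_op_get_by_val.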
124 void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
125 {
126     unsigned dst = currentInstruction[1].u.operand;
127     unsigned base = currentInstruction[2].u.operand;
128     unsigned property = currentInstruction[3].u.operand;
129     
130     linkSlowCase(iter); // property int32 check
131     linkSlowCaseIfNotJSCell(iter, base); // base cell check
132     Jump nonCell = jump();
133     linkSlowCase(iter); // base array check
134     Jump notString = branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info));
135     emitNakedCall(CodeLocationLabel(m_globalData->getCTIStub(stringGetByValStubGenerator).code()));
136     Jump failed = branchTestPtr(Zero, regT0);
137     emitPutVirtualRegister(dst, regT0);
138     emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
139     failed.link(this);
140     notString.link(this);
141     nonCell.link(this);
142     
143     linkSlowCase(iter); // vector length check
144     linkSlowCase(iter); // empty value
145     
146     JITStubCall stubCall(this, cti_op_get_by_val);
147     stubCall.addArgument(base, regT2);
148     stubCall.addArgument(property, regT2);
149     stubCall.call(dst);
150
151     emitValueProfilingSite();
152 }
153
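// Load a property whose offset is only known at runtime (held in 'offset').  If the base may be a final
// object, the inline/out-of-line storage decision is made dynamically; otherwise the offset is expected
// to be out-of-line and only the out-of-line storage pointer is fetched.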
154 void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch, FinalObjectMode finalObjectMode)
155 {
156     ASSERT(sizeof(JSValue) == 8);
157     
158     if (finalObjectMode == MayBeFinal) {
159         Jump isInline = branch32(LessThan, offset, TrustedImm32(inlineStorageCapacity));
160         loadPtr(Address(base, JSObject::offsetOfOutOfLineStorage()), scratch);
161         Jump done = jump();
162         isInline.link(this);
163         addPtr(TrustedImm32(JSObject::offsetOfInlineStorage() + inlineStorageCapacity * sizeof(EncodedJSValue)), base, scratch);
164         done.link(this);
165     } else {
166 #if !ASSERT_DISABLED
167         Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(inlineStorageCapacity));
168         breakpoint();
169         isOutOfLine.link(this);
170 #endif
171         loadPtr(Address(base, JSObject::offsetOfOutOfLineStorage()), scratch);
172     }
173     loadPtr(BaseIndex(scratch, offset, ScalePtr, -inlineStorageCapacity * static_cast<ptrdiff_t>(sizeof(JSValue))), result);
174 }
175
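// Fast path for get_by_pname (the cached for..in access): the property must equal the expected value,
// the base must be a cell whose Structure matches the iterator's cached Structure, and the index must
// fall within the iterator's cacheable slots; the value is then loaded directly by offset.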
176 void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
177 {
178     unsigned dst = currentInstruction[1].u.operand;
179     unsigned base = currentInstruction[2].u.operand;
180     unsigned property = currentInstruction[3].u.operand;
181     unsigned expected = currentInstruction[4].u.operand;
182     unsigned iter = currentInstruction[5].u.operand;
183     unsigned i = currentInstruction[6].u.operand;
184
185     emitGetVirtualRegister(property, regT0);
186     addSlowCase(branchPtr(NotEqual, regT0, addressFor(expected)));
187     emitGetVirtualRegisters(base, regT0, iter, regT1);
188     emitJumpSlowCaseIfNotJSCell(regT0, base);
189
190     // Test base's structure
191     loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
192     addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
193     load32(addressFor(i), regT3);
194     sub32(TrustedImm32(1), regT3);
195     addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
196     add32(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_offsetBase)), regT3);
197     compileGetDirectOffset(regT0, regT0, regT3, regT1);
198
199     emitPutVirtualRegister(dst, regT0);
200 }
201
202 void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
203 {
204     unsigned dst = currentInstruction[1].u.operand;
205     unsigned base = currentInstruction[2].u.operand;
206     unsigned property = currentInstruction[3].u.operand;
207
208     linkSlowCase(iter);
209     linkSlowCaseIfNotJSCell(iter, base);
210     linkSlowCase(iter);
211     linkSlowCase(iter);
212
213     JITStubCall stubCall(this, cti_op_get_by_val);
214     stubCall.addArgument(base, regT2);
215     stubCall.addArgument(property, regT2);
216     stubCall.call(dst);
217 }
218
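// Fast path for put_by_val on a JSArray: an int32 index within the vector length either overwrites an
// existing slot or, if the slot is a hole, bumps m_numValuesInVector (and m_length when the index is
// past the current length) before storing.  A write barrier covers the stored value.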
219 void JIT::emit_op_put_by_val(Instruction* currentInstruction)
220 {
221     unsigned base = currentInstruction[1].u.operand;
222     unsigned property = currentInstruction[2].u.operand;
223     unsigned value = currentInstruction[3].u.operand;
224
225     emitGetVirtualRegisters(base, regT0, property, regT1);
226     emitJumpSlowCaseIfNotImmediateInteger(regT1);
227     // See comment in op_get_by_val.
228     zeroExtend32ToPtr(regT1, regT1);
229     emitJumpSlowCaseIfNotJSCell(regT0, base);
230     addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSArray::s_info)));
231     addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, JSArray::vectorLengthOffset())));
232
233     loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
234     Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
235
236     Label storeResult(this);
237     emitGetVirtualRegister(value, regT3);
238     storePtr(regT3, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
239     Jump end = jump();
240     
241     empty.link(this);
242     add32(TrustedImm32(1), Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
243     branch32(Below, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
244
245     add32(TrustedImm32(1), regT1);
246     store32(regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)));
247     sub32(TrustedImm32(1), regT1);
248     jump().linkTo(storeResult, this);
249
250     end.link(this);
251
252     emitWriteBarrier(regT0, regT3, regT1, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
253 }
254
255 void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
256 {
257     unsigned base = currentInstruction[1].u.operand;
258     unsigned property = currentInstruction[2].u.operand;
259     unsigned value = currentInstruction[3].u.operand;
260
261     linkSlowCase(iter); // property int32 check
262     linkSlowCaseIfNotJSCell(iter, base); // base cell check
263     linkSlowCase(iter); // base not array check
264     linkSlowCase(iter); // in vector check
265
266     JITStubCall stubPutByValCall(this, cti_op_put_by_val);
267     stubPutByValCall.addArgument(regT0);
268     stubPutByValCall.addArgument(property, regT2);
269     stubPutByValCall.addArgument(value, regT2);
270     stubPutByValCall.call();
271 }
272
273 void JIT::emit_op_put_by_index(Instruction* currentInstruction)
274 {
275     JITStubCall stubCall(this, cti_op_put_by_index);
276     stubCall.addArgument(currentInstruction[1].u.operand, regT2);
277     stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
278     stubCall.addArgument(currentInstruction[3].u.operand, regT2);
279     stubCall.call();
280 }
281
282 void JIT::emit_op_put_getter_setter(Instruction* currentInstruction)
283 {
284     JITStubCall stubCall(this, cti_op_put_getter_setter);
285     stubCall.addArgument(currentInstruction[1].u.operand, regT2);
286     stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
287     stubCall.addArgument(currentInstruction[3].u.operand, regT2);
288     stubCall.addArgument(currentInstruction[4].u.operand, regT2);
289     stubCall.call();
290 }
291
292 void JIT::emit_op_del_by_id(Instruction* currentInstruction)
293 {
294     JITStubCall stubCall(this, cti_op_del_by_id);
295     stubCall.addArgument(currentInstruction[2].u.operand, regT2);
296     stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
297     stubCall.call(currentInstruction[1].u.operand);
298 }
299
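// op_method_check precedes a get_by_id and caches method loads: the base's Structure and its prototype's
// Structure are compared against patchable constants, and on a match the cached function is produced by
// a patchable move.  On a miss we compile a normal get_by_id hot path whose slow case calls
// cti_op_get_by_id_method_check rather than cti_op_get_by_id.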
300 void JIT::emit_op_method_check(Instruction* currentInstruction)
301 {
302     // Assert that the following instruction is a get_by_id.
303     ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id
304         || m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id_out_of_line);
305
306     currentInstruction += OPCODE_LENGTH(op_method_check);
307     unsigned resultVReg = currentInstruction[1].u.operand;
308     unsigned baseVReg = currentInstruction[2].u.operand;
309     Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
310
311     emitGetVirtualRegister(baseVReg, regT0);
312
313     // Do the method check - check the object & its prototype's structure inline (this is the common case).
314     m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_bytecodeOffset, m_propertyAccessCompilationInfo.size()));
315     MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
316
317     Jump notCell = emitJumpIfNotJSCell(regT0);
318
319     BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
320
321     Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), info.structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
322     DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(TrustedImmPtr(0), regT1);
323     Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, JSCell::structureOffset()), protoStructureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
324
325     // This move will be repatched with the cached function, so it can be produced without doing a load.
326     DataLabelPtr putFunction = moveWithPatch(TrustedImmPtr(0), regT0);
327
328     END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
329
330     Jump match = jump();
331
332     // Link the failure cases here.
333     notCell.link(this);
334     structureCheck.link(this);
335     protoStructureCheck.link(this);
336
337     // Do a regular(ish) get_by_id (the slow case will be linked to
338     // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
339     compileGetByIdHotPath(baseVReg, ident);
340
341     match.link(this);
342     emitValueProfilingSite(m_bytecodeOffset + OPCODE_LENGTH(op_method_check));
343     emitPutVirtualRegister(resultVReg);
344
345     // We've already generated the following get_by_id, so make sure it's skipped over.
346     m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
347
348     m_propertyAccessCompilationInfo.last().addMethodCheckInfo(info.structureToCompare, protoObj, protoStructureToCompare, putFunction);
349 }
350
351 void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
352 {
353     currentInstruction += OPCODE_LENGTH(op_method_check);
354     unsigned resultVReg = currentInstruction[1].u.operand;
355     unsigned baseVReg = currentInstruction[2].u.operand;
356     Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
357
358     compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);
359     emitValueProfilingSite(m_bytecodeOffset + OPCODE_LENGTH(op_method_check));
360
361     // We've already generated the following get_by_id, so make sure it's skipped over.
362     m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
363 }
364
365 void JIT::emit_op_get_by_id(Instruction* currentInstruction)
366 {
367     unsigned resultVReg = currentInstruction[1].u.operand;
368     unsigned baseVReg = currentInstruction[2].u.operand;
369     Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
370
371     emitGetVirtualRegister(baseVReg, regT0);
372     compileGetByIdHotPath(baseVReg, ident);
373     emitValueProfilingSite();
374     emitPutVirtualRegister(resultVReg);
375 }
376
377 void JIT::compileGetByIdHotPath(int baseVReg, Identifier*)
378 {
379     // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
380     // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
381     // to array-length / prototype access trampolines), and finally we also record the property-map access offset as a label
382     // to jump back to if one of these trampolines finds a match.
383
384     emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
385
386     BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
387
388     Label hotPathBegin(this);
389
390     DataLabelPtr structureToCompare;
391     PatchableJump structureCheck = patchableBranchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
392     addSlowCase(structureCheck);
393
394     ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::offsetOfOutOfLineStorage()), regT0);
395     DataLabelCompact displacementLabel = loadPtrWithCompactAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
396
397     Label putResult(this);
398
399     END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
400
401     m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubGetById, m_bytecodeOffset, hotPathBegin, structureToCompare, structureCheck, propertyStorageLoad, displacementLabel, putResult));
402 }
403
404 void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
405 {
406     unsigned resultVReg = currentInstruction[1].u.operand;
407     unsigned baseVReg = currentInstruction[2].u.operand;
408     Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
409
410     compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false);
411     emitValueProfilingSite();
412 }
413
414 void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
415 {
416     // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
417     // so that we only need to track one pointer into the slow case code - we track a pointer to the location
418     // of the call (which we can use to look up the patch information), but should an array-length or
419     // prototype access trampoline fail we want to bail out back to here.  To do so we can subtract back
420     // the distance from the call to the head of the slow case.
421
422     linkSlowCaseIfNotJSCell(iter, baseVReg);
423     linkSlowCase(iter);
424
425     BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
426
427     Label coldPathBegin(this);
428     JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
429     stubCall.addArgument(regT0);
430     stubCall.addArgument(TrustedImmPtr(ident));
431     Call call = stubCall.call(resultVReg);
432
433     END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
434
435     // Track the location of the call; this will be used to recover patch information.
436     m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(PropertyStubGetById, coldPathBegin, call);
437 }
438
439 void JIT::emit_op_put_by_id(Instruction* currentInstruction)
440 {
441     unsigned baseVReg = currentInstruction[1].u.operand;
442     unsigned valueVReg = currentInstruction[3].u.operand;
443
444     // In order to be able to patch both the Structure and the object offset, we store one pointer,
445     // 'hotPathBegin', just after the arguments have been loaded into registers, and we generate code
446     // such that the Structure & offset are always at the same distance from this.
447
448     emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
449
450     // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
451     emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
452
453     BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
454
455     Label hotPathBegin(this);
456
457     // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
458     DataLabelPtr structureToCompare;
459     addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
460
461     ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::offsetOfOutOfLineStorage()), regT2);
462     DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT2, patchPutByIdDefaultOffset));
463
464     END_UNINTERRUPTED_SEQUENCE(sequencePutById);
465
466     emitWriteBarrier(regT0, regT1, regT2, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
467
468     m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubPutById, m_bytecodeOffset, hotPathBegin, structureToCompare, propertyStorageLoad, displacementLabel));
469 }
470
471 void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
472 {
473     unsigned baseVReg = currentInstruction[1].u.operand;
474     Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
475     unsigned direct = currentInstruction[8].u.operand;
476
477     linkSlowCaseIfNotJSCell(iter, baseVReg);
478     linkSlowCase(iter);
479
480     JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
481     stubCall.addArgument(regT0);
482     stubCall.addArgument(TrustedImmPtr(ident));
483     stubCall.addArgument(regT1);
484     Call call = stubCall.call();
485
486     // Track the location of the call; this will be used to recover patch information.
487     m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(PropertyStubPutById, call);
488 }
489
490 // Compile a store into an object's property storage.  May overwrite the
491 // value in base.
492 void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, PropertyOffset cachedOffset)
493 {
494     if (isInlineOffset(cachedOffset)) {
495         storePtr(value, Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)));
496         return;
497     }
498     
499     loadPtr(Address(base, JSObject::offsetOfOutOfLineStorage()), base);
500     storePtr(value, Address(base, sizeof(JSValue) * offsetInOutOfLineStorage(cachedOffset)));
501 }
502
503 // Compile a load from an object's property storage.  May overwrite base.
504 void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, PropertyOffset cachedOffset)
505 {
506     if (isInlineOffset(cachedOffset)) {
507         loadPtr(Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)), result);
508         return;
509     }
510     
511     loadPtr(Address(base, JSObject::offsetOfOutOfLineStorage()), result);
512     loadPtr(Address(result, sizeof(JSValue) * offsetInOutOfLineStorage(cachedOffset)), result);
513 }
514
515 void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, PropertyOffset cachedOffset)
516 {
517     if (isInlineOffset(cachedOffset)) {
518         loadPtr(base->locationForOffset(cachedOffset), result);
519         return;
520     }
521     
522     loadPtr(base->addressOfOutOfLineStorage(), result);
523     loadPtr(Address(result, offsetInOutOfLineStorage(cachedOffset) * sizeof(WriteBarrier<Unknown>)), result);
524 }
525
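// Generate a stub for a put_by_id that transitions the base's Structure: verify the old Structure (and,
// for non-direct puts, each prototype in the chain), reallocate the out-of-line storage through a stub
// call if its capacity changes, then plant the new Structure and store the value before returning.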
526 void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, PropertyOffset cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
527 {
528     JumpList failureCases;
529     // Check eax is an object of the right Structure.
530     failureCases.append(emitJumpIfNotJSCell(regT0));
531     failureCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(oldStructure)));
532     
533     testPrototype(oldStructure->storedPrototype(), failureCases);
534     
535     ASSERT(oldStructure->storedPrototype().isNull() || oldStructure->storedPrototype().asCell()->structure() == chain->head()->get());
536
537     // ecx = baseObject->m_structure
538     if (!direct) {
539         for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) {
540             ASSERT((*it)->storedPrototype().isNull() || (*it)->storedPrototype().asCell()->structure() == it[1].get());
541             testPrototype((*it)->storedPrototype(), failureCases);
542         }
543     }
544
545     // If we succeed in all of our checks, and the code was optimizable, then make sure we
546     // decrement the rare case counter.
547 #if ENABLE(VALUE_PROFILER)
548     if (m_codeBlock->canCompileWithDFG() >= DFG::ShouldProfile) {
549         sub32(
550             TrustedImm32(1),
551             AbsoluteAddress(&m_codeBlock->rareCaseProfileForBytecodeOffset(stubInfo->bytecodeIndex)->m_counter));
552     }
553 #endif
554     
555     // emit a call only if storage realloc is needed
556     bool willNeedStorageRealloc = oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity();
557     if (willNeedStorageRealloc) {
558         // This trampoline was called like a JIT stub; before we can call again we need to
559         // remove the return address from the stack, to prevent the stack from becoming misaligned.
560         preserveReturnAddressAfterCall(regT3);
561  
562         JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
563         stubCall.skipArgument(); // base
564         stubCall.skipArgument(); // ident
565         stubCall.skipArgument(); // value
566         stubCall.addArgument(TrustedImm32(oldStructure->outOfLineCapacity()));
567         stubCall.addArgument(TrustedImmPtr(newStructure));
568         stubCall.call(regT0);
569         emitGetJITStubArg(2, regT1);
570
571         restoreReturnAddressBeforeReturn(regT3);
572     }
573
574     // Planting the new structure triggers the write barrier so we need
575     // an unconditional barrier here.
576     emitWriteBarrier(regT0, regT1, regT2, regT3, UnconditionalWriteBarrier, WriteBarrierForPropertyAccess);
577
578     ASSERT(newStructure->classInfo() == oldStructure->classInfo());
579     storePtr(TrustedImmPtr(newStructure), Address(regT0, JSCell::structureOffset()));
580     compilePutDirectOffset(regT0, regT1, cachedOffset);
581
582     ret();
583     
584     ASSERT(!failureCases.empty());
585     failureCases.link(this);
586     restoreArgumentReferenceForTrampoline();
587     Call failureCall = tailRecursiveCall();
588
589     LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
590
591     patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
592
593     if (willNeedStorageRealloc) {
594         ASSERT(m_calls.size() == 1);
595         patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
596     }
597     
598     stubInfo->stubRoutine = FINALIZE_CODE(
599         patchBuffer,
600         ("Baseline put_by_id transition for CodeBlock %p, return point %p",
601          m_codeBlock, returnAddress.value()));
602     RepatchBuffer repatchBuffer(m_codeBlock);
603     repatchBuffer.relinkCallerToTrampoline(returnAddress, CodeLocationLabel(stubInfo->stubRoutine.code()));
604 }
605
606 void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress)
607 {
608     RepatchBuffer repatchBuffer(codeBlock);
609
610     // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
611     // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
612     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
613
614     // Patch the offset into the property map to load from, then patch the Structure to look for.
615     repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), structure);
616     repatchBuffer.setLoadInstructionIsActive(stubInfo->hotPathBegin.convertibleLoadAtOffset(stubInfo->patch.baseline.u.get.propertyStorageLoad), isOutOfLineOffset(cachedOffset));
617     repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel), offsetRelativeToPatchedStorage(cachedOffset));
618 }
619
620 void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, bool direct)
621 {
622     RepatchBuffer repatchBuffer(codeBlock);
623
624     // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
625     // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
626     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
627
628     // Patch the offset into the property map to store to, then patch the Structure to look for.
629     repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), structure);
630     repatchBuffer.setLoadInstructionIsActive(stubInfo->hotPathBegin.convertibleLoadAtOffset(stubInfo->patch.baseline.u.put.propertyStorageLoad), isOutOfLineOffset(cachedOffset));
631     repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel), offsetRelativeToPatchedStorage(cachedOffset));
632 }
633
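// Generate the cached array-length stub for get_by_id: verify the base is a JSArray, read m_length from
// its storage, and return it as an immediate integer; non-arrays and lengths that do not fit a signed
// int32 bail back to the original slow case.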
634 void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
635 {
636     StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
637
638     // Check eax is an array
639     Jump failureCases1 = branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSArray::s_info));
640
641     // Checks out okay! - get the length from the storage
642     loadPtr(Address(regT0, JSArray::storageOffset()), regT3);
643     load32(Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
644     Jump failureCases2 = branch32(LessThan, regT2, TrustedImm32(0));
645
646     emitFastArithIntToImmNoCheck(regT2, regT0);
647     Jump success = jump();
648
649     LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
650
651     // Use the patch information to link the failure cases back to the original slow case routine.
652     CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
653     patchBuffer.link(failureCases1, slowCaseBegin);
654     patchBuffer.link(failureCases2, slowCaseBegin);
655
656     // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
657     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
658
659     // Track the stub we have created so that it will be deleted later.
660     stubInfo->stubRoutine = FINALIZE_CODE(
661         patchBuffer,
662         ("Baseline JIT get_by_id array length stub for CodeBlock %p, return point %p",
663          m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
664              stubInfo->patch.baseline.u.get.putResult).executableAddress()));
665
666     // Finally patch the jump to slow case back in the hot path to jump here instead.
667     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
668     RepatchBuffer repatchBuffer(m_codeBlock);
669     repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code()));
670
671     // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
672     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
673 }
674
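// Generate a stub for a get_by_id that finds the property on the base's prototype: check both the base
// and prototype Structures, then either read the value directly by offset or, for getters and custom
// properties, call out through the corresponding cti stub.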
675 void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
676 {
677     // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
678     // referencing the prototype object - let's speculatively load its table nice and early!)
679     JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
680
681     // Check eax is an object of the right Structure.
682     Jump failureCases1 = checkStructure(regT0, structure);
683
684     // Check the prototype object's Structure had not changed.
685     move(TrustedImmPtr(protoObject), regT3);
686     Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure));
687
688     bool needsStubLink = false;
689     
690     // Checks out okay!
691     if (slot.cachedPropertyType() == PropertySlot::Getter) {
692         needsStubLink = true;
693         compileGetDirectOffset(protoObject, regT1, cachedOffset);
694         JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
695         stubCall.addArgument(regT1);
696         stubCall.addArgument(regT0);
697         stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
698         stubCall.call();
699     } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
700         needsStubLink = true;
701         JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
702         stubCall.addArgument(TrustedImmPtr(protoObject));
703         stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
704         stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
705         stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
706         stubCall.call();
707     } else
708         compileGetDirectOffset(protoObject, regT0, cachedOffset);
709     Jump success = jump();
710     LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
711
712     // Use the patch information to link the failure cases back to the original slow case routine.
713     CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
714     patchBuffer.link(failureCases1, slowCaseBegin);
715     patchBuffer.link(failureCases2, slowCaseBegin);
716
717     // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
718     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
719
720     if (needsStubLink) {
721         for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
722             if (iter->to)
723                 patchBuffer.link(iter->from, FunctionPtr(iter->to));
724         }
725     }
726     // Track the stub we have created so that it will be deleted later.
727     stubInfo->stubRoutine = FINALIZE_CODE(
728         patchBuffer,
729         ("Baseline JIT get_by_id proto stub for CodeBlock %p, return point %p",
730          m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
731              stubInfo->patch.baseline.u.get.putResult).executableAddress()));
732
733     // Finally patch the jump to slow case back in the hot path to jump here instead.
734     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
735     RepatchBuffer repatchBuffer(m_codeBlock);
736     repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code()));
737
738     // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
739     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
740 }
741
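// Add one case to a polymorphic get_by_id self-access list: same overall shape as the proto stub above,
// but the property is read from the base object itself, and a failed Structure check chains to the
// previously generated stub (or to the slow case for the first entry).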
742 void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset)
743 {
744     Jump failureCase = checkStructure(regT0, structure);
745     bool needsStubLink = false;
746     bool isDirect = false;
747     if (slot.cachedPropertyType() == PropertySlot::Getter) {
748         needsStubLink = true;
749         compileGetDirectOffset(regT0, regT1, cachedOffset);
750         JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
751         stubCall.addArgument(regT1);
752         stubCall.addArgument(regT0);
753         stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
754         stubCall.call();
755     } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
756         needsStubLink = true;
757         JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
758         stubCall.addArgument(regT0);
759         stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
760         stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
761         stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
762         stubCall.call();
763     } else {
764         isDirect = true;
765         compileGetDirectOffset(regT0, regT0, cachedOffset);
766     }
767     Jump success = jump();
768
769     LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
770
771     if (needsStubLink) {
772         for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
773             if (iter->to)
774                 patchBuffer.link(iter->from, FunctionPtr(iter->to));
775         }
776     }
777
778     // Use the patch information to link the failure cases back to the original slow case routine.
779     CodeLocationLabel lastProtoBegin = CodeLocationLabel(polymorphicStructures->list[currentIndex - 1].stubRoutine.code());
780     if (!lastProtoBegin)
781         lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
782
783     patchBuffer.link(failureCase, lastProtoBegin);
784
785     // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
786     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
787
788     MacroAssemblerCodeRef stubCode = FINALIZE_CODE(
789         patchBuffer,
790         ("Baseline JIT get_by_id list stub for CodeBlock %p, return point %p",
791          m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
792              stubInfo->patch.baseline.u.get.putResult).executableAddress()));
793
794     polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubCode, structure, isDirect);
795
796     // Finally patch the jump to slow case back in the hot path to jump here instead.
797     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
798     RepatchBuffer repatchBuffer(m_codeBlock);
799     repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode.code()));
800 }
801
802 void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, CallFrame* callFrame)
803 {
804     // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
805     // referencing the prototype object - let's speculatively load its table nice and early!)
806     JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
807
808     // Check eax is an object of the right Structure.
809     Jump failureCases1 = checkStructure(regT0, structure);
810
811     // Check the prototype object's Structure had not changed.
812     move(TrustedImmPtr(protoObject), regT3);
813     Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure));
814
815     // Checks out okay!
816     bool needsStubLink = false;
817     bool isDirect = false;
818     if (slot.cachedPropertyType() == PropertySlot::Getter) {
819         needsStubLink = true;
820         compileGetDirectOffset(protoObject, regT1, cachedOffset);
821         JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
822         stubCall.addArgument(regT1);
823         stubCall.addArgument(regT0);
824         stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
825         stubCall.call();
826     } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
827         needsStubLink = true;
828         JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
829         stubCall.addArgument(TrustedImmPtr(protoObject));
830         stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
831         stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
832         stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
833         stubCall.call();
834     } else {
835         isDirect = true;
836         compileGetDirectOffset(protoObject, regT0, cachedOffset);
837     }
838
839     Jump success = jump();
840
841     LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
842
843     if (needsStubLink) {
844         for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
845             if (iter->to)
846                 patchBuffer.link(iter->from, FunctionPtr(iter->to));
847         }
848     }
849
850     // Use the patch information to link the failure cases back to the original slow case routine.
851     CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code());
852     patchBuffer.link(failureCases1, lastProtoBegin);
853     patchBuffer.link(failureCases2, lastProtoBegin);
854
855     // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
856     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
857
858     MacroAssemblerCodeRef stubCode = FINALIZE_CODE(
859         patchBuffer,
860         ("Baseline JIT get_by_id proto list stub for CodeBlock %p, return point %p",
861          m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
862              stubInfo->patch.baseline.u.get.putResult).executableAddress()));
863     prototypeStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubCode, structure, prototypeStructure, isDirect);
864
865     // Finally patch the jump to slow case back in the hot path to jump here instead.
866     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
867     RepatchBuffer repatchBuffer(m_codeBlock);
868     repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode.code()));
869 }
870
871 void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, CallFrame* callFrame)
872 {
873     ASSERT(count);
874     JumpList bucketsOfFail;
875
876     // Check eax is an object of the right Structure.
877     Jump baseObjectCheck = checkStructure(regT0, structure);
878     bucketsOfFail.append(baseObjectCheck);
879
880     Structure* currStructure = structure;
881     WriteBarrier<Structure>* it = chain->head();
882     JSObject* protoObject = 0;
883     for (unsigned i = 0; i < count; ++i, ++it) {
884         protoObject = asObject(currStructure->prototypeForLookup(callFrame));
885         currStructure = it->get();
886         testPrototype(protoObject, bucketsOfFail);
887     }
888     ASSERT(protoObject);
889     
890     bool needsStubLink = false;
891     bool isDirect = false;
892     if (slot.cachedPropertyType() == PropertySlot::Getter) {
893         needsStubLink = true;
894         compileGetDirectOffset(protoObject, regT1, cachedOffset);
895         JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
896         stubCall.addArgument(regT1);
897         stubCall.addArgument(regT0);
898         stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
899         stubCall.call();
900     } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
901         needsStubLink = true;
902         JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
903         stubCall.addArgument(TrustedImmPtr(protoObject));
904         stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
905         stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
906         stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
907         stubCall.call();
908     } else {
909         isDirect = true;
910         compileGetDirectOffset(protoObject, regT0, cachedOffset);
911     }
912     Jump success = jump();
913
914     LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
915     
916     if (needsStubLink) {
917         for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
918             if (iter->to)
919                 patchBuffer.link(iter->from, FunctionPtr(iter->to));
920         }
921     }
922
923     // Use the patch information to link the failure cases back to the original slow case routine.
924     CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code());
925
926     patchBuffer.link(bucketsOfFail, lastProtoBegin);
927
928     // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
929     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
930
931     CodeRef stubRoutine = FINALIZE_CODE(
932         patchBuffer,
933         ("Baseline JIT get_by_id chain list stub for CodeBlock %p, return point %p",
934          m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
935              stubInfo->patch.baseline.u.get.putResult).executableAddress()));
936
937     // Track the stub we have created so that it will be deleted later.
938     prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), stubRoutine, structure, chain, isDirect);
939
940     // Finally patch the jump to slow case back in the hot path to jump here instead.
941     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
942     RepatchBuffer repatchBuffer(m_codeBlock);
943     repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
944 }
945
946 void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
947 {
948     ASSERT(count);
949
950     JumpList bucketsOfFail;
951
952     // Check eax is an object of the right Structure.
953     bucketsOfFail.append(checkStructure(regT0, structure));
954
955     Structure* currStructure = structure;
956     WriteBarrier<Structure>* it = chain->head();
957     JSObject* protoObject = 0;
958     for (unsigned i = 0; i < count; ++i, ++it) {
959         protoObject = asObject(currStructure->prototypeForLookup(callFrame));
960         currStructure = it->get();
961         testPrototype(protoObject, bucketsOfFail);
962     }
963     ASSERT(protoObject);
964
965     bool needsStubLink = false;
966     if (slot.cachedPropertyType() == PropertySlot::Getter) {
967         needsStubLink = true;
968         compileGetDirectOffset(protoObject, regT1, cachedOffset);
969         JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
970         stubCall.addArgument(regT1);
971         stubCall.addArgument(regT0);
972         stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
973         stubCall.call();
974     } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
975         needsStubLink = true;
976         JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
977         stubCall.addArgument(TrustedImmPtr(protoObject));
978         stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
979         stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
980         stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
981         stubCall.call();
982     } else
983         compileGetDirectOffset(protoObject, regT0, cachedOffset);
984     Jump success = jump();
985
986     LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
987
988     if (needsStubLink) {
989         for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
990             if (iter->to)
991                 patchBuffer.link(iter->from, FunctionPtr(iter->to));
992         }
993     }
994
995     // Use the patch information to link the failure cases back to the original slow case routine.
996     patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin));
997
998     // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
999     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
1000
1001     // Track the stub we have created so that it will be deleted later.
1002     CodeRef stubRoutine = FINALIZE_CODE(
1003         patchBuffer,
1004         ("Baseline JIT get_by_id chain stub for CodeBlock %p, return point %p",
1005          m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
1006              stubInfo->patch.baseline.u.get.putResult).executableAddress()));
1007     stubInfo->stubRoutine = stubRoutine;
1008
1009     // Finally patch the jump to slow case back in the hot path to jump here instead.
1010     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
1011     RepatchBuffer repatchBuffer(m_codeBlock);
1012     repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
1013
1014     // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
1015     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
1016 }
1017
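// Read a variable from an enclosing scope: walk 'skip' links down the ScopeChainNode list (allowing for
// a top-level activation that has not been created yet), then load the value from the scope object's
// register array.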
1018 void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
1019 {
1020     int skip = currentInstruction[3].u.operand;
1021
1022     emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
1023     bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
1024     ASSERT(skip || !checkTopLevel);
1025     if (checkTopLevel && skip--) {
1026         Jump activationNotCreated;
1027         if (checkTopLevel)
1028             activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
1029         loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
1030         activationNotCreated.link(this);
1031     }
1032     while (skip--)
1033         loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
1034
1035     loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT0);
1036     loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
1037     loadPtr(Address(regT0, currentInstruction[2].u.operand * sizeof(Register)), regT0);
1038     emitValueProfilingSite();
1039     emitPutVirtualRegister(currentInstruction[1].u.operand);
1040 }
1041
1042 void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
1043 {
1044     int skip = currentInstruction[2].u.operand;
1045
1046     emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
1047
1048     emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1);
1049     bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
1050     ASSERT(skip || !checkTopLevel);
1051     if (checkTopLevel && skip--) {
1052         Jump activationNotCreated;
1053         if (checkTopLevel)
1054             activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
1055         loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1);
1056         activationNotCreated.link(this);
1057     }
1058     while (skip--)
1059         loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1);
1060     loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);
1061
1062     emitWriteBarrier(regT1, regT0, regT2, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);
1063
1064     loadPtr(Address(regT1, JSVariableObject::offsetOfRegisters()), regT1);
1065     storePtr(regT0, Address(regT1, currentInstruction[1].u.operand * sizeof(Register)));
1066 }
1067
1068 void JIT::emit_op_get_global_var(Instruction* currentInstruction)
1069 {
1070     loadPtr(currentInstruction[2].u.registerPointer, regT0);
1071     emitValueProfilingSite();
1072     emitPutVirtualRegister(currentInstruction[1].u.operand);
1073 }
1074
1075 void JIT::emit_op_put_global_var(Instruction* currentInstruction)
1076 {
1077     JSGlobalObject* globalObject = m_codeBlock->globalObject();
1078
1079     emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
1080     
1081     storePtr(regT0, currentInstruction[1].u.registerPointer);
1082     if (Heap::isWriteBarrierEnabled())
1083         emitWriteBarrier(globalObject, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
1084 }
1085
1086 void JIT::emit_op_put_global_var_check(Instruction* currentInstruction)
1087 {
1088     emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
1089     
1090     addSlowCase(branchTest8(NonZero, AbsoluteAddress(currentInstruction[3].u.predicatePointer)));
1091
1092     JSGlobalObject* globalObject = m_codeBlock->globalObject();
1093     
1094     storePtr(regT0, currentInstruction[1].u.registerPointer);
1095     if (Heap::isWriteBarrierEnabled())
1096         emitWriteBarrier(globalObject, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
1097 }
1098
1099 void JIT::emitSlow_op_put_global_var_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1100 {
1101     linkSlowCase(iter);
1102     
1103     JITStubCall stubCall(this, cti_op_put_global_var_check);
1104     stubCall.addArgument(regT0);
1105     stubCall.addArgument(TrustedImm32(currentInstruction[4].u.operand));
1106     stubCall.call();
1107 }
1108
1109 void JIT::resetPatchGetById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
1110 {
1111     repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_get_by_id);
1112     repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), reinterpret_cast<void*>(-1));
1113     repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel), 0);
1114     repatchBuffer.relink(stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck), stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin));
1115 }
1116
1117 void JIT::resetPatchPutById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
1118 {
1119     if (isDirectPutById(stubInfo))
1120         repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id_direct);
1121     else
1122         repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id);
1123     repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), reinterpret_cast<void*>(-1));
1124     repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.put.displacementLabel), 0);
1125 }
1126
1127 #endif // USE(JSVALUE64)
1128
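// Write barrier for a store into 'owner'.  Without generational GC (GGC) this only bumps the profiling
// counter when that is enabled; with GGC it optionally skips non-cell values, tests an approximate mark
// byte for the owner within its MarkedBlock, and dirties the owner's card when that byte is set.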
1129 void JIT::emitWriteBarrier(RegisterID owner, RegisterID value, RegisterID scratch, RegisterID scratch2, WriteBarrierMode mode, WriteBarrierUseKind useKind)
1130 {
1131     UNUSED_PARAM(owner);
1132     UNUSED_PARAM(scratch);
1133     UNUSED_PARAM(scratch2);
1134     UNUSED_PARAM(useKind);
1135     UNUSED_PARAM(value);
1136     UNUSED_PARAM(mode);
1137     ASSERT(owner != scratch);
1138     ASSERT(owner != scratch2);
1139     
1140 #if ENABLE(WRITE_BARRIER_PROFILING)
1141     emitCount(WriteBarrierCounters::jitCounterFor(useKind));
1142 #endif
1143     
1144 #if ENABLE(GGC)
1145     Jump filterCells;
1146     if (mode == ShouldFilterImmediates)
1147         filterCells = emitJumpIfNotJSCell(value);
1148     move(owner, scratch);
1149     andPtr(TrustedImm32(static_cast<int32_t>(MarkedBlock::blockMask)), scratch);
1150     move(owner, scratch2);
1151     // consume additional 8 bits as we're using an approximate filter
1152     rshift32(TrustedImm32(MarkedBlock::atomShift + 8), scratch2);
1153     andPtr(TrustedImm32(MarkedBlock::atomMask >> 8), scratch2);
1154     Jump filter = branchTest8(Zero, BaseIndex(scratch, scratch2, TimesOne, MarkedBlock::offsetOfMarks()));
1155     move(owner, scratch2);
1156     rshift32(TrustedImm32(MarkedBlock::cardShift), scratch2);
1157     andPtr(TrustedImm32(MarkedBlock::cardMask), scratch2);
1158     store8(TrustedImm32(1), BaseIndex(scratch, scratch2, TimesOne, MarkedBlock::offsetOfCards()));
1159     filter.link(this);
1160     if (mode == ShouldFilterImmediates)
1161         filterCells.link(this);
1162 #endif
1163 }
1164
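// Write barrier variant for a statically known owner: the card address is computed at compile time, so
// the barrier reduces to storing 1 to that address (after the optional non-cell filter).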
1165 void JIT::emitWriteBarrier(JSCell* owner, RegisterID value, RegisterID scratch, WriteBarrierMode mode, WriteBarrierUseKind useKind)
1166 {
1167     UNUSED_PARAM(owner);
1168     UNUSED_PARAM(scratch);
1169     UNUSED_PARAM(useKind);
1170     UNUSED_PARAM(value);
1171     UNUSED_PARAM(mode);
1172     
1173 #if ENABLE(WRITE_BARRIER_PROFILING)
1174     emitCount(WriteBarrierCounters::jitCounterFor(useKind));
1175 #endif
1176     
1177 #if ENABLE(GGC)
1178     Jump filterCells;
1179     if (mode == ShouldFilterImmediates)
1180         filterCells = emitJumpIfNotJSCell(value);
1181     uint8_t* cardAddress = Heap::addressOfCardFor(owner);
1182     move(TrustedImmPtr(cardAddress), scratch);
1183     store8(TrustedImm32(1), Address(scratch));
1184     if (mode == ShouldFilterImmediates)
1185         filterCells.link(this);
1186 #endif
1187 }
1188
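// Emit a guard that the given prototype still has the Structure observed at compile time; a null
// prototype terminates the chain and needs no check.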
1189 void JIT::testPrototype(JSValue prototype, JumpList& failureCases)
1190 {
1191     if (prototype.isNull())
1192         return;
1193
1194     ASSERT(prototype.isCell());
1195     move(TrustedImmPtr(prototype.asCell()), regT3);
1196     failureCases.append(branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototype.asCell()->structure())));
1197 }
1198
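// Populate a method-check cache: write the Structure, prototype, prototype Structure and callee into the
// patchable slots emitted by emit_op_method_check, then point the caller at the method-check update stub.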
1199 void JIT::patchMethodCallProto(JSGlobalData& globalData, CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, StructureStubInfo& stubInfo, JSObject* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
1200 {
1201     RepatchBuffer repatchBuffer(codeBlock);
1202     
1203     CodeLocationDataLabelPtr structureLocation = methodCallLinkInfo.cachedStructure.location();
1204     methodCallLinkInfo.cachedStructure.set(globalData, structureLocation, codeBlock->ownerExecutable(), structure);
1205     
1206     Structure* prototypeStructure = proto->structure();
1207     methodCallLinkInfo.cachedPrototypeStructure.set(globalData, structureLocation.dataLabelPtrAtOffset(stubInfo.patch.baseline.methodCheckProtoStructureToCompare), codeBlock->ownerExecutable(), prototypeStructure);
1208     methodCallLinkInfo.cachedPrototype.set(globalData, structureLocation.dataLabelPtrAtOffset(stubInfo.patch.baseline.methodCheckProtoObj), codeBlock->ownerExecutable(), proto);
1209     methodCallLinkInfo.cachedFunction.set(globalData, structureLocation.dataLabelPtrAtOffset(stubInfo.patch.baseline.methodCheckPutFunction), codeBlock->ownerExecutable(), callee);
1210     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_method_check_update));
1211 }
1212
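// Determine whether an existing put_by_id stub was servicing a 'direct' put: transition stubs record it
// in their access type, while replace/generic stubs are classified by inspecting which cti function the
// call site currently targets.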
1213 bool JIT::isDirectPutById(StructureStubInfo* stubInfo)
1214 {
1215     switch (stubInfo->accessType) {
1216     case access_put_by_id_transition_normal:
1217         return false;
1218     case access_put_by_id_transition_direct:
1219         return true;
1220     case access_put_by_id_replace:
1221     case access_put_by_id_generic: {
1222         void* oldCall = MacroAssembler::readCallTarget(stubInfo->callReturnLocation).executableAddress();
1223         if (oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct)
1224             || oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct_generic)
1225             || oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct_fail))
1226             return true;
1227         ASSERT(oldCall == bitwise_cast<void*>(cti_op_put_by_id)
1228                || oldCall == bitwise_cast<void*>(cti_op_put_by_id_generic)
1229                || oldCall == bitwise_cast<void*>(cti_op_put_by_id_fail));
1230         return false;
1231     }
1232     default:
1233         ASSERT_NOT_REACHED();
1234         return false;
1235     }
1236 }
1237
1238 } // namespace JSC
1239
1240 #endif // ENABLE(JIT)