/*
 * Copyright (C) 2008, 2009, 2014, 2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
31 #include "CodeBlock.h"
32 #include "DirectArguments.h"
33 #include "GCAwareJITStubRoutine.h"
34 #include "GetterSetter.h"
35 #include "Interpreter.h"
36 #include "JITInlines.h"
38 #include "JSEnvironmentRecord.h"
39 #include "JSFunction.h"
40 #include "LinkBuffer.h"
41 #include "ResultType.h"
42 #include "SamplingTool.h"
43 #include "ScopedArguments.h"
44 #include "ScopedArgumentsTable.h"
45 #include "SlowPathCall.h"
46 #include <wtf/StringPrintStream.h>

JIT::CodeRef JIT::stringGetByValStubGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);
    JumpList failures;
    failures.append(jit.branchStructure(
        NotEqual,
        Address(regT0, JSCell::structureIDOffset()),
        vm->stringStructure.get()));

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
    jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
    failures.append(jit.branchTest32(Zero, regT0));

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    failures.append(jit.branch32(AboveOrEqual, regT1, regT2));

    // Load the character
    JumpList is16Bit;
    JumpList cont8Bit;
    // Load the string flags
    jit.loadPtr(Address(regT0, StringImpl::flagsOffset()), regT2);
    jit.loadPtr(Address(regT0, StringImpl::dataOffset()), regT0);
    is16Bit.append(jit.branchTest32(Zero, regT2, TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(BaseIndex(regT0, regT1, TimesOne, 0), regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(BaseIndex(regT0, regT1, TimesTwo, 0), regT0);
    cont8Bit.link(&jit);

    failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
    jit.move(TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), regT1);
    jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
    jit.ret();

    failures.link(&jit);
    jit.move(TrustedImm32(0), regT0);
    jit.ret();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("String get_by_val stub"));
}
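
// A note on the by-val caching scheme used below: emit_op_get_by_val compiles a
// fast path only for the array shape that profiling predicts (chooseArrayMode).
// The patchable 'badType' jump and the labels recorded in ByValCompilationInfo
// let privateCompileGetByVal later redirect a mispredicted site into a stub
// compiled for the shape actually seen, without regenerating the whole opcode.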

void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int property = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
    ByValInfo* byValInfo = m_codeBlock->addByValInfo();

    emitGetVirtualRegisters(base, regT0, property, regT1);

    emitJumpSlowCaseIfNotJSCell(regT0, base);

    PatchableJump notIndex = emitPatchableJumpIfNotInt(regT1);
    addSlowCase(notIndex);

    // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
    // We check the value as if it was a uint32 against the m_vectorLength - which will always fail if
    // number was signed since m_vectorLength is always less than intmax (since the total allocation
    // size is always less than 4Gb). As such zero extending will have been correct (and extending the value
    // to 64-bits is necessary since it's used in the address calculation). We zero extend rather than sign
    // extending since it makes it easier to re-tag the value in the slow case.
    zeroExtend32ToPtr(regT1, regT1);

    emitArrayProfilingSiteWithCell(regT0, regT2, profile);
    and32(TrustedImm32(IndexingShapeMask), regT2);

    PatchableJump badType;
    JumpList slowCases;

    JITArrayMode mode = chooseArrayMode(profile);
    switch (mode) {
    case JITInt32:
        slowCases = emitInt32GetByVal(currentInstruction, badType);
        break;
    case JITDouble:
        slowCases = emitDoubleGetByVal(currentInstruction, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousGetByVal(currentInstruction, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
        break;
    default:
        CRASH();
        break;
    }

    addSlowCase(badType);
    addSlowCase(slowCases);

    Label done = label();

    if (!ASSERT_DISABLED) {
        Jump resultOK = branchTest64(NonZero, regT0);
        abortWithReason(JITGetByValResultIsNotEmpty);
        resultOK.link(this);
    }

    emitValueProfilingSite();
    emitPutVirtualRegister(dst);

    Label nextHotPath = label();

    m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, nextHotPath));
}
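
// The emitDoubleLoad/emitContiguousLoad/emitArrayStorageLoad helpers below emit
// only the shape check, bounds check, and raw load. The corresponding
// emit*GetByVal wrappers are presumably responsible for boxing the result
// (e.g. re-tagging the double left in fpRegT0) before control reaches 'done'.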

JIT::JumpList JIT::emitDoubleLoad(Instruction*, PatchableJump& badType)
{
    JumpList slowCases;

    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(DoubleShape));
    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    slowCases.append(branchIfNotToSpace(regT2));
    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
    loadDouble(BaseIndex(regT2, regT1, TimesEight), fpRegT0);
    slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));

    return slowCases;
}

JIT::JumpList JIT::emitContiguousLoad(Instruction*, PatchableJump& badType, IndexingType expectedShape)
{
    JumpList slowCases;

    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(expectedShape));
    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    slowCases.append(branchIfNotToSpace(regT2));
    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
    load64(BaseIndex(regT2, regT1, TimesEight), regT0);
    slowCases.append(branchTest64(Zero, regT0));

    return slowCases;
}

JIT::JumpList JIT::emitArrayStorageLoad(Instruction*, PatchableJump& badType)
{
    JumpList slowCases;

    add32(TrustedImm32(-ArrayStorageShape), regT2, regT3);
    badType = patchableBranch32(Above, regT3, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape));

    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    slowCases.append(branchIfNotToSpace(regT2));
    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));

    load64(BaseIndex(regT2, regT1, TimesEight, ArrayStorage::vectorOffset()), regT0);
    slowCases.append(branchTest64(Zero, regT0));

    return slowCases;
}
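
// When profiling sees that the incoming subscript is a constant string or
// symbol rather than an index, the by-val site can instead be compiled as an
// identifier-keyed inline cache. emitGetByValWithCachedId below guards on the
// specific property name and then reuses the get_by_id machinery.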

JITGetByIdGenerator JIT::emitGetByValWithCachedId(Instruction* currentInstruction, const Identifier& propertyName, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases)
{
    // base: regT0
    // property: regT1
    // scratch: regT3

    int dst = currentInstruction[1].u.operand;

    slowCases.append(emitJumpIfNotJSCell(regT1));
    emitIdentifierCheck(regT1, regT3, propertyName, slowCases);

    JITGetByIdGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
        JSValueRegs(regT0), JSValueRegs(regT0));
    gen.generateFastPath(*this);

    fastDoneCase = jump();

    Label coldPathBegin = label();
    gen.slowPathJump().link(this);

    Call call = callOperation(WithProfile, operationGetByIdOptimize, dst, gen.stubInfo(), regT0, propertyName.impl());
    gen.reportSlowPathCall(coldPathBegin, call);
    slowDoneCase = jump();

    return gen;
}

void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int property = currentInstruction[3].u.operand;
    ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;

    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    linkSlowCase(iter); // property int32 check
    Jump nonCell = jump();
    linkSlowCase(iter); // base array check
    Jump notString = branchStructure(NotEqual,
        Address(regT0, JSCell::structureIDOffset()),
        m_vm->stringStructure.get());
    emitNakedCall(CodeLocationLabel(m_vm->getCTIStub(stringGetByValStubGenerator).code()));
    Jump failed = branchTest64(Zero, regT0);
    emitPutVirtualRegister(dst, regT0);
    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
    failed.link(this);
    notString.link(this);
    nonCell.link(this);

    linkSlowCase(iter); // read barrier
    linkSlowCase(iter); // vector length check
    linkSlowCase(iter); // empty value

    Label slowPath = label();

    emitGetVirtualRegister(base, regT0);
    emitGetVirtualRegister(property, regT1);
    Call call = callOperation(operationGetByValOptimize, dst, regT0, regT1, byValInfo);

    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
    m_byValInstructionIndex++;

    emitValueProfilingSite();
}
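
// Note that the linkSlowCase calls above must mirror, in order, the addSlowCase
// calls made on the fast path (cell check, int32 index check, bad type, read
// barrier, vector length, empty value); each iterator step consumes exactly one
// recorded slow case.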

void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    int base = currentInstruction[1].u.operand;
    int property = currentInstruction[2].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
    ByValInfo* byValInfo = m_codeBlock->addByValInfo();

    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotJSCell(regT0, base);
    PatchableJump notIndex = emitPatchableJumpIfNotInt(regT1);
    addSlowCase(notIndex);
    // See comment in op_get_by_val.
    zeroExtend32ToPtr(regT1, regT1);
    emitArrayProfilingSiteWithCell(regT0, regT2, profile);
    and32(TrustedImm32(IndexingShapeMask), regT2);

    PatchableJump badType;
    JumpList slowCases;

    JITArrayMode mode = chooseArrayMode(profile);
    switch (mode) {
    case JITInt32:
        slowCases = emitInt32PutByVal(currentInstruction, badType);
        break;
    case JITDouble:
        slowCases = emitDoublePutByVal(currentInstruction, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousPutByVal(currentInstruction, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStoragePutByVal(currentInstruction, badType);
        break;
    default:
        CRASH();
        break;
    }

    addSlowCase(badType);
    addSlowCase(slowCases);

    Label done = label();

    m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, done));
}

JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType, IndexingType indexingShape)
{
    int value = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    JumpList slowCases;

    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(indexingShape));

    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    slowCases.append(branchIfNotToSpace(regT2));
    Jump outOfBounds = branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength()));

    Label storeResult = label();
    emitGetVirtualRegister(value, regT3);
    switch (indexingShape) {
    case Int32Shape:
        slowCases.append(emitJumpIfNotInt(regT3));
        store64(regT3, BaseIndex(regT2, regT1, TimesEight));
        break;
    case DoubleShape: {
        Jump notInt = emitJumpIfNotInt(regT3);
        convertInt32ToDouble(regT3, fpRegT0);
        Jump ready = jump();
        notInt.link(this);
        add64(tagTypeNumberRegister, regT3);
        move64ToDouble(regT3, fpRegT0);
        slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
        ready.link(this);
        storeDouble(fpRegT0, BaseIndex(regT2, regT1, TimesEight));
        break;
    }
    case ContiguousShape:
        store64(regT3, BaseIndex(regT2, regT1, TimesEight));
        emitWriteBarrier(currentInstruction[1].u.operand, value, ShouldFilterValue);
        break;
    default:
        CRASH();
        break;
    }

    Jump done = jump();
    outOfBounds.link(this);

    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfVectorLength())));

    emitArrayProfileStoreToHoleSpecialCase(profile);

    add32(TrustedImm32(1), regT1, regT3);
    store32(regT3, Address(regT2, Butterfly::offsetOfPublicLength()));
    jump().linkTo(storeResult, this);

    done.link(this);

    return slowCases;
}
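
// emitInt32PutByVal, emitDoublePutByVal and emitContiguousPutByVal are thin
// wrappers over emitGenericContiguousPutByVal with the matching IndexingType,
// so the shape-specific value checks in the switch above are the only places
// the three paths differ.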

JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, PatchableJump& badType)
{
    int value = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    JumpList slowCases;

    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(ArrayStorageShape));
    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    slowCases.append(branchIfNotToSpace(regT2));
    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));

    Jump empty = branchTest64(Zero, BaseIndex(regT2, regT1, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));

    Label storeResult(this);
    emitGetVirtualRegister(value, regT3);
    store64(regT3, BaseIndex(regT2, regT1, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
    emitWriteBarrier(currentInstruction[1].u.operand, value, ShouldFilterValue);
    Jump end = jump();

    empty.link(this);
    emitArrayProfileStoreToHoleSpecialCase(profile);
    add32(TrustedImm32(1), Address(regT2, ArrayStorage::numValuesInVectorOffset()));
    branch32(Below, regT1, Address(regT2, ArrayStorage::lengthOffset())).linkTo(storeResult, this);

    add32(TrustedImm32(1), regT1);
    store32(regT1, Address(regT2, ArrayStorage::lengthOffset()));
    sub32(TrustedImm32(1), regT1);
    jump().linkTo(storeResult, this);

    end.link(this);

    return slowCases;
}

JITPutByIdGenerator JIT::emitPutByValWithCachedId(Instruction* currentInstruction, PutKind putKind, const Identifier& propertyName, JumpList& doneCases, JumpList& slowCases)
{
    // base: regT0
    // property: regT1
    // scratch: regT2

    int base = currentInstruction[1].u.operand;
    int value = currentInstruction[3].u.operand;

    slowCases.append(emitJumpIfNotJSCell(regT1));
    emitIdentifierCheck(regT1, regT1, propertyName, slowCases);

    // Write barrier breaks the registers. So after issuing the write barrier,
    // reload the registers.
    emitWriteBarrier(base, value, ShouldFilterValue);
    emitGetVirtualRegisters(base, regT0, value, regT1);

    JITPutByIdGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
        JSValueRegs(regT0), JSValueRegs(regT1), regT2, m_codeBlock->ecmaMode(), putKind);
    gen.generateFastPath(*this);
    doneCases.append(jump());

    Label coldPathBegin = label();
    gen.slowPathJump().link(this);

    Call call = callOperation(gen.slowPathFunction(), gen.stubInfo(), regT1, regT0, propertyName.impl());
    gen.reportSlowPathCall(coldPathBegin, call);
    doneCases.append(jump());

    return gen;
}

void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int base = currentInstruction[1].u.operand;
    int property = currentInstruction[2].u.operand;
    int value = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
    ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;

    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    linkSlowCase(iter); // property int32 check
    linkSlowCase(iter); // base not array check

    linkSlowCase(iter); // read barrier
    linkSlowCase(iter); // out of bounds

    JITArrayMode mode = chooseArrayMode(profile);
    switch (mode) {
    case JITInt32:
    case JITDouble:
        linkSlowCase(iter); // value type check
        break;
    default:
        break;
    }

    Label slowPath = label();

    emitGetVirtualRegister(base, regT0);
    emitGetVirtualRegister(property, regT1);
    emitGetVirtualRegister(value, regT2);
    bool isDirect = m_interpreter->getOpcodeID(currentInstruction->u.opcode) == op_put_by_val_direct;
    Call call = callOperation(isDirect ? operationDirectPutByValOptimize : operationPutByValOptimize, regT0, regT1, regT2, byValInfo);

    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
    m_byValInstructionIndex++;
}
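
// The slowPathTarget and returnAddress recorded here give privateCompilePutByVal
// everything it needs to regenerate this site later: the call can be repatched
// to a specialized operation, and the patchable badType jump redirected into a
// freshly compiled stub.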

void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
    emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
    callOperation(operationPutByIndex, regT0, currentInstruction[2].u.operand, regT1);
}

void JIT::emit_op_put_getter_by_id(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
    int32_t options = currentInstruction[3].u.operand;
    emitGetVirtualRegister(currentInstruction[4].u.operand, regT1);
    callOperation(operationPutGetterById, regT0, &m_codeBlock->identifier(currentInstruction[2].u.operand), options, regT1);
}

void JIT::emit_op_put_setter_by_id(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
    int32_t options = currentInstruction[3].u.operand;
    emitGetVirtualRegister(currentInstruction[4].u.operand, regT1);
    callOperation(operationPutSetterById, regT0, &m_codeBlock->identifier(currentInstruction[2].u.operand), options, regT1);
}

void JIT::emit_op_put_getter_setter(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
    int32_t attribute = currentInstruction[3].u.operand;
    emitGetVirtualRegister(currentInstruction[4].u.operand, regT1);
    emitGetVirtualRegister(currentInstruction[5].u.operand, regT2);
    callOperation(operationPutGetterSetter, regT0, &m_codeBlock->identifier(currentInstruction[2].u.operand), attribute, regT1, regT2);
}

void JIT::emit_op_put_getter_by_val(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
    emitGetVirtualRegister(currentInstruction[2].u.operand, regT1);
    int32_t attributes = currentInstruction[3].u.operand;
    emitGetVirtualRegister(currentInstruction[4].u.operand, regT2);
    callOperation(operationPutGetterByVal, regT0, regT1, attributes, regT2);
}

void JIT::emit_op_put_setter_by_val(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
    emitGetVirtualRegister(currentInstruction[2].u.operand, regT1);
    int32_t attributes = currentInstruction[3].u.operand;
    emitGetVirtualRegister(currentInstruction[4].u.operand, regT2);
    callOperation(operationPutSetterByVal, regT0, regT1, attributes, regT2);
}

void JIT::emit_op_del_by_id(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int property = currentInstruction[3].u.operand;
    emitGetVirtualRegister(base, regT0);
    callOperation(operationDeleteById, dst, regT0, &m_codeBlock->identifier(property));
}

void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    int resultVReg = currentInstruction[1].u.operand;
    int baseVReg = currentInstruction[2].u.operand;
    const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);

    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    if (*ident == m_vm->propertyNames->length && shouldEmitProfiling())
        emitArrayProfilingSiteForBytecodeIndexWithCell(regT0, regT1, m_bytecodeOffset);

    JITGetByIdGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
        JSValueRegs(regT0), JSValueRegs(regT0));
    gen.generateFastPath(*this);
    addSlowCase(gen.slowPathJump());
    m_getByIds.append(gen);

    emitValueProfilingSite();
    emitPutVirtualRegister(resultVReg);
    assertStackPointerOffset();
}

void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int resultVReg = currentInstruction[1].u.operand;
    int baseVReg = currentInstruction[2].u.operand;
    const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++];

    Label coldPathBegin = label();

    Call call = callOperation(WithProfile, operationGetByIdOptimize, resultVReg, gen.stubInfo(), regT0, ident->impl());

    gen.reportSlowPathCall(coldPathBegin, call);
}

void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    int baseVReg = currentInstruction[1].u.operand;
    int valueVReg = currentInstruction[3].u.operand;
    unsigned direct = currentInstruction[8].u.putByIdFlags & PutByIdIsDirect;

    emitWriteBarrier(baseVReg, valueVReg, ShouldFilterBase);

    // In order to be able to patch both the Structure, and the object offset, we store one pointer,
    // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
    // such that the Structure & offset are always at the same distance from this.

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    JITPutByIdGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
        JSValueRegs(regT0), JSValueRegs(regT1), regT2, m_codeBlock->ecmaMode(),
        direct ? Direct : NotDirect);

    gen.generateFastPath(*this);
    addSlowCase(gen.slowPathJump());

    m_putByIds.append(gen);
}

void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int baseVReg = currentInstruction[1].u.operand;
    const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    Label coldPathBegin(this);

    JITPutByIdGenerator& gen = m_putByIds[m_putByIdIndex++];

    Call call = callOperation(
        gen.slowPathFunction(), gen.stubInfo(), regT1, regT0, ident->impl());

    gen.reportSlowPathCall(coldPathBegin, call);
}

void JIT::emitVarInjectionCheck(bool needsVarInjectionChecks)
{
    if (!needsVarInjectionChecks)
        return;
    addSlowCase(branch8(Equal, AbsoluteAddress(m_codeBlock->globalObject()->varInjectionWatchpoint()->addressOfState()), TrustedImm32(IsInvalidated)));
}
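
// Var injection happens when, e.g., an eval introduces a new variable into an
// enclosing scope. The global object's varInjectionWatchpoint is invalidated
// when that occurs, so resolved-scope fast paths only need this single branch
// on the watchpoint state.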

void JIT::emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, unsigned depth)
{
    emitVarInjectionCheck(needsVarInjectionChecks);
    emitGetVirtualRegister(scope, regT0);
    for (unsigned i = 0; i < depth; ++i)
        loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
    emitPutVirtualRegister(dst);
}

void JIT::emit_op_resolve_scope(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int scope = currentInstruction[2].u.operand;
    ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand);
    unsigned depth = currentInstruction[5].u.operand;

    auto emitCode = [&] (ResolveType resolveType) {
        switch (resolveType) {
        case GlobalProperty:
        case GlobalVar:
        case GlobalPropertyWithVarInjectionChecks:
        case GlobalVarWithVarInjectionChecks:
        case GlobalLexicalVar:
        case GlobalLexicalVarWithVarInjectionChecks: {
            JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_codeBlock);
            RELEASE_ASSERT(constantScope);
            emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
            move(TrustedImmPtr(constantScope), regT0);
            emitPutVirtualRegister(dst);
            break;
        }
        case ClosureVar:
        case ClosureVarWithVarInjectionChecks:
            emitResolveClosure(dst, scope, needsVarInjectionChecks(resolveType), depth);
            break;
        case ModuleVar:
            move(TrustedImmPtr(currentInstruction[6].u.jsCell.get()), regT0);
            emitPutVirtualRegister(dst);
            break;
        case Dynamic:
            addSlowCase(jump());
            break;
        case LocalClosureVar:
        case UnresolvedProperty:
        case UnresolvedPropertyWithVarInjectionChecks:
            RELEASE_ASSERT_NOT_REACHED();
        }
    };

    switch (resolveType) {
    case UnresolvedProperty:
    case UnresolvedPropertyWithVarInjectionChecks: {
        JumpList skipToEnd;
        load32(&currentInstruction[4], regT0);

        Jump notGlobalProperty = branch32(NotEqual, regT0, TrustedImm32(GlobalProperty));
        emitCode(GlobalProperty);
        skipToEnd.append(jump());
        notGlobalProperty.link(this);

        Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks));
        emitCode(GlobalPropertyWithVarInjectionChecks);
        skipToEnd.append(jump());
        notGlobalPropertyWithVarInjections.link(this);

        Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar));
        emitCode(GlobalLexicalVar);
        skipToEnd.append(jump());
        notGlobalLexicalVar.link(this);

        Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks));
        emitCode(GlobalLexicalVarWithVarInjectionChecks);
        skipToEnd.append(jump());
        notGlobalLexicalVarWithVarInjections.link(this);

        addSlowCase(jump());
        skipToEnd.link(this);
        break;
    }

    default:
        emitCode(resolveType);
        break;
    }
}
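
// For UnresolvedProperty the resolution type is not known at compile time, so
// the code above inlines a small dispatch: it reloads the ResolveType operand
// (which the slow path rewrites once resolution happens) and branches to the
// matching specialization, falling back to the slow case for anything else.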

void JIT::emitSlow_op_resolve_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand);
    if (resolveType == GlobalProperty || resolveType == GlobalVar || resolveType == ClosureVar || resolveType == GlobalLexicalVar || resolveType == ModuleVar)
        return;

    if (resolveType == UnresolvedProperty || resolveType == UnresolvedPropertyWithVarInjectionChecks) {
        linkSlowCase(iter); // var injections check for GlobalPropertyWithVarInjectionChecks.
        linkSlowCase(iter); // var injections check for GlobalLexicalVarWithVarInjectionChecks.
    }

    linkSlowCase(iter);
    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_resolve_scope);
    slowPathCall.call();
}

void JIT::emitLoadWithStructureCheck(int scope, Structure** structureSlot)
{
    emitGetVirtualRegister(scope, regT0);
    loadPtr(structureSlot, regT1);
    addSlowCase(branchTestPtr(Zero, regT1));
    load32(Address(regT1, Structure::structureIDOffset()), regT1);
    addSlowCase(branch32(NotEqual, Address(regT0, JSCell::structureIDOffset()), regT1));
}

void JIT::emitGetVarFromPointer(JSValue* operand, GPRReg reg)
{
    loadPtr(operand, reg);
}

void JIT::emitGetVarFromIndirectPointer(JSValue** operand, GPRReg reg)
{
    loadPtr(operand, reg);
    loadPtr(reg, reg);
}

void JIT::emitGetClosureVar(int scope, uintptr_t operand)
{
    emitGetVirtualRegister(scope, regT0);
    loadPtr(Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register)), regT0);
}

void JIT::emit_op_get_from_scope(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int scope = currentInstruction[2].u.operand;
    ResolveType resolveType = GetPutInfo(currentInstruction[4].u.operand).resolveType();
    Structure** structureSlot = currentInstruction[5].u.structure.slot();
    uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&currentInstruction[6].u.pointer);

    auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) {
        switch (resolveType) {
        case GlobalProperty:
        case GlobalPropertyWithVarInjectionChecks: {
            emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.

            GPRReg base = regT0;
            GPRReg result = regT0;
            GPRReg offset = regT1;
            GPRReg scratch = regT2;

            load32(operandSlot, offset);
            if (!ASSERT_DISABLED) {
                Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
                abortWithReason(JITOffsetIsNotOutOfLine);
                isOutOfLine.link(this);
            }
            loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
            addSlowCase(branchIfNotToSpace(scratch));
            neg32(offset);
            signExtend32ToPtr(offset, offset);
            load64(BaseIndex(scratch, offset, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), result);
            break;
        }
        case GlobalVar:
        case GlobalVarWithVarInjectionChecks:
        case GlobalLexicalVar:
        case GlobalLexicalVarWithVarInjectionChecks:
            emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
            if (indirectLoadForOperand)
                emitGetVarFromIndirectPointer(bitwise_cast<JSValue**>(operandSlot), regT0);
            else
                emitGetVarFromPointer(bitwise_cast<JSValue*>(*operandSlot), regT0);
            if (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks) // TDZ check.
                addSlowCase(branchTest64(Zero, regT0));
            break;
        case ClosureVar:
        case ClosureVarWithVarInjectionChecks:
            emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
            emitGetClosureVar(scope, *operandSlot);
            break;
        case Dynamic:
            addSlowCase(jump());
            break;
        case LocalClosureVar:
        case ModuleVar:
        case UnresolvedProperty:
        case UnresolvedPropertyWithVarInjectionChecks:
            RELEASE_ASSERT_NOT_REACHED();
        }
    };

    switch (resolveType) {
    case UnresolvedProperty:
    case UnresolvedPropertyWithVarInjectionChecks: {
        JumpList skipToEnd;
        load32(&currentInstruction[4], regT0);
        and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0

        Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(GlobalProperty));
        Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks));
        isGlobalProperty.link(this);
        emitCode(GlobalProperty, false);
        skipToEnd.append(jump());
        notGlobalPropertyWithVarInjections.link(this);

        Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar));
        emitCode(GlobalLexicalVar, true);
        skipToEnd.append(jump());
        notGlobalLexicalVar.link(this);

        Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks));
        emitCode(GlobalLexicalVarWithVarInjectionChecks, true);
        skipToEnd.append(jump());
        notGlobalLexicalVarWithVarInjections.link(this);

        addSlowCase(jump());

        skipToEnd.link(this);
        break;
    }

    default:
        emitCode(resolveType, false);
        break;
    }
    emitPutVirtualRegister(dst);
    emitValueProfilingSite();
}

void JIT::emitSlow_op_get_from_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;
    ResolveType resolveType = GetPutInfo(currentInstruction[4].u.operand).resolveType();

    if (resolveType == GlobalVar || resolveType == ClosureVar)
        return;

    if (resolveType == GlobalProperty || resolveType == GlobalPropertyWithVarInjectionChecks) {
        linkSlowCase(iter); // bad structure
        linkSlowCase(iter); // read barrier
    }

    if (resolveType == GlobalLexicalVarWithVarInjectionChecks) // Var injections check.
        linkSlowCase(iter);

    if (resolveType == UnresolvedProperty || resolveType == UnresolvedPropertyWithVarInjectionChecks) {
        // GlobalProperty/GlobalPropertyWithVarInjectionChecks
        linkSlowCase(iter); // emitLoadWithStructureCheck
        linkSlowCase(iter); // emitLoadWithStructureCheck
        linkSlowCase(iter); // read barrier
        // GlobalLexicalVar
        linkSlowCase(iter); // TDZ check.
        // GlobalLexicalVarWithVarInjectionChecks.
        linkSlowCase(iter); // var injection check.
        linkSlowCase(iter); // TDZ check.
    }

    linkSlowCase(iter);
    callOperation(WithProfile, operationGetFromScope, dst, currentInstruction);
}

void JIT::emitPutGlobalVariable(JSValue* operand, int value, WatchpointSet* set)
{
    emitGetVirtualRegister(value, regT0);
    emitNotifyWrite(set);
    storePtr(regT0, operand);
}

void JIT::emitPutGlobalVariableIndirect(JSValue** addressOfOperand, int value, WatchpointSet** indirectWatchpointSet)
{
    emitGetVirtualRegister(value, regT0);
    loadPtr(indirectWatchpointSet, regT1);
    emitNotifyWrite(regT1);
    loadPtr(addressOfOperand, regT1);
    storePtr(regT0, regT1);
}

void JIT::emitPutClosureVar(int scope, uintptr_t operand, int value, WatchpointSet* set)
{
    emitGetVirtualRegister(value, regT1);
    emitGetVirtualRegister(scope, regT0);
    emitNotifyWrite(set);
    storePtr(regT1, Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register)));
}
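
// emitNotifyWrite bails to the slow path while the variable's WatchpointSet has
// not yet been invalidated, so the runtime can fire the watchpoint (and
// deoptimize any code that assumed the variable was constant) before the store
// becomes visible. This is what the "Notify write check" slow cases in
// emitSlow_op_put_to_scope account for.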

void JIT::emit_op_put_to_scope(Instruction* currentInstruction)
{
    int scope = currentInstruction[1].u.operand;
    int value = currentInstruction[3].u.operand;
    GetPutInfo getPutInfo = GetPutInfo(currentInstruction[4].u.operand);
    ResolveType resolveType = getPutInfo.resolveType();
    Structure** structureSlot = currentInstruction[5].u.structure.slot();
    uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&currentInstruction[6].u.pointer);

    auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) {
        switch (resolveType) {
        case GlobalProperty:
        case GlobalPropertyWithVarInjectionChecks: {
            emitWriteBarrier(m_codeBlock->globalObject(), value, ShouldFilterValue);
            emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
            emitGetVirtualRegister(value, regT2);

            loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
            addSlowCase(branchIfNotToSpace(regT0));
            loadPtr(operandSlot, regT1);
            negPtr(regT1);
            storePtr(regT2, BaseIndex(regT0, regT1, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)));
            break;
        }
        case GlobalVar:
        case GlobalVarWithVarInjectionChecks:
        case GlobalLexicalVar:
        case GlobalLexicalVarWithVarInjectionChecks: {
            JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_codeBlock);
            RELEASE_ASSERT(constantScope);
            emitWriteBarrier(constantScope, value, ShouldFilterValue);
            emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
            if (getPutInfo.initializationMode() != Initialization && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) {
                // We need to do a TDZ check here because we can't always prove we need to emit TDZ checks statically.
                if (indirectLoadForOperand)
                    emitGetVarFromIndirectPointer(bitwise_cast<JSValue**>(operandSlot), regT0);
                else
                    emitGetVarFromPointer(bitwise_cast<JSValue*>(*operandSlot), regT0);
                addSlowCase(branchTest64(Zero, regT0));
            }
            if (indirectLoadForOperand)
                emitPutGlobalVariableIndirect(bitwise_cast<JSValue**>(operandSlot), value, bitwise_cast<WatchpointSet**>(&currentInstruction[5]));
            else
                emitPutGlobalVariable(bitwise_cast<JSValue*>(*operandSlot), value, currentInstruction[5].u.watchpointSet);
            break;
        }
        case LocalClosureVar:
        case ClosureVar:
        case ClosureVarWithVarInjectionChecks:
            emitWriteBarrier(scope, value, ShouldFilterValue);
            emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
            emitPutClosureVar(scope, *operandSlot, value, currentInstruction[5].u.watchpointSet);
            break;
        case ModuleVar:
        case Dynamic:
            addSlowCase(jump());
            break;
        case UnresolvedProperty:
        case UnresolvedPropertyWithVarInjectionChecks:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    };

    switch (resolveType) {
    case UnresolvedProperty:
    case UnresolvedPropertyWithVarInjectionChecks: {
        JumpList skipToEnd;
        load32(&currentInstruction[4], regT0);
        and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0

        Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(GlobalProperty));
        Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks));
        isGlobalProperty.link(this);
        emitCode(GlobalProperty, false);
        skipToEnd.append(jump());
        notGlobalPropertyWithVarInjections.link(this);

        Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar));
        emitCode(GlobalLexicalVar, true);
        skipToEnd.append(jump());
        notGlobalLexicalVar.link(this);

        Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks));
        emitCode(GlobalLexicalVarWithVarInjectionChecks, true);
        skipToEnd.append(jump());
        notGlobalLexicalVarWithVarInjections.link(this);

        addSlowCase(jump());

        skipToEnd.link(this);
        break;
    }

    default:
        emitCode(resolveType, false);
        break;
    }
}

void JIT::emitSlow_op_put_to_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    GetPutInfo getPutInfo = GetPutInfo(currentInstruction[4].u.operand);
    ResolveType resolveType = getPutInfo.resolveType();
    unsigned linkCount = 0;
    if (resolveType != GlobalVar && resolveType != ClosureVar && resolveType != LocalClosureVar && resolveType != GlobalLexicalVar)
        linkCount++;
    if ((resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks
        || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks
        || resolveType == LocalClosureVar)
        && currentInstruction[5].u.watchpointSet->state() != IsInvalidated)
        linkCount++;
    if (resolveType == GlobalProperty || resolveType == GlobalPropertyWithVarInjectionChecks) {
        linkCount++; // bad structure
        linkCount++; // read barrier
    }
    if (getPutInfo.initializationMode() != Initialization && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) // TDZ check.
        linkCount++;
    if (resolveType == UnresolvedProperty || resolveType == UnresolvedPropertyWithVarInjectionChecks) {
        // GlobalProperty/GlobalPropertyWithVarInjectionsCheck
        linkCount++; // emitLoadWithStructureCheck
        linkCount++; // emitLoadWithStructureCheck
        linkCount++; // read barrier

        // GlobalLexicalVar
        bool needsTDZCheck = getPutInfo.initializationMode() != Initialization;
        if (needsTDZCheck)
            linkCount++;
        linkCount++; // Notify write check.

        // GlobalLexicalVarWithVarInjectionsCheck
        linkCount++; // var injection check.
        if (needsTDZCheck)
            linkCount++;
        linkCount++; // Notify write check.
    }
    if (!linkCount)
        return;
    while (linkCount--)
        linkSlowCase(iter);

    if (resolveType == ModuleVar) {
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_strict_mode_readonly_property_write_error);
        slowPathCall.call();
    } else
        callOperation(operationPutToScope, currentInstruction);
}

void JIT::emit_op_get_from_arguments(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int arguments = currentInstruction[2].u.operand;
    int index = currentInstruction[3].u.operand;

    emitGetVirtualRegister(arguments, regT0);
    load64(Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>)), regT0);
    emitValueProfilingSite();
    emitPutVirtualRegister(dst);
}

void JIT::emit_op_put_to_arguments(Instruction* currentInstruction)
{
    int arguments = currentInstruction[1].u.operand;
    int index = currentInstruction[2].u.operand;
    int value = currentInstruction[3].u.operand;

    emitWriteBarrier(arguments, value, ShouldFilterValue);

    emitGetVirtualRegister(arguments, regT0);
    emitGetVirtualRegister(value, regT1);
    store64(regT1, Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>)));
}

#endif // USE(JSVALUE64)

#if USE(JSVALUE64)

void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode)
{
    Jump valueNotCell;
    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) {
        emitGetVirtualRegister(value, regT0);
        valueNotCell = branchTest64(NonZero, regT0, tagMaskRegister);
    }

    emitGetVirtualRegister(owner, regT0);
    Jump ownerNotCell;
    if (mode == ShouldFilterBaseAndValue || mode == ShouldFilterBase)
        ownerNotCell = branchTest64(NonZero, regT0, tagMaskRegister);

    Jump ownerIsRememberedOrInEden = jumpIfIsRememberedOrInEden(regT0);
    callOperation(operationUnconditionalWriteBarrier, regT0);
    ownerIsRememberedOrInEden.link(this);

    if (mode == ShouldFilterBaseAndValue || mode == ShouldFilterBase)
        ownerNotCell.link(this);
    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue)
        valueNotCell.link(this);
}
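
// The inline filters above skip the barrier entirely when the value or owner is
// not a cell, and the jumpIfIsRememberedOrInEden check skips the call when the
// owner needs no remembering; operationUnconditionalWriteBarrier performs the
// actual remembered-set update in the slow case.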

void JIT::emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode mode)
{
    emitGetVirtualRegister(value, regT0);
    Jump valueNotCell;
    if (mode == ShouldFilterValue)
        valueNotCell = branchTest64(NonZero, regT0, tagMaskRegister);

    emitWriteBarrier(owner);

    if (mode == ShouldFilterValue)
        valueNotCell.link(this);
}

#else // USE(JSVALUE64)

void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode)
{
    Jump valueNotCell;
    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) {
        emitLoadTag(value, regT0);
        valueNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
    }

    emitLoad(owner, regT0, regT1);
    Jump ownerNotCell;
    if (mode == ShouldFilterBase || mode == ShouldFilterBaseAndValue)
        ownerNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));

    Jump ownerIsRememberedOrInEden = jumpIfIsRememberedOrInEden(regT1);
    callOperation(operationUnconditionalWriteBarrier, regT1);
    ownerIsRememberedOrInEden.link(this);

    if (mode == ShouldFilterBase || mode == ShouldFilterBaseAndValue)
        ownerNotCell.link(this);
    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue)
        valueNotCell.link(this);
}

void JIT::emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode mode)
{
    Jump valueNotCell;
    if (mode == ShouldFilterValue) {
        emitLoadTag(value, regT0);
        valueNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
    }

    emitWriteBarrier(owner);

    if (mode == ShouldFilterValue)
        valueNotCell.link(this);
}

#endif // USE(JSVALUE64)

void JIT::emitWriteBarrier(JSCell* owner)
{
    if (!MarkedBlock::blockFor(owner)->isMarked(owner)) {
        Jump ownerIsRememberedOrInEden = jumpIfIsRememberedOrInEden(owner);
        callOperation(operationUnconditionalWriteBarrier, owner);
        ownerIsRememberedOrInEden.link(this);
    } else
        callOperation(operationUnconditionalWriteBarrier, owner);
}

void JIT::emitIdentifierCheck(RegisterID cell, RegisterID scratch, const Identifier& propertyName, JumpList& slowCases)
{
    if (propertyName.isSymbol()) {
        slowCases.append(branchStructure(NotEqual, Address(cell, JSCell::structureIDOffset()), m_vm->symbolStructure.get()));
        loadPtr(Address(cell, Symbol::offsetOfPrivateName()), scratch);
    } else {
        slowCases.append(branchStructure(NotEqual, Address(cell, JSCell::structureIDOffset()), m_vm->stringStructure.get()));
        loadPtr(Address(cell, JSString::offsetOfValue()), scratch);
    }
    slowCases.append(branchPtr(NotEqual, scratch, TrustedImmPtr(propertyName.impl())));
}
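
// For string keys this only hits the fast path when the JSString's StringImpl
// is pointer-identical to the cached identifier's impl (the atomic string);
// anything else, including rope strings (whose value pointer is null), falls
// through to the slow cases.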

void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
    Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;

    PatchableJump badType;
    JumpList slowCases;

    switch (arrayMode) {
    case JITInt32:
        slowCases = emitInt32GetByVal(currentInstruction, badType);
        break;
    case JITDouble:
        slowCases = emitDoubleGetByVal(currentInstruction, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousGetByVal(currentInstruction, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
        break;
    case JITDirectArguments:
        slowCases = emitDirectArgumentsGetByVal(currentInstruction, badType);
        break;
    case JITScopedArguments:
        slowCases = emitScopedArgumentsGetByVal(currentInstruction, badType);
        break;
    default: {
        TypedArrayType type = typedArrayTypeForJITArrayMode(arrayMode);
        if (isInt(type))
            slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, type);
        else
            slowCases = emitFloatTypedArrayGetByVal(currentInstruction, badType, type);
        break;
    }
    }

    Jump done = jump();

    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);

    patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
    patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));

    patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));

    byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
        m_codeBlock, patchBuffer,
        ("Baseline get_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));

    MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
    MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(operationGetByValGeneric));
}
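
// Once the stub is installed, badTypeJump sends the previously mispredicted
// shape into the new stub, and the slow-path call is repatched to the generic
// operation so that repeated misses stop attempting to recompile the site.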

void JIT::privateCompileGetByValWithCachedId(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, const Identifier& propertyName)
{
    Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;

    Jump fastDoneCase;
    Jump slowDoneCase;
    JumpList slowCases;

    JITGetByIdGenerator gen = emitGetByValWithCachedId(currentInstruction, propertyName, fastDoneCase, slowDoneCase, slowCases);

    ConcurrentJITLocker locker(m_codeBlock->m_lock);
    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
    patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
    patchBuffer.link(fastDoneCase, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
    patchBuffer.link(slowDoneCase, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToNextHotPath));

    for (const auto& callSite : m_calls) {
        if (callSite.to)
            patchBuffer.link(callSite.from, FunctionPtr(callSite.to));
    }
    gen.finalize(patchBuffer);

    byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
        m_codeBlock, patchBuffer,
        ("Baseline get_by_val with cached property name '%s' stub for %s, return point %p", propertyName.impl()->utf8().data(), toCString(*m_codeBlock).data(), returnAddress.value()));
    byValInfo->stubInfo = gen.stubInfo();

    MacroAssembler::repatchJump(byValInfo->notIndexJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
    MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(operationGetByValGeneric));
}

void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
    Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;

    PatchableJump badType;
    JumpList slowCases;

    bool needsLinkForWriteBarrier = false;

    switch (arrayMode) {
    case JITInt32:
        slowCases = emitInt32PutByVal(currentInstruction, badType);
        break;
    case JITDouble:
        slowCases = emitDoublePutByVal(currentInstruction, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousPutByVal(currentInstruction, badType);
        needsLinkForWriteBarrier = true;
        break;
    case JITArrayStorage:
        slowCases = emitArrayStoragePutByVal(currentInstruction, badType);
        needsLinkForWriteBarrier = true;
        break;
    default: {
        TypedArrayType type = typedArrayTypeForJITArrayMode(arrayMode);
        if (isInt(type))
            slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, type);
        else
            slowCases = emitFloatTypedArrayPutByVal(currentInstruction, badType, type);
        break;
    }
    }

    Jump done = jump();

    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
    patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
    patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
    patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
    if (needsLinkForWriteBarrier) {
        ASSERT(m_calls.last().to == operationUnconditionalWriteBarrier);
        patchBuffer.link(m_calls.last().from, operationUnconditionalWriteBarrier);
    }

    bool isDirect = m_interpreter->getOpcodeID(currentInstruction->u.opcode) == op_put_by_val_direct;
    if (!isDirect) {
        byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
            m_codeBlock, patchBuffer,
            ("Baseline put_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
    } else {
        byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
            m_codeBlock, patchBuffer,
            ("Baseline put_by_val_direct stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
    }
    MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
    MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(isDirect ? operationDirectPutByValGeneric : operationPutByValGeneric));
}

void JIT::privateCompilePutByValWithCachedId(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, PutKind putKind, const Identifier& propertyName)
{
    Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;

    JumpList doneCases;
    JumpList slowCases;

    JITPutByIdGenerator gen = emitPutByValWithCachedId(currentInstruction, putKind, propertyName, doneCases, slowCases);

    ConcurrentJITLocker locker(m_codeBlock->m_lock);
    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
    patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
    patchBuffer.link(doneCases, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
    for (const auto& callSite : m_calls) {
        if (callSite.to)
            patchBuffer.link(callSite.from, FunctionPtr(callSite.to));
    }
    gen.finalize(patchBuffer);

    byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
        m_codeBlock, patchBuffer,
        ("Baseline put_by_val%s with cached property name '%s' stub for %s, return point %p", (putKind == Direct) ? "_direct" : "", propertyName.impl()->utf8().data(), toCString(*m_codeBlock).data(), returnAddress.value()));
    byValInfo->stubInfo = gen.stubInfo();

    MacroAssembler::repatchJump(byValInfo->notIndexJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
    MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(putKind == Direct ? operationDirectPutByValGeneric : operationPutByValGeneric));
}

JIT::JumpList JIT::emitDirectArgumentsGetByVal(Instruction*, PatchableJump& badType)
{
    JumpList slowCases;

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    JSValueRegs result = JSValueRegs(regT0);
    RegisterID scratch = regT3;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    JSValueRegs result = JSValueRegs(regT1, regT0);
    RegisterID scratch = regT3;
#endif

    load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
    badType = patchableBranch32(NotEqual, scratch, TrustedImm32(DirectArgumentsType));

    slowCases.append(branch32(AboveOrEqual, property, Address(base, DirectArguments::offsetOfLength())));
    slowCases.append(branchTestPtr(NonZero, Address(base, DirectArguments::offsetOfOverrides())));

    zeroExtend32ToPtr(property, scratch);
    loadValue(BaseIndex(base, scratch, TimesEight, DirectArguments::storageOffset()), result);

    return slowCases;
}

JIT::JumpList JIT::emitScopedArgumentsGetByVal(Instruction*, PatchableJump& badType)
{
    JumpList slowCases;

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    JSValueRegs result = JSValueRegs(regT0);
    RegisterID scratch = regT3;
    RegisterID scratch2 = regT4;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    JSValueRegs result = JSValueRegs(regT1, regT0);
    RegisterID scratch = regT3;
    RegisterID scratch2 = regT4;
#endif

    load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
    badType = patchableBranch32(NotEqual, scratch, TrustedImm32(ScopedArgumentsType));
    slowCases.append(branch32(AboveOrEqual, property, Address(base, ScopedArguments::offsetOfTotalLength())));

    loadPtr(Address(base, ScopedArguments::offsetOfTable()), scratch);
    load32(Address(scratch, ScopedArgumentsTable::offsetOfLength()), scratch2);
    Jump overflowCase = branch32(AboveOrEqual, property, scratch2);
    loadPtr(Address(base, ScopedArguments::offsetOfScope()), scratch2);
    loadPtr(Address(scratch, ScopedArgumentsTable::offsetOfArguments()), scratch);
    load32(BaseIndex(scratch, property, TimesFour), scratch);
    slowCases.append(branch32(Equal, scratch, TrustedImm32(ScopeOffset::invalidOffset)));
    loadValue(BaseIndex(scratch2, scratch, TimesEight, JSEnvironmentRecord::offsetOfVariables()), result);
    Jump done = jump();
    overflowCase.link(this);
    sub32(property, scratch2);
    neg32(scratch2);
    loadValue(BaseIndex(base, scratch2, TimesEight, ScopedArguments::overflowStorageOffset()), result);
    slowCases.append(branchIfEmpty(result));
    done.link(this);

    return slowCases;
}

JIT::JumpList JIT::emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType type)
{
    ASSERT(isInt(type));

    // The best way to test the array type is to use the classInfo. We need to do so without
    // clobbering the register that holds the indexing type, base, and property.

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID resultPayload = regT0;
    RegisterID scratch = regT3;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID resultPayload = regT0;
    RegisterID resultTag = regT1;
    RegisterID scratch = regT3;
#endif

    JumpList slowCases;

    load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
    badType = patchableBranch32(NotEqual, scratch, TrustedImm32(typeForTypedArrayType(type)));
    slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength())));
    slowCases.append(loadTypedArrayVector(base, scratch));

    switch (elementSize(type)) {
    case 1:
        if (isSigned(type))
            load8SignedExtendTo32(BaseIndex(scratch, property, TimesOne), resultPayload);
        else
            load8(BaseIndex(scratch, property, TimesOne), resultPayload);
        break;
    case 2:
        if (isSigned(type))
            load16SignedExtendTo32(BaseIndex(scratch, property, TimesTwo), resultPayload);
        else
            load16(BaseIndex(scratch, property, TimesTwo), resultPayload);
        break;
    case 4:
        load32(BaseIndex(scratch, property, TimesFour), resultPayload);
        break;
    default:
        CRASH();
    }

    Jump done;
    if (type == TypeUint32) {
        Jump canBeInt = branch32(GreaterThanOrEqual, resultPayload, TrustedImm32(0));

        convertInt32ToDouble(resultPayload, fpRegT0);
        addDouble(AbsoluteAddress(&twoToThe32), fpRegT0);
#if USE(JSVALUE64)
        moveDoubleTo64(fpRegT0, resultPayload);
        sub64(tagTypeNumberRegister, resultPayload);
#else
        moveDoubleToInts(fpRegT0, resultPayload, resultTag);
#endif

        done = jump();
        canBeInt.link(this);
    }

#if USE(JSVALUE64)
    or64(tagTypeNumberRegister, resultPayload);
#else
    move(TrustedImm32(JSValue::Int32Tag), resultTag);
#endif
    if (done.isSet())
        done.link(this);
    return slowCases;
}
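
// Uint32 values with the sign bit set cannot be represented as int32 JSValues,
// so the canBeInt check above routes them through a double: adding 2^32
// converts the (negative) signed reading back to the unsigned value before the
// result is boxed as a double.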

JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType type)
{
    ASSERT(isFloat(type));

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID resultPayload = regT0;
    RegisterID scratch = regT3;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID resultPayload = regT0;
    RegisterID resultTag = regT1;
    RegisterID scratch = regT3;
#endif

    JumpList slowCases;

    load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
    badType = patchableBranch32(NotEqual, scratch, TrustedImm32(typeForTypedArrayType(type)));
    slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength())));
    slowCases.append(loadTypedArrayVector(base, scratch));

    switch (elementSize(type)) {
    case 4:
        loadFloat(BaseIndex(scratch, property, TimesFour), fpRegT0);
        convertFloatToDouble(fpRegT0, fpRegT0);
        break;
    case 8:
        loadDouble(BaseIndex(scratch, property, TimesEight), fpRegT0);
        break;
    default:
        CRASH();
    }

    Jump notNaN = branchDouble(DoubleEqual, fpRegT0, fpRegT0);
    static const double NaN = PNaN;
    loadDouble(TrustedImmPtr(&NaN), fpRegT0);
    notNaN.link(this);

#if USE(JSVALUE64)
    moveDoubleTo64(fpRegT0, resultPayload);
    sub64(tagTypeNumberRegister, resultPayload);
#else
    moveDoubleToInts(fpRegT0, resultPayload, resultTag);
#endif

    return slowCases;
}
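
// The NaN check above canonicalizes any NaN read out of the typed array to
// PNaN. This matters on JSVALUE64: an arbitrary NaN bit pattern could otherwise
// collide with the tagged-pointer encoding of JSValue.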

JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, TypedArrayType type)
{
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
    ASSERT(isInt(type));

    int value = currentInstruction[3].u.operand;

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT2;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT1;
#endif

    JumpList slowCases;

    load8(Address(base, JSCell::typeInfoTypeOffset()), earlyScratch);
    badType = patchableBranch32(NotEqual, earlyScratch, TrustedImm32(typeForTypedArrayType(type)));
    Jump inBounds = branch32(Below, property, Address(base, JSArrayBufferView::offsetOfLength()));
    emitArrayProfileOutOfBoundsSpecialCase(profile);
    slowCases.append(jump());
    inBounds.link(this);

#if USE(JSVALUE64)
    emitGetVirtualRegister(value, earlyScratch);
    slowCases.append(emitJumpIfNotInt(earlyScratch));
#else
    emitLoad(value, lateScratch, earlyScratch);
    slowCases.append(branch32(NotEqual, lateScratch, TrustedImm32(JSValue::Int32Tag)));
#endif

    // We would be loading this into base as in get_by_val, except that the slow
    // path expects the base to be unclobbered.
    slowCases.append(loadTypedArrayVector(base, lateScratch));

    if (isClamped(type)) {
        ASSERT(elementSize(type) == 1);
        ASSERT(!isSigned(type));
        Jump inBounds = branch32(BelowOrEqual, earlyScratch, TrustedImm32(0xff));
        Jump tooBig = branch32(GreaterThan, earlyScratch, TrustedImm32(0xff));
        xor32(earlyScratch, earlyScratch);
        Jump clamped = jump();
        tooBig.link(this);
        move(TrustedImm32(0xff), earlyScratch);
        clamped.link(this);
        inBounds.link(this);
    }

    switch (elementSize(type)) {
    case 1:
        store8(earlyScratch, BaseIndex(lateScratch, property, TimesOne));
        break;
    case 2:
        store16(earlyScratch, BaseIndex(lateScratch, property, TimesTwo));
        break;
    case 4:
        store32(earlyScratch, BaseIndex(lateScratch, property, TimesFour));
        break;
    default:
        CRASH();
    }

    return slowCases;
}

JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, TypedArrayType type)
{
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
    ASSERT(isFloat(type));

    int value = currentInstruction[3].u.operand;

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT2;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT1;
#endif

    JumpList slowCases;

    load8(Address(base, JSCell::typeInfoTypeOffset()), earlyScratch);
    badType = patchableBranch32(NotEqual, earlyScratch, TrustedImm32(typeForTypedArrayType(type)));
    Jump inBounds = branch32(Below, property, Address(base, JSArrayBufferView::offsetOfLength()));
    emitArrayProfileOutOfBoundsSpecialCase(profile);
    slowCases.append(jump());
    inBounds.link(this);

#if USE(JSVALUE64)
    emitGetVirtualRegister(value, earlyScratch);
    Jump doubleCase = emitJumpIfNotInt(earlyScratch);
    convertInt32ToDouble(earlyScratch, fpRegT0);
    Jump ready = jump();
    doubleCase.link(this);
    slowCases.append(emitJumpIfNotNumber(earlyScratch));
    add64(tagTypeNumberRegister, earlyScratch);
    move64ToDouble(earlyScratch, fpRegT0);
    ready.link(this);
#else
    emitLoad(value, lateScratch, earlyScratch);
    Jump doubleCase = branch32(NotEqual, lateScratch, TrustedImm32(JSValue::Int32Tag));
    convertInt32ToDouble(earlyScratch, fpRegT0);
    Jump ready = jump();
    doubleCase.link(this);
    slowCases.append(branch32(Above, lateScratch, TrustedImm32(JSValue::LowestTag)));
    moveIntsToDouble(earlyScratch, lateScratch, fpRegT0, fpRegT1);
    ready.link(this);
#endif

    // We would be loading this into base as in get_by_val, except that the slow
    // path expects the base to be unclobbered.
    slowCases.append(loadTypedArrayVector(base, lateScratch));

    switch (elementSize(type)) {
    case 4:
        convertDoubleToFloat(fpRegT0, fpRegT0);
        storeFloat(fpRegT0, BaseIndex(lateScratch, property, TimesFour));
        break;
    case 8:
        storeDouble(fpRegT0, BaseIndex(lateScratch, property, TimesEight));
        break;
    default:
        CRASH();
    }

    return slowCases;
}

} // namespace JSC

#endif // ENABLE(JIT)