/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef JITInlineMethods_h
#define JITInlineMethods_h

#if ENABLE(JIT)

namespace JSC {
/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    peek(dst, argumentStackOffset);
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
}

ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
{
    ASSERT(m_codeBlock->isConstantRegisterIndex(src));
    return m_codeBlock->getConstant(src);
}
ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(from, payloadFor(entry, callFrameRegister));
}

ALWAYS_INLINE void JIT::emitPutCellToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
#if USE(JSVALUE32_64)
    store32(TrustedImm32(JSValue::CellTag), tagFor(entry, callFrameRegister));
#endif
    storePtr(from, payloadFor(entry, callFrameRegister));
}
ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
    store32(TrustedImm32(Int32Tag), intTagFor(entry, callFrameRegister));
    store32(from, intPayloadFor(entry, callFrameRegister));
}

ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(TrustedImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
}
ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    loadPtr(Address(from, entry * sizeof(Register)), to);
#if USE(JSVALUE64)
    killLastResultRegister();
#endif
}
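// Loads the single character of a one-character JSString in src into dst, clobbering
// regT1. Branches to failures if src is not a JSString, is not of length 1, or is a
// rope (its value pointer is null).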
ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
{
    failures.append(branchPtr(NotEqual, Address(src, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info)));
    failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), TrustedImm32(1)));
    loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst);
    failures.append(branchTest32(Zero, dst));
    loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplFlagsOffset()), regT1);
    loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplDataOffset()), dst);

    JumpList is16Bit;
    JumpList cont8Bit;
    is16Bit.append(branchTest32(Zero, regT1, TrustedImm32(ThunkHelpers::stringImpl8BitFlag())));
    load8(MacroAssembler::Address(dst, 0), dst);
    cont8Bit.append(jump());
    is16Bit.link(this);
    load16(MacroAssembler::Address(dst, 0), dst);
    cont8Bit.link(this);
}
ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    load32(Address(from, entry * sizeof(Register)), to);
#if USE(JSVALUE64)
    killLastResultRegister();
#endif
}
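// Emits a bare near call with no argument setup or exception check; the call site is
// recorded in m_calls so it can be linked to the target function at finalization.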
ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    Call nakedCall = nearCall();
    m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
    return nakedCall;
}

ALWAYS_INLINE bool JIT::atJumpTarget()
{
    while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeOffset) {
        if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeOffset)
            return true;
        ++m_jumpTargetsPosition;
    }
    return false;
}
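// beginUninterruptedSequence/endUninterruptedSequence bracket code that must not be
// split by a constant pool flush on assemblers that emit constant pools (e.g. ARM
// traditional, SH4). Worst-case instruction and constant space is reserved up front,
// and debug builds assert that the emitted sequence stayed within that reservation.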
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL

ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
{
    JSInterfaceJIT::beginUninterruptedSequence();
#if CPU(ARM_TRADITIONAL)
#ifndef NDEBUG
    // Ensure the label after the sequence can also fit
    insnSpace += sizeof(ARMWord);
    constSpace += sizeof(uint64_t);
#endif

    ensureSpace(insnSpace, constSpace);

#elif CPU(SH4)
#ifndef NDEBUG
    insnSpace += sizeof(SH4Word);
    constSpace += sizeof(uint64_t);
#endif

    m_assembler.ensureSpace(insnSpace + m_assembler.maxInstructionSize + 2, constSpace + 8);
#endif

#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
#ifndef NDEBUG
    m_uninterruptedInstructionSequenceBegin = label();
    m_uninterruptedConstantSequenceBegin = sizeOfConstantPool();
#endif
#endif
}
ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace, int dst)
{
    UNUSED_PARAM(dst);
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
    /* There are several cases when an uninterrupted sequence is larger than the
     * maximum offset required to patch that same sequence. E.g.: if the last
     * macroassembler instruction in an uninterrupted sequence is a stub call, it
     * emits store instruction(s) which should not be included in the calculation
     * of the length of the uninterrupted sequence. So insnSpace and constSpace
     * are an upper limit instead of a hard limit.
     */
#if CPU(SH4)
    if ((dst > 15) || (dst < -16)) {
        insnSpace += 8;
        constSpace += 2;
    }

    if (((dst >= -16) && (dst < 0)) || ((dst > 7) && (dst <= 15)))
        insnSpace += 8;
#endif
    ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) <= insnSpace);
    ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin <= constSpace);
#endif
    JSInterfaceJIT::endUninterruptedSequence();
}

#endif // ASSEMBLER_HAS_CONSTANT_POOL
#if CPU(ARM)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    move(linkRegister, reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    move(reg, linkRegister);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtr(address, linkRegister);
}

#elif CPU(SH4)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    m_assembler.stspr(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    m_assembler.ldspr(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtrLinkReg(address);
}

#elif CPU(MIPS)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    move(returnAddressRegister, reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    move(reg, returnAddressRegister);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtr(address, returnAddressRegister);
}
#else // CPU(X86) || CPU(X86_64)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    pop(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    push(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    push(address);
}

#endif
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
    move(stackPointerRegister, firstArgumentRegister);
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
}
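// Stores the current bytecode location, offset by one, into the ArgumentCount tag slot
// and publishes callFrameRegister to m_globalData->topCallFrame so the runtime can find
// the currently executing frame.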
ALWAYS_INLINE void JIT::updateTopCallFrame()
{
    ASSERT(static_cast<int>(m_bytecodeOffset) >= 0);
    if (m_bytecodeOffset) {
#if USE(JSVALUE32_64)
        storePtr(TrustedImmPtr(m_codeBlock->instructions().begin() + m_bytecodeOffset + 1), intTagFor(RegisterFile::ArgumentCount));
#else
        store32(TrustedImm32(m_bytecodeOffset + 1), intTagFor(RegisterFile::ArgumentCount));
#endif
    }
    storePtr(callFrameRegister, &m_globalData->topCallFrame);
}
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
{
#if CPU(X86)
    // Within a trampoline the return address will be on the stack at this point.
    addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
#elif CPU(ARM)
    move(stackPointerRegister, firstArgumentRegister);
#elif CPU(SH4)
    move(stackPointerRegister, firstArgumentRegister);
#endif
    // In the trampoline on x86-64, the first argument register is not overwritten.
}
ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
    return branchPtr(NotEqual, Address(reg, JSCell::structureOffset()), TrustedImmPtr(structure));
}

ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        linkSlowCase(iter);
}
ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    const JumpList::JumpVector& jumpVector = jumpList.jumps();
    size_t size = jumpVector.size();
    for (size_t i = 0; i < size; ++i)
        m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addSlowCase()
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    Jump emptyJump; // Doing it this way to make Windows happy.
    m_slowCases.append(SlowCaseEntry(emptyJump, m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset));
}

ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this);
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotObject(RegisterID structureReg)
{
    return branch8(Below, Address(structureReg, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotType(RegisterID baseReg, RegisterID scratchReg, JSType type)
{
    loadPtr(Address(baseReg, JSCell::structureOffset()), scratchReg);
    return branch8(NotEqual, Address(scratchReg, Structure::typeInfoTypeOffset()), TrustedImm32(type));
}
#if ENABLE(SAMPLING_FLAGS)
ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    or32(TrustedImm32(1u << (flag - 1)), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}

ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    and32(TrustedImm32(~(1u << (flag - 1))), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}
#endif
#if ENABLE(SAMPLING_COUNTERS)
ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, int32_t count)
{
    add64(TrustedImm32(count), AbsoluteAddress(counter.addressOfCounter()));
}
#endif
#if ENABLE(OPCODE_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    move(TrustedImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
    storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
}
#endif
#endif

#if ENABLE(CODEBLOCK_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    move(TrustedImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
    storePtr(TrustedImmPtr(codeBlock), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    storePtr(TrustedImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
}
#endif
#endif
ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
}
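// Fast-path object allocation: pops the first free cell off the appropriate
// MarkedAllocator free list and initializes its structure, class info, inheritor ID
// and inline property storage pointer. Takes the slow case if the free list is empty.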
template <typename ClassType, bool destructor, typename StructureType> inline void JIT::emitAllocateBasicJSObject(StructureType structure, RegisterID result, RegisterID storagePtr)
{
    MarkedAllocator* allocator = 0;
    if (destructor)
        allocator = &m_globalData->heap.allocatorForObjectWithDestructor(sizeof(ClassType));
    else
        allocator = &m_globalData->heap.allocatorForObjectWithoutDestructor(sizeof(ClassType));
    loadPtr(&allocator->m_firstFreeCell, result);
    addSlowCase(branchTestPtr(Zero, result));

    // remove the object from the free list
    loadPtr(Address(result), storagePtr);
    storePtr(storagePtr, &allocator->m_firstFreeCell);

    // initialize the object's structure
    storePtr(structure, Address(result, JSCell::structureOffset()));

    // initialize the object's classInfo pointer
    storePtr(TrustedImmPtr(&ClassType::s_info), Address(result, JSCell::classInfoOffset()));

    // initialize the inheritor ID
    storePtr(TrustedImmPtr(0), Address(result, JSObject::offsetOfInheritorID()));

    // initialize the object's property storage pointer
    addPtr(TrustedImm32(sizeof(JSObject)), result, storagePtr);
    storePtr(storagePtr, Address(result, ClassType::offsetOfPropertyStorage()));
}
template <typename T> inline void JIT::emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID scratch)
{
    emitAllocateBasicJSObject<JSFinalObject, false, T>(structure, result, scratch);
}

inline void JIT::emitAllocateJSFunction(FunctionExecutable* executable, RegisterID scopeChain, RegisterID result, RegisterID storagePtr)
{
    emitAllocateBasicJSObject<JSFunction, true>(TrustedImmPtr(m_codeBlock->globalObject()->namedFunctionStructure()), result, storagePtr);

    // store the function's scope chain
    storePtr(scopeChain, Address(result, JSFunction::offsetOfScopeChain()));

    // store the function's executable member
    storePtr(TrustedImmPtr(executable), Address(result, JSFunction::offsetOfExecutable()));

    // store the function's name
    ASSERT(executable->nameValue());
    int functionNameOffset = sizeof(JSValue) * m_codeBlock->globalObject()->functionNameOffset();
    storePtr(TrustedImmPtr(executable->nameValue()), Address(regT1, functionNameOffset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
#if USE(JSVALUE32_64)
    store32(TrustedImm32(JSValue::CellTag), Address(regT1, functionNameOffset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
#endif
}
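// Bump allocation out of the copied space: takes the slow case if the current block
// cannot fit size more bytes; otherwise advances m_currentOffset and leaves the start
// of the new storage in result.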
inline void JIT::emitAllocateBasicStorage(size_t size, RegisterID result, RegisterID storagePtr)
{
    CopiedAllocator* allocator = &m_globalData->heap.storageAllocator();

    // FIXME: We need to check for wrap-around.
    // Check to make sure that the allocation will fit in the current block.
    loadPtr(&allocator->m_currentOffset, result);
    addPtr(TrustedImm32(size), result);
    loadPtr(&allocator->m_currentBlock, storagePtr);
    addPtr(TrustedImm32(HeapBlock::s_blockSize), storagePtr);
    addSlowCase(branchPtr(AboveOrEqual, result, storagePtr));

    // Load the original offset.
    loadPtr(&allocator->m_currentOffset, result);

    // Bump the pointer forward.
    move(result, storagePtr);
    addPtr(TrustedImm32(size), storagePtr);
    storePtr(storagePtr, &allocator->m_currentOffset);
}
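// Inline allocation of a JSArray with a vector of at least length (minimum 4) slots:
// allocates the cell and its ArrayStorage, fills in the header fields, copies length
// values from the virtual registers starting at valuesRegister, and clears the rest.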
inline void JIT::emitAllocateJSArray(unsigned valuesRegister, unsigned length, RegisterID cellResult, RegisterID storageResult, RegisterID storagePtr)
{
    unsigned initialLength = std::max(length, 4U);
    size_t initialStorage = JSArray::storageSize(initialLength);

    // Allocate the cell for the array.
    emitAllocateBasicJSObject<JSArray, false>(TrustedImmPtr(m_codeBlock->globalObject()->arrayStructure()), cellResult, storagePtr);

    // Allocate the backing store for the array.
    emitAllocateBasicStorage(initialStorage, storageResult, storagePtr);

    // Store all the necessary info in the ArrayStorage.
    storePtr(storageResult, Address(storageResult, ArrayStorage::allocBaseOffset()));
    store32(Imm32(length), Address(storageResult, ArrayStorage::lengthOffset()));
    store32(Imm32(length), Address(storageResult, ArrayStorage::numValuesInVectorOffset()));

    // Store the newly allocated ArrayStorage.
    storePtr(storageResult, Address(cellResult, JSArray::storageOffset()));

    // Store the vector length and index bias.
    store32(Imm32(initialLength), Address(cellResult, JSArray::vectorLengthOffset()));
    store32(TrustedImm32(0), Address(cellResult, JSArray::indexBiasOffset()));

    // Initialize the sparse value map.
    storePtr(TrustedImmPtr(0), Address(cellResult, JSArray::sparseValueMapOffset()));

    // Store the values we have.
    for (unsigned i = 0; i < length; i++) {
#if USE(JSVALUE64)
        loadPtr(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register)), storagePtr);
        storePtr(storagePtr, Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i));
#else
        load32(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register)), storagePtr);
        store32(storagePtr, Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i));
        load32(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register) + sizeof(uint32_t)), storagePtr);
        store32(storagePtr, Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i + sizeof(uint32_t)));
#endif
    }

    // Zero out the remaining slots.
    for (unsigned i = length; i < initialLength; i++) {
#if USE(JSVALUE64)
        storePtr(TrustedImmPtr(0), Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i));
#else
        store32(TrustedImm32(static_cast<int>(JSValue::EmptyValueTag)), Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
        store32(TrustedImm32(0), Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
#endif
    }
}
#if ENABLE(VALUE_PROFILER)
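// Records the value currently in regT0 (plus its tag in regT1 on JSVALUE32_64) into
// the given ValueProfile. With a single bucket the store is direct; otherwise a
// pseudo-randomly advanced index in bucketCounterRegister spreads samples across the
// buckets.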
inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile)
{
    ASSERT(shouldEmitProfiling());
    ASSERT(valueProfile);

    const RegisterID value = regT0;
#if USE(JSVALUE32_64)
    const RegisterID valueTag = regT1;
#endif
    const RegisterID scratch = regT3;

    if (ValueProfile::numberOfBuckets == 1) {
        // We're in a simple configuration: only one bucket, so we can just do a direct
        // store.
#if USE(JSVALUE64)
        storePtr(value, valueProfile->m_buckets);
#else
        EncodedValueDescriptor* descriptor = bitwise_cast<EncodedValueDescriptor*>(valueProfile->m_buckets);
        store32(value, &descriptor->asBits.payload);
        store32(valueTag, &descriptor->asBits.tag);
#endif
        return;
    }

    if (m_randomGenerator.getUint32() & 1)
        add32(TrustedImm32(1), bucketCounterRegister);
    else
        add32(TrustedImm32(3), bucketCounterRegister);
    and32(TrustedImm32(ValueProfile::bucketIndexMask), bucketCounterRegister);
    move(TrustedImmPtr(valueProfile->m_buckets), scratch);
#if USE(JSVALUE64)
    storePtr(value, BaseIndex(scratch, bucketCounterRegister, TimesEight));
#elif USE(JSVALUE32_64)
    store32(value, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
    store32(valueTag, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
#endif
}

inline void JIT::emitValueProfilingSite(unsigned bytecodeOffset)
{
    if (!shouldEmitProfiling())
        return;
    emitValueProfilingSite(m_codeBlock->valueProfileForBytecodeOffset(bytecodeOffset));
}

inline void JIT::emitValueProfilingSite()
{
    emitValueProfilingSite(m_bytecodeOffset);
}
#endif // ENABLE(VALUE_PROFILER)
#if USE(JSVALUE32_64)

inline void JIT::emitLoadTag(int index, RegisterID tag)
{
    RegisterID mappedTag;
    if (getMappedTag(index, mappedTag)) {
        move(mappedTag, tag);
        unmap(tag);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).tag()), tag);
        unmap(tag);
        return;
    }

    load32(tagFor(index), tag);
    unmap(tag);
}

inline void JIT::emitLoadPayload(int index, RegisterID payload)
{
    RegisterID mappedPayload;
    if (getMappedPayload(index, mappedPayload)) {
        move(mappedPayload, payload);
        unmap(payload);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).payload()), payload);
        unmap(payload);
        return;
    }

    load32(payloadFor(index), payload);
    unmap(payload);
}

inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
{
    move(Imm32(v.payload()), payload);
    move(Imm32(v.tag()), tag);
}
inline void JIT::emitLoad(int index, RegisterID tag, RegisterID payload, RegisterID base)
{
    ASSERT(tag != payload);

    if (base == callFrameRegister) {
        ASSERT(payload != base);
        emitLoadPayload(index, payload);
        emitLoadTag(index, tag);
        return;
    }

    if (payload == base) { // avoid stomping base
        load32(tagFor(index, base), tag);
        load32(payloadFor(index, base), payload);
        return;
    }

    load32(payloadFor(index, base), payload);
    load32(tagFor(index, base), tag);
}

inline void JIT::emitLoad2(int index1, RegisterID tag1, RegisterID payload1, int index2, RegisterID tag2, RegisterID payload2)
{
    if (isMapped(index1)) {
        emitLoad(index1, tag1, payload1);
        emitLoad(index2, tag2, payload2);
        return;
    }
    emitLoad(index2, tag2, payload2);
    emitLoad(index1, tag1, payload1);
}
inline void JIT::emitLoadDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        char* bytePointer = reinterpret_cast<char*>(&inConstantPool);
        convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
    } else
        convertInt32ToDouble(payloadFor(index), value);
}
inline void JIT::emitStore(int index, RegisterID tag, RegisterID payload, RegisterID base)
{
    store32(payload, payloadFor(index, base));
    store32(tag, tagFor(index, base));
}

inline void JIT::emitStoreInt32(int index, RegisterID payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreAndMapInt32(int index, RegisterID tag, RegisterID payload, bool indexIsInt32, size_t opcodeLength)
{
    emitStoreInt32(index, payload, indexIsInt32);
    map(m_bytecodeOffset + opcodeLength, index, tag, payload);
}

inline void JIT::emitStoreInt32(int index, TrustedImm32 payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreCell(int index, RegisterID payload, bool indexIsCell)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsCell)
        store32(TrustedImm32(JSValue::CellTag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreBool(int index, RegisterID payload, bool indexIsBool)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsBool)
        store32(TrustedImm32(JSValue::BooleanTag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreDouble(int index, FPRegisterID value)
{
    storeDouble(value, addressFor(index));
}

inline void JIT::emitStore(int index, const JSValue constant, RegisterID base)
{
    store32(Imm32(constant.payload()), payloadFor(index, base));
    store32(Imm32(constant.tag()), tagFor(index, base));
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    emitStore(dst, jsUndefined());
}

inline bool JIT::isLabeled(unsigned bytecodeOffset)
{
    for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) {
        unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex);
        if (jumpTarget == bytecodeOffset)
            return true;
        if (jumpTarget > bytecodeOffset)
            return false;
    }
    return false;
}
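// The mapping cache below remembers which registers hold the tag and payload of a
// virtual register at the current bytecode offset, letting adjacent ops skip reloads.
// It is not established across jump targets and is cleared whenever a mapped register
// is reused or the bytecode offset changes.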
inline void JIT::map(unsigned bytecodeOffset, int virtualRegisterIndex, RegisterID tag, RegisterID payload)
{
    if (isLabeled(bytecodeOffset))
        return;

    m_mappedBytecodeOffset = bytecodeOffset;
    m_mappedVirtualRegisterIndex = virtualRegisterIndex;
    m_mappedTag = tag;
    m_mappedPayload = payload;

    ASSERT(!canBeOptimized() || m_mappedPayload == regT0);
    ASSERT(!canBeOptimized() || m_mappedTag == regT1);
}

inline void JIT::unmap(RegisterID registerID)
{
    if (m_mappedTag == registerID)
        m_mappedTag = (RegisterID)-1;
    else if (m_mappedPayload == registerID)
        m_mappedPayload = (RegisterID)-1;
}

inline void JIT::unmap()
{
    m_mappedBytecodeOffset = (unsigned)-1;
    m_mappedVirtualRegisterIndex = RegisterFile::ReturnPC;
    m_mappedTag = (RegisterID)-1;
    m_mappedPayload = (RegisterID)-1;
}

inline bool JIT::isMapped(int virtualRegisterIndex)
{
    if (m_mappedBytecodeOffset != m_bytecodeOffset)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    return true;
}

inline bool JIT::getMappedPayload(int virtualRegisterIndex, RegisterID& payload)
{
    if (m_mappedBytecodeOffset != m_bytecodeOffset)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedPayload == (RegisterID)-1)
        return false;
    payload = m_mappedPayload;
    return true;
}

inline bool JIT::getMappedTag(int virtualRegisterIndex, RegisterID& tag)
{
    if (m_mappedBytecodeOffset != m_bytecodeOffset)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedTag == (RegisterID)-1)
        return false;
    tag = m_mappedTag;
    return true;
}
inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
            addSlowCase(jump());
        else
            addSlowCase(emitJumpIfNotJSCell(virtualRegisterIndex));
    }
}

inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex, RegisterID tag)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
            addSlowCase(jump());
        else
            addSlowCase(branch32(NotEqual, tag, TrustedImm32(JSValue::CellTag)));
    }
}
ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant)
{
    if (isOperandConstantImmediateInt(op1)) {
        constant = getConstantOperand(op1).asInt32();
        op = op2;
        return true;
    }

    if (isOperandConstantImmediateInt(op2)) {
        constant = getConstantOperand(op2).asInt32();
        op = op1;
        return true;
    }

    return false;
}
#else // USE(JSVALUE32_64)

ALWAYS_INLINE void JIT::killLastResultRegister()
{
    m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
}
// get arg puts an arg from the SF register array into a h/w register
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        if (!value.isNumber())
            move(TrustedImmPtr(JSValue::encode(value)), dst);
        else
            move(ImmPtr(JSValue::encode(value)), dst);
        killLastResultRegister();
        return;
    }

    if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src) && !atJumpTarget()) {
        // The argument we want is already stored in eax
        if (dst != cachedResultRegister)
            move(cachedResultRegister, dst);
        killLastResultRegister();
        return;
    }

    loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
    killLastResultRegister();
}

ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
{
    if (src2 == m_lastResultBytecodeRegister) {
        emitGetVirtualRegister(src2, dst2);
        emitGetVirtualRegister(src1, dst1);
    } else {
        emitGetVirtualRegister(src1, dst1);
        emitGetVirtualRegister(src2, dst2);
    }
}
ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
{
    return getConstantOperand(src).asInt32();
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
{
    storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
    m_lastResultBytecodeRegister = (from == cachedResultRegister) ? static_cast<int>(dst) : std::numeric_limits<int>::max();
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    storePtr(TrustedImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchTestPtr(Zero, reg, tagMaskRegister);
#else
    return branchTest32(Zero, reg, TrustedImm32(TagMask));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    orPtr(reg2, scratch);
    return emitJumpIfJSCell(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfJSCell(reg));
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchTestPtr(NonZero, reg, tagMaskRegister);
#else
    return branchTest32(NonZero, reg, TrustedImm32(TagMask));
#endif
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfNotJSCell(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        emitJumpSlowCaseIfNotJSCell(reg);
}
#if USE(JSVALUE64)

inline void JIT::emitLoadDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        ASSERT(isOperandConstantImmediateInt(index));
        convertInt32ToDouble(Imm32(getConstantOperand(index).asInt32()), value);
    } else
        convertInt32ToDouble(addressFor(index), value);
}
#endif

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister);
#else
    return branchTest32(NonZero, reg, TrustedImm32(TagTypeNumber));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchPtr(Below, reg, tagTypeNumberRegister);
#else
    return branchTest32(Zero, reg, TrustedImm32(TagTypeNumber));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    andPtr(reg2, scratch);
    return emitJumpIfNotImmediateInteger(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateInteger(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateNumber(reg));
}
#if USE(JSVALUE32_64)
ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
{
    subPtr(TrustedImm32(TagTypeNumber), reg);
}

ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
{
    return branchSubPtr(Zero, TrustedImm32(TagTypeNumber), reg);
}
#endif
ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
{
#if USE(JSVALUE64)
    emitFastArithIntToImmNoCheck(src, dest);
#else
    if (src != dest)
        move(src, dest);
    addPtr(TrustedImm32(TagTypeNumber), dest);
#endif
}

// operand is int32_t, must have been zero-extended if register is 64-bit.
ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
{
#if USE(JSVALUE64)
    if (src != dest)
        move(src, dest);
    orPtr(tagTypeNumberRegister, dest);
#else
    signExtend32ToPtr(src, dest);
    addPtr(dest, dest);
    emitFastArithReTagImmediate(dest, dest);
#endif
}
ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
{
    or32(TrustedImm32(static_cast<int32_t>(ValueFalse)), reg);
}
#endif // USE(JSVALUE32_64)

} // namespace JSC

#endif // ENABLE(JIT)

#endif // JITInlineMethods_h