/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "JIT.h"

#include "CodeBlock.h"
#include "JSFunction.h"
#include "Interpreter.h"
#include "ResultType.h"
#include "SamplingTool.h"
#define __ m_assembler.

#if PLATFORM(WIN)
#undef FIELD_OFFSET // Fix conflict with winnt.h.
#endif

// FIELD_OFFSET: Like the C++ offsetof macro, but you can use it with classes.
// The magic number 0x4000 is insignificant. We use it to avoid using NULL, since
// NULL can cause compiler problems, especially in cases of multiple inheritance.
#define FIELD_OFFSET(class, field) (reinterpret_cast<ptrdiff_t>(&(reinterpret_cast<class*>(0x4000)->field)) - 0x4000)
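// For example, FIELD_OFFSET(JSCell, m_structure) pretends a JSCell lives at
// 0x4000, takes the address of its m_structure field, and subtracts 0x4000
// again, leaving just the field's byte offset. Unlike offsetof, this also
// compiles for non-POD classes. (Illustrative note, not shipped code.)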
namespace JSC {

typedef X86Assembler::JmpSrc JmpSrc;

#if PLATFORM(MAC)

static inline bool isSSE2Present()
{
    return true; // All X86 Macs are guaranteed to support at least SSE2
}

#else

static bool isSSE2Present()
{
    static const int SSE2FeatureBit = 1 << 26;
    struct SSE2Check {
        SSE2Check()
        {
            int flags;
#if COMPILER(MSVC)
            _asm {
                mov eax, 1 // cpuid function 1 gives us the standard feature set
                cpuid;
                mov flags, edx;
            }
#else
            flags = 0;
            // FIXME: Add GCC code to do above asm
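            // A hedged sketch of that asm for GCC (our assumption, not the
            // shipped implementation; ebx needs care under PIC on x86):
            //     int eaxScratch, ecxScratch;
            //     __asm__ ("cpuid"
            //              : "=a"(eaxScratch), "=c"(ecxScratch), "=d"(flags)
            //              : "a"(1)
            //              : "ebx");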
#endif
            present = (flags & SSE2FeatureBit) != 0;
        }
        bool present;
    };
    static SSE2Check check;
    return check.present;
}

#endif
COMPILE_ASSERT(CTI_ARGS_code == 0xC, CTI_ARGS_code_is_C);
COMPILE_ASSERT(CTI_ARGS_callFrame == 0xE, CTI_ARGS_callFrame_is_E);
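// Layout note (ours): ctiTrampoline below saves three callee-saved registers
// (0xC bytes) and reserves 0x20 bytes of argument space, so counting the
// 4-byte return address the first C argument ('code') sits 0x30 bytes - word
// index 0x0C - above %esp, and the third ('callFrame') at 0x38. That is the
// layout these assertions pin down.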
#if COMPILER(GCC) && PLATFORM(X86)

#if PLATFORM(DARWIN)
#define SYMBOL_STRING(name) "_" #name
#else
#define SYMBOL_STRING(name) #name
#endif

asm(
".globl " SYMBOL_STRING(ctiTrampoline) "\n"
SYMBOL_STRING(ctiTrampoline) ":" "\n"
    "pushl %esi" "\n"
    "pushl %edi" "\n"
    "pushl %ebx" "\n"
    "subl $0x20, %esp" "\n"
    "movl $512, %esi" "\n"
    "movl 0x38(%esp), %edi" "\n" // 0x38 = 0x0E * 4, 0x0E = CTI_ARGS_callFrame (see assertion above)
    "call *0x30(%esp)" "\n" // 0x30 = 0x0C * 4, 0x0C = CTI_ARGS_code (see assertion above)
    "addl $0x20, %esp" "\n"
    "popl %ebx" "\n"
    "popl %edi" "\n"
    "popl %esi" "\n"
    "ret" "\n"
);

asm(
".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
#if USE(CTI_ARGUMENT)
#if USE(FAST_CALL_CTI_ARGUMENT)
    "movl %esp, %ecx" "\n"
#else
    "movl %esp, 0(%esp)" "\n"
#endif
    "call " SYMBOL_STRING(_ZN3JSC11Interpreter12cti_vm_throwEPPv) "\n"
#else
    "call " SYMBOL_STRING(_ZN3JSC11Interpreter12cti_vm_throwEPvz) "\n"
#endif
    "addl $0x20, %esp" "\n"
    "popl %ebx" "\n"
    "popl %edi" "\n"
    "popl %esi" "\n"
    "ret" "\n"
);
#elif COMPILER(MSVC)

extern "C" {

    __declspec(naked) JSValue* ctiTrampoline(void* code, RegisterFile*, CallFrame*, JSValue** exception, Profiler**, JSGlobalData*)
    {
        __asm {
            push esi;
            push edi;
            push ebx;
            sub esp, 0x20;
            mov esi, 512;
            mov edi, [esp + 0x38]; // 0x38 = 0x0E * 4, 0x0E = CTI_ARGS_callFrame (see assertion above)
            call [esp + 0x30]; // 0x30 = 0x0C * 4, 0x0C = CTI_ARGS_code (see assertion above)
            add esp, 0x20;
            pop ebx;
            pop edi;
            pop esi;
            ret;
        }
    }

    __declspec(naked) void ctiVMThrowTrampoline()
    {
        __asm {
            mov ecx, esp;
            call JSC::Interpreter::cti_vm_throw;
            add esp, 0x20;
            pop ebx;
            pop edi;
            pop esi;
            ret;
        }
    }

}

#endif
static ALWAYS_INLINE uintptr_t asInteger(JSValue* value)
{
    return reinterpret_cast<uintptr_t>(value);
}

ALWAYS_INLINE void JIT::killLastResultRegister()
{
    m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
}
// get arg puts an arg from the SF register array into a h/w register
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst, unsigned currentInstructionIndex)
{
    // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue* value = m_codeBlock->getConstant(src);
        __ movl_i32r(asInteger(value), dst);
        killLastResultRegister();
        return;
    }

    if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src)) {
        bool atJumpTarget = false;
        while (m_jumpTargetsPosition < m_codeBlock->jumpTargets.size() && m_codeBlock->jumpTargets[m_jumpTargetsPosition] <= currentInstructionIndex) {
            if (m_codeBlock->jumpTargets[m_jumpTargetsPosition] == currentInstructionIndex)
                atJumpTarget = true;
            ++m_jumpTargetsPosition;
        }

        if (!atJumpTarget) {
            // The argument we want is already stored in eax
            if (dst != X86::eax)
                __ movl_rr(X86::eax, dst);
            killLastResultRegister();
            return;
        }
    }

    __ movl_mr(src * sizeof(Register), X86::edi, dst);
    killLastResultRegister();
}
ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2, unsigned i)
{
    if (src2 == m_lastResultBytecodeRegister) {
        emitGetVirtualRegister(src2, dst2, i);
        emitGetVirtualRegister(src1, dst1, i);
    } else {
        emitGetVirtualRegister(src1, dst1, i);
        emitGetVirtualRegister(src2, dst2, i);
    }
}
// puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
ALWAYS_INLINE void JIT::emitPutCTIArgFromVirtualRegister(unsigned src, unsigned offset, RegisterID scratch)
{
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue* value = m_codeBlock->getConstant(src);
        __ movl_i32m(asInteger(value), offset + sizeof(void*), X86::esp);
    } else {
        __ movl_mr(src * sizeof(Register), X86::edi, scratch);
        __ movl_rm(scratch, offset + sizeof(void*), X86::esp);
    }

    killLastResultRegister();
}
// puts an arg onto the stack, as an arg to a context threaded function.
ALWAYS_INLINE void JIT::emitPutCTIArg(RegisterID src, unsigned offset)
{
    __ movl_rm(src, offset + sizeof(void*), X86::esp);
}

ALWAYS_INLINE void JIT::emitGetCTIArg(unsigned offset, RegisterID dst)
{
    __ movl_mr(offset + sizeof(void*), X86::esp, dst);
}

ALWAYS_INLINE void JIT::emitPutCTIArgConstant(unsigned value, unsigned offset)
{
    __ movl_i32m(value, offset + sizeof(void*), X86::esp);
}
ALWAYS_INLINE JSValue* JIT::getConstantImmediateNumericArg(unsigned src)
{
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue* value = m_codeBlock->getConstant(src);
        return JSImmediate::isNumber(value) ? value : noValue();
    }
    return noValue();
}
ALWAYS_INLINE void JIT::emitPutCTIParam(void* value, unsigned name)
{
    __ movl_i32m(reinterpret_cast<intptr_t>(value), name * sizeof(void*), X86::esp);
}

ALWAYS_INLINE void JIT::emitPutCTIParam(RegisterID from, unsigned name)
{
    __ movl_rm(from, name * sizeof(void*), X86::esp);
}

ALWAYS_INLINE void JIT::emitGetCTIParam(unsigned name, RegisterID to)
{
    __ movl_mr(name * sizeof(void*), X86::esp, to);
    killLastResultRegister();
}
ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
    __ movl_rm(from, entry * sizeof(Register), X86::edi);
}

ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader(RegisterFile::CallFrameHeaderEntry entry, RegisterID to)
{
    __ movl_mr(entry * sizeof(Register), X86::edi, to);
    killLastResultRegister();
}

ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
{
    __ movl_rm(from, dst * sizeof(Register), X86::edi);
    m_lastResultBytecodeRegister = (from == X86::eax) ? dst : std::numeric_limits<int>::max();
    // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    __ movl_i32m(asInteger(jsUndefined()), dst * sizeof(Register), X86::edi);
    // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
}
void ctiSetReturnAddress(void** where, void* what)
{
    *where = what;
}

void ctiRepatchCallByReturnAddress(void* where, void* what)
{
    (static_cast<void**>(where))[-1] = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(what) - reinterpret_cast<uintptr_t>(where));
}
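// Illustration (ours, not original commentary): an x86 near call is opcode E8
// followed by a 32-bit displacement relative to the next instruction, i.e.
// the return address. The displacement therefore occupies the four bytes just
// below the return address 'where', so writing (what - where) into where[-1]
// retargets the call to 'what'.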
void JIT::printBytecodeOperandTypes(unsigned src1, unsigned src2)
{
    char which1 = '*';
    if (m_codeBlock->isConstantRegisterIndex(src1)) {
        JSValue* value = m_codeBlock->getConstant(src1);
        which1 =
            JSImmediate::isImmediate(value) ?
                (JSImmediate::isNumber(value) ? 'i' :
                JSImmediate::isBoolean(value) ? 'b' :
                value->isUndefined() ? 'u' :
                value->isNull() ? 'n' : '?')
                :
            (value->isString() ? 's' :
            value->isObject() ? 'o' :
            'k');
    }
    char which2 = '*';
    if (m_codeBlock->isConstantRegisterIndex(src2)) {
        JSValue* value = m_codeBlock->getConstant(src2);
        which2 =
            JSImmediate::isImmediate(value) ?
                (JSImmediate::isNumber(value) ? 'i' :
                JSImmediate::isBoolean(value) ? 'b' :
                value->isUndefined() ? 'u' :
                value->isNull() ? 'n' : '?')
                :
            (value->isString() ? 's' :
            value->isObject() ? 'o' :
            'k');
    }
    if ((which1 != '*') | (which2 != '*'))
        fprintf(stderr, "Types %c %c\n", which1, which2);
}
ALWAYS_INLINE JmpSrc JIT::emitNakedCall(unsigned bytecodeIndex, X86::RegisterID r)
{
    JmpSrc call = __ call(r);
    m_calls.append(CallRecord(call, bytecodeIndex));
    return call;
}

ALWAYS_INLINE JmpSrc JIT::emitNakedCall(unsigned bytecodeIndex, void* function)
{
    JmpSrc call = __ call();
    m_calls.append(CallRecord(call, reinterpret_cast<CTIHelper_v>(function), bytecodeIndex));
    return call;
}
ALWAYS_INLINE JmpSrc JIT::emitCTICall(unsigned bytecodeIndex, CTIHelper_j helper)
{
#if ENABLE(OPCODE_SAMPLING)
    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true), m_interpreter->sampler()->sampleSlot());
#endif
    __ restoreArgumentReference();
    emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
    JmpSrc call = __ call();
    m_calls.append(CallRecord(call, helper, bytecodeIndex));
#if ENABLE(OPCODE_SAMPLING)
    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false), m_interpreter->sampler()->sampleSlot());
#endif
    killLastResultRegister();

    return call;
}

ALWAYS_INLINE JmpSrc JIT::emitCTICall(unsigned bytecodeIndex, CTIHelper_o helper)
{
#if ENABLE(OPCODE_SAMPLING)
    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true), m_interpreter->sampler()->sampleSlot());
#endif
    __ restoreArgumentReference();
    emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
    JmpSrc call = __ call();
    m_calls.append(CallRecord(call, helper, bytecodeIndex));
#if ENABLE(OPCODE_SAMPLING)
    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false), m_interpreter->sampler()->sampleSlot());
#endif
    killLastResultRegister();

    return call;
}

ALWAYS_INLINE JmpSrc JIT::emitCTICall(unsigned bytecodeIndex, CTIHelper_p helper)
{
#if ENABLE(OPCODE_SAMPLING)
    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true), m_interpreter->sampler()->sampleSlot());
#endif
    __ restoreArgumentReference();
    emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
    JmpSrc call = __ call();
    m_calls.append(CallRecord(call, helper, bytecodeIndex));
#if ENABLE(OPCODE_SAMPLING)
    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false), m_interpreter->sampler()->sampleSlot());
#endif
    killLastResultRegister();

    return call;
}

ALWAYS_INLINE JmpSrc JIT::emitCTICall(unsigned bytecodeIndex, CTIHelper_b helper)
{
#if ENABLE(OPCODE_SAMPLING)
    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true), m_interpreter->sampler()->sampleSlot());
#endif
    __ restoreArgumentReference();
    emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
    JmpSrc call = __ call();
    m_calls.append(CallRecord(call, helper, bytecodeIndex));
#if ENABLE(OPCODE_SAMPLING)
    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false), m_interpreter->sampler()->sampleSlot());
#endif
    killLastResultRegister();

    return call;
}

ALWAYS_INLINE JmpSrc JIT::emitCTICall(unsigned bytecodeIndex, CTIHelper_v helper)
{
#if ENABLE(OPCODE_SAMPLING)
    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true), m_interpreter->sampler()->sampleSlot());
#endif
    __ restoreArgumentReference();
    emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
    JmpSrc call = __ call();
    m_calls.append(CallRecord(call, helper, bytecodeIndex));
#if ENABLE(OPCODE_SAMPLING)
    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false), m_interpreter->sampler()->sampleSlot());
#endif
    killLastResultRegister();

    return call;
}

ALWAYS_INLINE JmpSrc JIT::emitCTICall(unsigned bytecodeIndex, CTIHelper_s helper)
{
#if ENABLE(OPCODE_SAMPLING)
    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true), m_interpreter->sampler()->sampleSlot());
#endif
    __ restoreArgumentReference();
    emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
    JmpSrc call = __ call();
    m_calls.append(CallRecord(call, helper, bytecodeIndex));
#if ENABLE(OPCODE_SAMPLING)
    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false), m_interpreter->sampler()->sampleSlot());
#endif
    killLastResultRegister();

    return call;
}

ALWAYS_INLINE JmpSrc JIT::emitCTICall(unsigned bytecodeIndex, CTIHelper_2 helper)
{
#if ENABLE(OPCODE_SAMPLING)
    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, true), m_interpreter->sampler()->sampleSlot());
#endif
    __ restoreArgumentReference();
    emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
    JmpSrc call = __ call();
    m_calls.append(CallRecord(call, helper, bytecodeIndex));
#if ENABLE(OPCODE_SAMPLING)
    __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin() + bytecodeIndex, false), m_interpreter->sampler()->sampleSlot());
#endif
    killLastResultRegister();

    return call;
}
JmpSrc JIT::checkStructure(RegisterID reg, Structure* structure)
{
    __ cmpl_i32m(reinterpret_cast<uint32_t>(structure), FIELD_OFFSET(JSCell, m_structure), reg);
    return __ jne();
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, unsigned bytecodeIndex)
{
    __ testl_i32r(JSImmediate::TagMask, reg);
    m_slowCases.append(SlowCaseEntry(__ jne(), bytecodeIndex));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, unsigned bytecodeIndex, int vReg)
{
    if (m_codeBlock->isKnownNotImmediate(vReg))
        return;

    emitJumpSlowCaseIfNotJSCell(reg, bytecodeIndex);
}

ALWAYS_INLINE bool JIT::linkSlowCaseIfNotJSCell(const Vector<SlowCaseEntry>::iterator& iter, int vReg)
{
    if (m_codeBlock->isKnownNotImmediate(vReg))
        return false;

    __ link(iter->from, __ label());
    return true;
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmNum(RegisterID reg, unsigned bytecodeIndex)
{
    __ testl_i32r(JSImmediate::TagBitTypeInteger, reg);
    m_slowCases.append(SlowCaseEntry(__ je(), bytecodeIndex));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmNums(RegisterID reg1, RegisterID reg2, unsigned bytecodeIndex)
{
    __ movl_rr(reg1, X86::ecx);
    __ andl_rr(reg2, X86::ecx);
    emitJumpSlowCaseIfNotImmNum(X86::ecx, bytecodeIndex);
}
ALWAYS_INLINE unsigned JIT::getDeTaggedConstantImmediate(JSValue* imm)
{
    ASSERT(JSImmediate::isNumber(imm));
    return asInteger(imm) & ~JSImmediate::TagBitTypeInteger;
}
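// Worked example (our annotation, derived from the helpers around here): an
// immediate integer n is encoded as (n << 1) | TagBitTypeInteger, so 5 is
// stored as 0xB. De-tagging clears bit 0 (0xB -> 0xA), which is what lets the
// fast arithmetic below compute detagged(a) + tagged(b) and get a correctly
// tagged sum directly: 0xA + 0x7 (3 tagged) = 0x11 (8 tagged).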
ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
{
    __ subl_i8r(JSImmediate::TagBitTypeInteger, reg);
}

ALWAYS_INLINE JmpSrc JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
{
    __ subl_i8r(JSImmediate::TagBitTypeInteger, reg);
    return __ je();
}

ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID reg)
{
    __ addl_i8r(JSImmediate::TagBitTypeInteger, reg);
}

ALWAYS_INLINE void JIT::emitFastArithPotentiallyReTagImmediate(RegisterID reg)
{
    __ orl_i32r(JSImmediate::TagBitTypeInteger, reg);
}

ALWAYS_INLINE void JIT::emitFastArithImmToInt(RegisterID reg)
{
    __ sarl_i8r(1, reg);
}

ALWAYS_INLINE void JIT::emitFastArithIntToImmOrSlowCase(RegisterID reg, unsigned bytecodeIndex)
{
    __ addl_rr(reg, reg);
    m_slowCases.append(SlowCaseEntry(__ jo(), bytecodeIndex));
    emitFastArithReTagImmediate(reg);
}

ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID reg)
{
    __ addl_rr(reg, reg);
    emitFastArithReTagImmediate(reg);
}

ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
{
    __ shl_i8r(JSImmediate::ExtendedPayloadShift, reg);
    __ orl_i32r(JSImmediate::FullTagTypeBool, reg);
}
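// What emitTagAsBoolImmediate above computes (our reading): given 0 or 1 from
// a setcc/movzbl pair, the shift moves that bit into the extended payload and
// the or applies the bool tag, producing asInteger(jsBoolean(false)) or
// asInteger(jsBoolean(true)) respectively.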
JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
    : m_assembler(globalData->interpreter->assemblerBuffer())
    , m_interpreter(globalData->interpreter)
    , m_globalData(globalData)
    , m_codeBlock(codeBlock)
    , m_labels(codeBlock ? codeBlock->instructions.size() : 0)
    , m_propertyAccessCompilationInfo(codeBlock ? codeBlock->propertyAccessInstructions.size() : 0)
    , m_callStructureStubCompilationInfo(codeBlock ? codeBlock->callLinkInfos.size() : 0)
    , m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
    , m_jumpTargetsPosition(0)
{
}
#define CTI_COMPILE_BINARY_OP(name) \
    case name: { \
        emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 0, X86::ecx); \
        emitPutCTIArgFromVirtualRegister(instruction[i + 3].u.operand, 4, X86::ecx); \
        emitCTICall(i, Interpreter::cti_##name); \
        emitPutVirtualRegister(instruction[i + 1].u.operand); \
        i += 4; \
        break; \
    }

#define CTI_COMPILE_UNARY_OP(name) \
    case name: { \
        emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 0, X86::ecx); \
        emitCTICall(i, Interpreter::cti_##name); \
        emitPutVirtualRegister(instruction[i + 1].u.operand); \
        i += 3; \
        break; \
    }

static void unreachable()
{
    ASSERT_NOT_REACHED();
    exit(1);
}
void JIT::compileOpCallInitializeCallFrame()
{
    __ movl_rm(X86::edx, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register)), X86::edi);

    __ movl_mr(FIELD_OFFSET(JSFunction, m_scopeChain) + FIELD_OFFSET(ScopeChain, m_node), X86::ecx, X86::edx); // newScopeChain

    __ movl_i32m(asInteger(noValue()), RegisterFile::OptionalCalleeArguments * static_cast<int>(sizeof(Register)), X86::edi);
    __ movl_rm(X86::ecx, RegisterFile::Callee * static_cast<int>(sizeof(Register)), X86::edi);
    __ movl_rm(X86::edx, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register)), X86::edi);
}
void JIT::compileOpCallSetupArgs(Instruction* instruction)
{
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    // ecx holds func
    emitPutCTIArg(X86::ecx, 0);
    emitPutCTIArgConstant(registerOffset, 4);
    emitPutCTIArgConstant(argCount, 8);
    emitPutCTIArgConstant(reinterpret_cast<unsigned>(instruction), 12);
}

void JIT::compileOpCallEvalSetupArgs(Instruction* instruction)
{
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    // ecx holds func
    emitPutCTIArg(X86::ecx, 0);
    emitPutCTIArgConstant(registerOffset, 4);
    emitPutCTIArgConstant(argCount, 8);
    emitPutCTIArgConstant(reinterpret_cast<unsigned>(instruction), 12);
}

void JIT::compileOpConstructSetupArgs(Instruction* instruction)
{
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;
    int proto = instruction[5].u.operand;
    int thisRegister = instruction[6].u.operand;

    // ecx holds func
    emitPutCTIArg(X86::ecx, 0);
    emitPutCTIArgConstant(registerOffset, 4);
    emitPutCTIArgConstant(argCount, 8);
    emitPutCTIArgFromVirtualRegister(proto, 12, X86::eax);
    emitPutCTIArgConstant(thisRegister, 16);
    emitPutCTIArgConstant(reinterpret_cast<unsigned>(instruction), 20);
}
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned i, unsigned callLinkInfoIndex)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    // Handle eval
    JmpSrc wasEval;
    if (opcodeID == op_call_eval) {
        emitGetVirtualRegister(callee, X86::ecx, i);
        compileOpCallEvalSetupArgs(instruction);

        emitCTICall(i, Interpreter::cti_op_call_eval);
        __ cmpl_i32r(asInteger(JSImmediate::impossibleValue()), X86::eax);
        wasEval = __ jne();
    }

    // This plants a check for a cached JSFunction value, so we can plant a fast link to the callee.
    // This deliberately leaves the callee in ecx, used when setting up the stack frame below.
    emitGetVirtualRegister(callee, X86::ecx, i);
    __ cmpl_i32r(asInteger(JSImmediate::impossibleValue()), X86::ecx);
    JmpDst addressOfLinkedFunctionCheck = __ label();
    m_slowCases.append(SlowCaseEntry(__ jne(), i));
    ASSERT(X86Assembler::getDifferenceBetweenLabels(addressOfLinkedFunctionCheck, __ label()) == repatchOffsetOpCallCall);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;

    // The following is the fast case, only used when a callee can be linked.

    // In the case of OpConstruct, call out to a cti_ function to create the new object.
    if (opcodeID == op_construct) {
        int proto = instruction[5].u.operand;
        int thisRegister = instruction[6].u.operand;

        emitPutCTIArg(X86::ecx, 0);
        emitPutCTIArgFromVirtualRegister(proto, 12, X86::eax);
        emitCTICall(i, Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(thisRegister);
        emitGetVirtualRegister(callee, X86::ecx, i);
    }

    // Fast version of stack frame initialization, directly relative to edi.
    // Note that this omits to set up RegisterFile::CodeBlock, which is set in the callee.
    __ movl_i32m(asInteger(noValue()), (registerOffset + RegisterFile::OptionalCalleeArguments) * static_cast<int>(sizeof(Register)), X86::edi);
    __ movl_rm(X86::ecx, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register)), X86::edi);
    __ movl_mr(FIELD_OFFSET(JSFunction, m_scopeChain) + FIELD_OFFSET(ScopeChain, m_node), X86::ecx, X86::edx); // newScopeChain
    __ movl_i32m(argCount, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register)), X86::edi);
    __ movl_rm(X86::edi, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register)), X86::edi);
    __ movl_rm(X86::edx, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register)), X86::edi);
    __ addl_i32r(registerOffset * sizeof(Register), X86::edi);

    // Call to the callee
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall(i, reinterpret_cast<void*>(unreachable));

    if (opcodeID == op_call_eval)
        __ link(wasEval, __ label());

    // Put the return value in dst. In the interpreter, op_ret does this.
    emitPutVirtualRegister(dst);

#if ENABLE(CODEBLOCK_SAMPLING)
    __ movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
#endif
}
void JIT::compileOpStrictEq(Instruction* instruction, unsigned i, CompileOpStrictEqType type)
{
    bool negated = (type == OpNStrictEq);

    unsigned dst = instruction[1].u.operand;
    unsigned src1 = instruction[2].u.operand;
    unsigned src2 = instruction[3].u.operand;

    emitGetVirtualRegisters(src1, X86::eax, src2, X86::edx, i);

    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    JmpSrc firstNotImmediate = __ je();
    __ testl_i32r(JSImmediate::TagMask, X86::edx);
    JmpSrc secondNotImmediate = __ je();

    __ cmpl_rr(X86::edx, X86::eax);
    if (negated)
        __ setne_r(X86::eax);
    else
        __ sete_r(X86::eax);
    __ movzbl_rr(X86::eax, X86::eax);
    emitTagAsBoolImmediate(X86::eax);

    JmpSrc bothWereImmediates = __ jmp();

    __ link(firstNotImmediate, __ label());

    // check that edx is immediate but not the zero immediate
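    // (Why zero is special - our reading of this code, not original text:
    // numbers representable as immediates are canonically stored as
    // immediates (see the comment before putDoubleResultToJSNumberCellOrJSImmediate
    // below), so an immediate can normally never strict-equal a cell. The
    // exception is 0 === -0: -0 only fits in a JSNumberCell, so a zero
    // immediate compared against a cell must take the slow case.)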
    __ testl_i32r(JSImmediate::TagMask, X86::edx);
    __ setz_r(X86::ecx);
    __ movzbl_rr(X86::ecx, X86::ecx); // ecx is now 1 if edx was nonimmediate
    __ cmpl_i32r(asInteger(JSImmediate::zeroImmediate()), X86::edx);
    __ sete_r(X86::edx);
    __ movzbl_rr(X86::edx, X86::edx); // edx is now 1 if edx was the 0 immediate
    __ orl_rr(X86::ecx, X86::edx);

    m_slowCases.append(SlowCaseEntry(__ jnz(), i));

    __ movl_i32r(asInteger(jsBoolean(negated)), X86::eax);

    JmpSrc firstWasNotImmediate = __ jmp();

    __ link(secondNotImmediate, __ label());
    // check that eax is not the zero immediate (we know it must be immediate)
    __ cmpl_i32r(asInteger(JSImmediate::zeroImmediate()), X86::eax);
    m_slowCases.append(SlowCaseEntry(__ je(), i));

    __ movl_i32r(asInteger(jsBoolean(negated)), X86::eax);

    __ link(bothWereImmediates, __ label());
    __ link(firstWasNotImmediate, __ label());

    emitPutVirtualRegister(dst);
}
void JIT::emitSlowScriptCheck(unsigned bytecodeIndex)
{
    __ subl_i8r(1, X86::esi);
    JmpSrc skipTimeout = __ jne();
    emitCTICall(bytecodeIndex, Interpreter::cti_timeout_check);

    emitGetCTIParam(CTI_ARGS_globalData, X86::ecx);
    __ movl_mr(FIELD_OFFSET(JSGlobalData, interpreter), X86::ecx, X86::ecx);
    __ movl_mr(FIELD_OFFSET(Interpreter, m_ticksUntilNextTimeoutCheck), X86::ecx, X86::esi);
    __ link(skipTimeout, __ label());

    killLastResultRegister();
}
/*
 This is required since number representation is canonical - values representable as a JSImmediate should not be stored in a JSNumberCell.

 In the common case, the double value from 'xmmSource' is written to the reusable JSNumberCell pointed to by 'jsNumberCell', then 'jsNumberCell'
 is written to the output SF Register 'dst', and then a jump is planted (stored into *wroteJSNumberCell).

 However if the value from xmmSource is representable as a JSImmediate, then the JSImmediate value will be written to the output, and flow
 control will fall through from the code planted.
*/
void JIT::putDoubleResultToJSNumberCellOrJSImmediate(X86::XMMRegisterID xmmSource, X86::RegisterID jsNumberCell, unsigned dst, JmpSrc* wroteJSNumberCell, X86::XMMRegisterID tempXmm, X86::RegisterID tempReg1, X86::RegisterID tempReg2)
{
    // convert (double -> JSImmediate -> double), and check if the value is unchanged - in which case the value is representable as a JSImmediate.
    __ cvttsd2si_rr(xmmSource, tempReg1);
    __ addl_rr(tempReg1, tempReg1);
    __ sarl_i8r(1, tempReg1);
    __ cvtsi2sd_rr(tempReg1, tempXmm);
    // Compare & branch if immediate.
    __ ucomis_rr(tempXmm, xmmSource);
    JmpSrc resultIsImm = __ je();
    JmpDst resultLookedLikeImmButActuallyIsnt = __ label();

    // Store the result to the JSNumberCell and jump.
    __ movsd_rm(xmmSource, FIELD_OFFSET(JSNumberCell, m_value), jsNumberCell);
    if (jsNumberCell != X86::eax)
        __ movl_rr(jsNumberCell, X86::eax);
    emitPutVirtualRegister(dst);
    *wroteJSNumberCell = __ jmp();

    __ link(resultIsImm, __ label());
    // value == (double)(JSImmediate)value... or at least, it looks that way...
    // ucomi will report that (0 == -0), and will report true if either input is NaN (result is unordered).
    __ link(__ jp(), resultLookedLikeImmButActuallyIsnt); // Actually was a NaN
    __ pextrw_irr(3, xmmSource, tempReg2);
    __ cmpl_i32r(0x8000, tempReg2);
    __ link(__ je(), resultLookedLikeImmButActuallyIsnt); // Actually was -0
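    // (Bit-level note, ours: -0.0 is the IEEE 754 pattern 0x8000000000000000,
    // so word 3 - the top 16 bits extracted by pextrw - equals 0x8000 exactly
    // for -0, the one value here that converts like 0 but must not be stored
    // as the immediate 0.)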
    // Yes it really really really is representable as a JSImmediate.
    emitFastArithIntToImmNoCheck(tempReg1);
    if (tempReg1 != X86::eax)
        __ movl_rr(tempReg1, X86::eax);
    emitPutVirtualRegister(dst);
}
void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types, unsigned i)
{
    Structure* numberStructure = m_globalData->numberStructure.get();
    JmpSrc wasJSNumberCell1;
    JmpSrc wasJSNumberCell1b;
    JmpSrc wasJSNumberCell2;
    JmpSrc wasJSNumberCell2b;

    emitGetVirtualRegisters(src1, X86::eax, src2, X86::edx, i);

    if (types.second().isReusable() && isSSE2Present()) {
        ASSERT(types.second().mightBeNumber());

        // Check op2 is a number
        __ testl_i32r(JSImmediate::TagBitTypeInteger, X86::edx);
        JmpSrc op2imm = __ jne();
        if (!types.second().definitelyIsNumber()) {
            emitJumpSlowCaseIfNotJSCell(X86::edx, i, src2);
            __ cmpl_i32m(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::edx);
            m_slowCases.append(SlowCaseEntry(__ jne(), i));
        }

        // (1) In this case src2 is a reusable number cell.
        //     Slow case if src1 is not a number type.
        __ testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
        JmpSrc op1imm = __ jne();
        if (!types.first().definitelyIsNumber()) {
            emitJumpSlowCaseIfNotJSCell(X86::eax, i, src1);
            __ cmpl_i32m(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
            m_slowCases.append(SlowCaseEntry(__ jne(), i));
        }

        // (1a) if we get here, src1 is also a number cell
        __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
        JmpSrc loadedDouble = __ jmp();
        // (1b) if we get here, src1 is an immediate
        __ link(op1imm, __ label());
        emitFastArithImmToInt(X86::eax);
        __ cvtsi2sd_rr(X86::eax, X86::xmm0);
        // (1c)
        __ link(loadedDouble, __ label());
        if (opcodeID == op_add)
            __ addsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
        else if (opcodeID == op_sub)
            __ subsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
        else {
            ASSERT(opcodeID == op_mul);
            __ mulsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
        }

        putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::edx, dst, &wasJSNumberCell2, X86::xmm1, X86::ecx, X86::eax);
        wasJSNumberCell2b = __ jmp();

        // (2) This handles cases where src2 is an immediate number.
        //     Two slow cases - either src1 isn't an immediate, or the subtract overflows.
        __ link(op2imm, __ label());
        emitJumpSlowCaseIfNotImmNum(X86::eax, i);
    } else if (types.first().isReusable() && isSSE2Present()) {
        ASSERT(types.first().mightBeNumber());

        // Check op1 is a number
        __ testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
        JmpSrc op1imm = __ jne();
        if (!types.first().definitelyIsNumber()) {
            emitJumpSlowCaseIfNotJSCell(X86::eax, i, src1);
            __ cmpl_i32m(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
            m_slowCases.append(SlowCaseEntry(__ jne(), i));
        }

        // (1) In this case src1 is a reusable number cell.
        //     Slow case if src2 is not a number type.
        __ testl_i32r(JSImmediate::TagBitTypeInteger, X86::edx);
        JmpSrc op2imm = __ jne();
        if (!types.second().definitelyIsNumber()) {
            emitJumpSlowCaseIfNotJSCell(X86::edx, i, src2);
            __ cmpl_i32m(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::edx);
            m_slowCases.append(SlowCaseEntry(__ jne(), i));
        }

        // (1a) if we get here, src2 is also a number cell
        __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm1);
        JmpSrc loadedDouble = __ jmp();
        // (1b) if we get here, src2 is an immediate
        __ link(op2imm, __ label());
        emitFastArithImmToInt(X86::edx);
        __ cvtsi2sd_rr(X86::edx, X86::xmm1);
        // (1c)
        __ link(loadedDouble, __ label());
        __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
        if (opcodeID == op_add)
            __ addsd_rr(X86::xmm1, X86::xmm0);
        else if (opcodeID == op_sub)
            __ subsd_rr(X86::xmm1, X86::xmm0);
        else {
            ASSERT(opcodeID == op_mul);
            __ mulsd_rr(X86::xmm1, X86::xmm0);
        }
        __ movsd_rm(X86::xmm0, FIELD_OFFSET(JSNumberCell, m_value), X86::eax);
        emitPutVirtualRegister(dst);

        putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::eax, dst, &wasJSNumberCell1, X86::xmm1, X86::ecx, X86::edx);
        wasJSNumberCell1b = __ jmp();

        // (2) This handles cases where src1 is an immediate number.
        //     Two slow cases - either src2 isn't an immediate, or the subtract overflows.
        __ link(op1imm, __ label());
        emitJumpSlowCaseIfNotImmNum(X86::edx, i);
    } else
        emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);

    if (opcodeID == op_add) {
        emitFastArithDeTagImmediate(X86::eax);
        __ addl_rr(X86::edx, X86::eax);
        m_slowCases.append(SlowCaseEntry(__ jo(), i));
    } else if (opcodeID == op_sub) {
        __ subl_rr(X86::edx, X86::eax);
        m_slowCases.append(SlowCaseEntry(__ jo(), i));
        emitFastArithReTagImmediate(X86::eax);
    } else {
        ASSERT(opcodeID == op_mul);
        // convert eax & edx from JSImmediates to ints, and check if either is zero
        emitFastArithImmToInt(X86::edx);
        JmpSrc op1Zero = emitFastArithDeTagImmediateJumpIfZero(X86::eax);
        __ testl_rr(X86::edx, X86::edx);
        JmpSrc op2NonZero = __ jne();
        __ link(op1Zero, __ label());
        // if either input is zero, add the two together, and check if the result is < 0.
        // If it is, we have a problem (N < 0), (N * 0) == -0, not representable as a JSImmediate.
        __ movl_rr(X86::eax, X86::ecx);
        __ addl_rr(X86::edx, X86::ecx);
        m_slowCases.append(SlowCaseEntry(__ js(), i));
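        // Worked example (our annotation): for (-3) * 0, eax de-tags to -6
        // (non-zero) but edx is 0, so we fall into this check; ecx = -6 + 0 is
        // negative, and the sign flag routes us to the slow case, which can
        // materialize -0 as a JSNumberCell. For 0 * 3 the de-tag jump lands in
        // the same check with ecx = 0 + 3, non-negative, so plain 0 stays fast.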
        // Skip the above check if neither input is zero
        __ link(op2NonZero, __ label());
        __ imull_rr(X86::edx, X86::eax);
        m_slowCases.append(SlowCaseEntry(__ jo(), i));
        emitFastArithReTagImmediate(X86::eax);
    }
    emitPutVirtualRegister(dst);

    if (types.second().isReusable() && isSSE2Present()) {
        __ link(wasJSNumberCell2, __ label());
        __ link(wasJSNumberCell2b, __ label());
    } else if (types.first().isReusable() && isSSE2Present()) {
        __ link(wasJSNumberCell1, __ label());
        __ link(wasJSNumberCell1b, __ label());
    }
}
1005 void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types, unsigned i)
1007 JmpDst here = __ label();
1008 __ link(iter->from, here);
1009 if (types.second().isReusable() && isSSE2Present()) {
1010 if (!types.first().definitelyIsNumber()) {
1011 if (linkSlowCaseIfNotJSCell(++iter, src1))
1013 __ link(iter->from, here);
1015 if (!types.second().definitelyIsNumber()) {
1016 if (linkSlowCaseIfNotJSCell(++iter, src2))
1018 __ link(iter->from, here);
1020 __ link((++iter)->from, here);
1021 } else if (types.first().isReusable() && isSSE2Present()) {
1022 if (!types.first().definitelyIsNumber()) {
1023 if (linkSlowCaseIfNotJSCell(++iter, src1))
1025 __ link(iter->from, here);
1027 if (!types.second().definitelyIsNumber()) {
1028 if (linkSlowCaseIfNotJSCell(++iter, src2))
1030 __ link(iter->from, here);
1032 __ link((++iter)->from, here);
1034 __ link((++iter)->from, here);
1036 // additional entry point to handle -0 cases.
1037 if (opcodeID == op_mul)
1038 __ link((++iter)->from, here);
1040 emitPutCTIArgFromVirtualRegister(src1, 0, X86::ecx);
1041 emitPutCTIArgFromVirtualRegister(src2, 4, X86::ecx);
1042 if (opcodeID == op_add)
1043 emitCTICall(i, Interpreter::cti_op_add);
1044 else if (opcodeID == op_sub)
1045 emitCTICall(i, Interpreter::cti_op_sub);
1047 ASSERT(opcodeID == op_mul);
1048 emitCTICall(i, Interpreter::cti_op_mul);
1050 emitPutVirtualRegister(dst);
void JIT::privateCompileMainPass()
{
    Instruction* instruction = m_codeBlock->instructions.begin();
    unsigned instructionCount = m_codeBlock->instructions.size();

    unsigned propertyAccessInstructionIndex = 0;
    unsigned callLinkInfoIndex = 0;

    for (unsigned i = 0; i < instructionCount; ) {
        ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(instruction[i].u.opcode), "privateCompileMainPass gone bad @ %d", i);

#if ENABLE(OPCODE_SAMPLING)
        if (i > 0) // Avoid the overhead of sampling op_enter twice.
            __ movl_i32m(m_interpreter->sampler()->encodeSample(instruction + i), m_interpreter->sampler()->sampleSlot());
#endif

        m_labels[i] = __ label();
        OpcodeID opcodeID = m_interpreter->getOpcodeID(instruction[i].u.opcode);
        switch (opcodeID) {
        case op_mov: {
            unsigned src = instruction[i + 2].u.operand;
            if (m_codeBlock->isConstantRegisterIndex(src))
                __ movl_i32r(asInteger(m_codeBlock->getConstant(src)), X86::eax);
            else
                emitGetVirtualRegister(src, X86::eax, i);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 3;
            break;
        }
        case op_add: {
            unsigned dst = instruction[i + 1].u.operand;
            unsigned src1 = instruction[i + 2].u.operand;
            unsigned src2 = instruction[i + 3].u.operand;

            if (JSValue* value = getConstantImmediateNumericArg(src1)) {
                emitGetVirtualRegister(src2, X86::eax, i);
                emitJumpSlowCaseIfNotImmNum(X86::eax, i);
                __ addl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
                m_slowCases.append(SlowCaseEntry(__ jo(), i));
                emitPutVirtualRegister(dst);
            } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
                emitGetVirtualRegister(src1, X86::eax, i);
                emitJumpSlowCaseIfNotImmNum(X86::eax, i);
                __ addl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
                m_slowCases.append(SlowCaseEntry(__ jo(), i));
                emitPutVirtualRegister(dst);
            } else {
                OperandTypes types = OperandTypes::fromInt(instruction[i + 4].u.operand);
                if (types.first().mightBeNumber() && types.second().mightBeNumber())
                    compileBinaryArithOp(op_add, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
                else {
                    emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 0, X86::ecx);
                    emitPutCTIArgFromVirtualRegister(instruction[i + 3].u.operand, 4, X86::ecx);
                    emitCTICall(i, Interpreter::cti_op_add);
                    emitPutVirtualRegister(instruction[i + 1].u.operand);
                }
            }

            i += 5;
            break;
        }
        case op_end: {
            if (m_codeBlock->needsFullScopeChain)
                emitCTICall(i, Interpreter::cti_op_end);
            emitGetVirtualRegister(instruction[i + 1].u.operand, X86::eax, i);
            __ pushl_m(RegisterFile::ReturnPC * static_cast<int>(sizeof(Register)), X86::edi);
            __ ret();
            i += 2;
            break;
        }
        case op_jmp: {
            unsigned target = instruction[i + 1].u.operand;
            m_jmpTable.append(JmpTable(__ jmp(), i + 1 + target));
            i += 2;
            break;
        }
        case op_pre_inc: {
            int srcDst = instruction[i + 1].u.operand;
            emitGetVirtualRegister(srcDst, X86::eax, i);
            emitJumpSlowCaseIfNotImmNum(X86::eax, i);
            __ addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
            m_slowCases.append(SlowCaseEntry(__ jo(), i));
            emitPutVirtualRegister(srcDst);
            i += 2;
            break;
        }
        case op_loop: {
            emitSlowScriptCheck(i);

            unsigned target = instruction[i + 1].u.operand;
            m_jmpTable.append(JmpTable(__ jmp(), i + 1 + target));
            i += 2;
            break;
        }
        case op_loop_if_less: {
            emitSlowScriptCheck(i);

            unsigned target = instruction[i + 3].u.operand;
            JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
            if (src2imm) {
                emitGetVirtualRegister(instruction[i + 1].u.operand, X86::edx, i);
                emitJumpSlowCaseIfNotImmNum(X86::edx, i);
                __ cmpl_i32r(asInteger(src2imm), X86::edx);
                m_jmpTable.append(JmpTable(__ jl(), i + 3 + target));
            } else {
                emitGetVirtualRegisters(instruction[i + 1].u.operand, X86::eax, instruction[i + 2].u.operand, X86::edx, i);
                emitJumpSlowCaseIfNotImmNum(X86::eax, i);
                emitJumpSlowCaseIfNotImmNum(X86::edx, i);
                __ cmpl_rr(X86::edx, X86::eax);
                m_jmpTable.append(JmpTable(__ jl(), i + 3 + target));
            }
            i += 4;
            break;
        }
        case op_loop_if_lesseq: {
            emitSlowScriptCheck(i);

            unsigned target = instruction[i + 3].u.operand;
            JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
            if (src2imm) {
                emitGetVirtualRegister(instruction[i + 1].u.operand, X86::edx, i);
                emitJumpSlowCaseIfNotImmNum(X86::edx, i);
                __ cmpl_i32r(asInteger(src2imm), X86::edx);
                m_jmpTable.append(JmpTable(__ jle(), i + 3 + target));
            } else {
                emitGetVirtualRegisters(instruction[i + 1].u.operand, X86::eax, instruction[i + 2].u.operand, X86::edx, i);
                emitJumpSlowCaseIfNotImmNum(X86::eax, i);
                emitJumpSlowCaseIfNotImmNum(X86::edx, i);
                __ cmpl_rr(X86::edx, X86::eax);
                m_jmpTable.append(JmpTable(__ jle(), i + 3 + target));
            }
            i += 4;
            break;
        }
        case op_new_object: {
            emitCTICall(i, Interpreter::cti_op_new_object);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 2;
            break;
        }
        case op_put_by_id: {
            // In order to be able to repatch both the Structure and the object offset, we store a single
            // pointer, 'hotPathBegin', to just after the arguments have been loaded into registers, and we
            // generate code such that the Structure & offset are always at the same distance from it.

            int baseVReg = instruction[i + 1].u.operand;
            emitGetVirtualRegisters(baseVReg, X86::eax, instruction[i + 3].u.operand, X86::edx, i);

            ASSERT(m_codeBlock->propertyAccessInstructions[propertyAccessInstructionIndex].bytecodeIndex == i);

            // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
            emitJumpSlowCaseIfNotJSCell(X86::eax, i, baseVReg);

            JmpDst hotPathBegin = __ label();
            m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
            ++propertyAccessInstructionIndex;

            // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
            __ cmpl_i32m(repatchGetByIdDefaultStructure, FIELD_OFFSET(JSCell, m_structure), X86::eax);
            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetPutByIdStructure);
            m_slowCases.append(SlowCaseEntry(__ jne(), i));

            // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
            __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
            __ movl_rm(X86::edx, repatchGetByIdDefaultOffset, X86::eax);
            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetPutByIdPropertyMapOffset);

            i += 8;
            break;
        }
        case op_get_by_id: {
            // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be repatched.
            // Additionally, for get_by_id we need to repatch the offset of the branch to the slow case (we repatch this to
            // jump to array-length / prototype access trampolines), and we also use the property-map access offset as a
            // label to jump back to if one of these trampolines finds a match.

            int baseVReg = instruction[i + 2].u.operand;
            emitGetVirtualRegister(baseVReg, X86::eax, i);

            ASSERT(m_codeBlock->propertyAccessInstructions[propertyAccessInstructionIndex].bytecodeIndex == i);

            emitJumpSlowCaseIfNotJSCell(X86::eax, i, baseVReg);

            JmpDst hotPathBegin = __ label();
            m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
            ++propertyAccessInstructionIndex;

            __ cmpl_i32m(repatchGetByIdDefaultStructure, FIELD_OFFSET(JSCell, m_structure), X86::eax);
            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetGetByIdStructure);
            m_slowCases.append(SlowCaseEntry(__ jne(), i));
            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetGetByIdBranchToSlowCase);

            __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
            __ movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::eax);
            ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetGetByIdPropertyMapOffset);
            emitPutVirtualRegister(instruction[i + 1].u.operand);

            i += 8;
            break;
        }
        case op_instanceof: {
            emitGetVirtualRegister(instruction[i + 2].u.operand, X86::eax, i); // value
            emitGetVirtualRegister(instruction[i + 3].u.operand, X86::ecx, i); // baseVal
            emitGetVirtualRegister(instruction[i + 4].u.operand, X86::edx, i); // proto

            // check if any are immediates
            __ orl_rr(X86::eax, X86::ecx);
            __ orl_rr(X86::edx, X86::ecx);
            __ testl_i32r(JSImmediate::TagMask, X86::ecx);

            m_slowCases.append(SlowCaseEntry(__ jnz(), i));

            // check that all are object type - this is a bit of a bithack to avoid excess branching;
            // we check that the sum of the three type codes from Structures is exactly 3 * ObjectType,
            // this works because NumberType and StringType are smaller
            __ movl_i32r(3 * ObjectType, X86::ecx);
            __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::eax, X86::eax);
            __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::edx, X86::edx);
            __ subl_mr(FIELD_OFFSET(Structure, m_typeInfo.m_type), X86::eax, X86::ecx);
            __ subl_mr(FIELD_OFFSET(Structure, m_typeInfo.m_type), X86::edx, X86::ecx);
            emitGetVirtualRegister(instruction[i + 3].u.operand, X86::edx, i); // reload baseVal
            __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::edx, X86::edx);
            __ cmpl_rm(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo.m_type), X86::edx);

            m_slowCases.append(SlowCaseEntry(__ jne(), i));
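            // (Worked check, our annotation: ecx ends up as 3 * ObjectType -
            // type(value) - type(proto), which can equal type(baseVal) only
            // when all three types are ObjectType, since no type code exceeds
            // ObjectType.)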
            // check that baseVal's flags include ImplementsHasInstance but not OverridesHasInstance
            __ movl_mr(FIELD_OFFSET(Structure, m_typeInfo.m_flags), X86::edx, X86::ecx);
            __ andl_i32r(ImplementsHasInstance | OverridesHasInstance, X86::ecx);
            __ cmpl_i32r(ImplementsHasInstance, X86::ecx);

            m_slowCases.append(SlowCaseEntry(__ jne(), i));

            emitGetVirtualRegister(instruction[i + 2].u.operand, X86::ecx, i); // reload value
            emitGetVirtualRegister(instruction[i + 4].u.operand, X86::edx, i); // reload proto

            // optimistically load true result
            __ movl_i32r(asInteger(jsBoolean(true)), X86::eax);

            JmpDst loop = __ label();

            // load value's prototype
            __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::ecx, X86::ecx);
            __ movl_mr(FIELD_OFFSET(Structure, m_prototype), X86::ecx, X86::ecx);

            __ cmpl_rr(X86::ecx, X86::edx);
            JmpSrc exit = __ je();

            __ cmpl_i32r(asInteger(jsNull()), X86::ecx);
            JmpSrc goToLoop = __ jne();
            __ link(goToLoop, loop);

            __ movl_i32r(asInteger(jsBoolean(false)), X86::eax);

            __ link(exit, __ label());

            emitPutVirtualRegister(instruction[i + 1].u.operand);

            i += 5;
            break;
        }
        case op_del_by_id: {
            emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 0, X86::ecx);
            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
            emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 4);
            emitCTICall(i, Interpreter::cti_op_del_by_id);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 4;
            break;
        }
        case op_mul: {
            unsigned dst = instruction[i + 1].u.operand;
            unsigned src1 = instruction[i + 2].u.operand;
            unsigned src2 = instruction[i + 3].u.operand;

            // For now, only plant a fast int case if the constant operand is greater than zero.
            JSValue* src1Value = getConstantImmediateNumericArg(src1);
            JSValue* src2Value = getConstantImmediateNumericArg(src2);
            int32_t value;
            if (src1Value && ((value = JSImmediate::intValue(src1Value)) > 0)) {
                emitGetVirtualRegister(src2, X86::eax, i);
                emitJumpSlowCaseIfNotImmNum(X86::eax, i);
                emitFastArithDeTagImmediate(X86::eax);
                __ imull_i32r(X86::eax, value, X86::eax);
                m_slowCases.append(SlowCaseEntry(__ jo(), i));
                emitFastArithReTagImmediate(X86::eax);
                emitPutVirtualRegister(dst);
            } else if (src2Value && ((value = JSImmediate::intValue(src2Value)) > 0)) {
                emitGetVirtualRegister(src1, X86::eax, i);
                emitJumpSlowCaseIfNotImmNum(X86::eax, i);
                emitFastArithDeTagImmediate(X86::eax);
                __ imull_i32r(X86::eax, value, X86::eax);
                m_slowCases.append(SlowCaseEntry(__ jo(), i));
                emitFastArithReTagImmediate(X86::eax);
                emitPutVirtualRegister(dst);
            } else
                compileBinaryArithOp(op_mul, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);

            i += 5;
            break;
        }
        case op_new_func: {
            FuncDeclNode* func = (m_codeBlock->functions[instruction[i + 2].u.operand]).get();
            emitPutCTIArgConstant(reinterpret_cast<unsigned>(func), 0);
            emitCTICall(i, Interpreter::cti_op_new_func);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 3;
            break;
        }
        case op_call:
        case op_call_eval:
        case op_construct: {
            compileOpCall(opcodeID, instruction + i, i, callLinkInfoIndex++);
            i += (opcodeID == op_construct ? 7 : 5);
            break;
        }
        case op_get_global_var: {
            JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 2].u.jsCell);
            __ movl_i32r(asInteger(globalObject), X86::eax);
            emitGetVariableObjectRegister(X86::eax, instruction[i + 3].u.operand, X86::eax);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 4;
            break;
        }
        case op_put_global_var: {
            emitGetVirtualRegister(instruction[i + 3].u.operand, X86::edx, i);
            JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 1].u.jsCell);
            __ movl_i32r(asInteger(globalObject), X86::eax);
            emitPutVariableObjectRegister(X86::edx, X86::eax, instruction[i + 2].u.operand);
            i += 4;
            break;
        }
        case op_get_scoped_var: {
            int skip = instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain;

            emitGetVirtualRegister(RegisterFile::ScopeChain, X86::eax, i);
            while (skip--)
                __ movl_mr(FIELD_OFFSET(ScopeChainNode, next), X86::eax, X86::eax);

            __ movl_mr(FIELD_OFFSET(ScopeChainNode, object), X86::eax, X86::eax);
            emitGetVariableObjectRegister(X86::eax, instruction[i + 2].u.operand, X86::eax);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 4;
            break;
        }
        case op_put_scoped_var: {
            int skip = instruction[i + 2].u.operand + m_codeBlock->needsFullScopeChain;

            emitGetVirtualRegister(RegisterFile::ScopeChain, X86::edx, i);
            emitGetVirtualRegister(instruction[i + 3].u.operand, X86::eax, i);
            while (skip--)
                __ movl_mr(FIELD_OFFSET(ScopeChainNode, next), X86::edx, X86::edx);

            __ movl_mr(FIELD_OFFSET(ScopeChainNode, object), X86::edx, X86::edx);
            emitPutVariableObjectRegister(X86::eax, X86::edx, instruction[i + 1].u.operand);
            i += 4;
            break;
        }
        case op_tear_off_activation: {
            emitPutCTIArgFromVirtualRegister(instruction[i + 1].u.operand, 0, X86::ecx);
            emitCTICall(i, Interpreter::cti_op_tear_off_activation);
            i += 2;
            break;
        }
        case op_tear_off_arguments: {
            emitCTICall(i, Interpreter::cti_op_tear_off_arguments);
            i += 1;
            break;
        }
        case op_ret: {
            // We could JIT generate the deref, only calling out to C when the refcount hits zero.
            if (m_codeBlock->needsFullScopeChain)
                emitCTICall(i, Interpreter::cti_op_ret_scopeChain);

            // Return the result in %eax.
            emitGetVirtualRegister(instruction[i + 1].u.operand, X86::eax, i);

            // Grab the return address.
            emitGetVirtualRegister(RegisterFile::ReturnPC, X86::edx, i);

            // Restore our caller's "r".
            emitGetVirtualRegister(RegisterFile::CallerFrame, X86::edi, i);

            // Return.
            __ pushl_r(X86::edx);
            __ ret();

            i += 2;
            break;
        }
        case op_new_array: {
            __ leal_mr(sizeof(Register) * instruction[i + 2].u.operand, X86::edi, X86::edx);
            emitPutCTIArg(X86::edx, 0);
            emitPutCTIArgConstant(instruction[i + 3].u.operand, 4);
            emitCTICall(i, Interpreter::cti_op_new_array);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 4;
            break;
        }
        case op_resolve: {
            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
            emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 0);
            emitCTICall(i, Interpreter::cti_op_resolve);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 3;
            break;
        }
        case op_construct_verify: {
            emitGetVirtualRegister(instruction[i + 1].u.operand, X86::eax, i);

            __ testl_i32r(JSImmediate::TagMask, X86::eax);
            JmpSrc isImmediate = __ jne();
            __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::eax, X86::ecx);
            __ cmpl_i32m(ObjectType, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type), X86::ecx);
            JmpSrc isObject = __ je();

            __ link(isImmediate, __ label());
            emitGetVirtualRegister(instruction[i + 2].u.operand, X86::eax, i);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            __ link(isObject, __ label());

            i += 3;
            break;
        }
        case op_get_by_val: {
            emitGetVirtualRegisters(instruction[i + 2].u.operand, X86::eax, instruction[i + 3].u.operand, X86::edx, i);
            emitJumpSlowCaseIfNotImmNum(X86::edx, i);
            emitFastArithImmToInt(X86::edx);
            __ testl_i32r(JSImmediate::TagMask, X86::eax);
            m_slowCases.append(SlowCaseEntry(__ jne(), i));
            __ cmpl_i32m(reinterpret_cast<unsigned>(m_interpreter->m_jsArrayVptr), X86::eax);
            m_slowCases.append(SlowCaseEntry(__ jne(), i));

            // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
            __ movl_mr(FIELD_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
            __ cmpl_rm(X86::edx, FIELD_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
            m_slowCases.append(SlowCaseEntry(__ jbe(), i));

            // Get the value from the vector
            __ movl_mr(FIELD_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::eax);
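            // (Addressing note, ours: this is a scaled-index load - eax =
            // *(ecx + edx * sizeof(JSValue*) + FIELD_OFFSET(ArrayStorage,
            // m_vector)) - i.e. m_vector[index].)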
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 4;
            break;
        }
        case op_resolve_func: {
            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
            emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 0);
            emitCTICall(i, Interpreter::cti_op_resolve_func);
            emitPutVirtualRegister(instruction[i + 2].u.operand, X86::edx);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 4;
            break;
        }
        case op_sub: {
            compileBinaryArithOp(op_sub, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
            i += 5;
            break;
        }
        case op_put_by_val: {
            emitGetVirtualRegisters(instruction[i + 1].u.operand, X86::eax, instruction[i + 2].u.operand, X86::edx, i);
            emitJumpSlowCaseIfNotImmNum(X86::edx, i);
            emitFastArithImmToInt(X86::edx);
            __ testl_i32r(JSImmediate::TagMask, X86::eax);
            m_slowCases.append(SlowCaseEntry(__ jne(), i));
            __ cmpl_i32m(reinterpret_cast<unsigned>(m_interpreter->m_jsArrayVptr), X86::eax);
            m_slowCases.append(SlowCaseEntry(__ jne(), i));

            // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
            __ movl_mr(FIELD_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
            __ cmpl_rm(X86::edx, FIELD_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
            JmpSrc inFastVector = __ ja();
            // No; oh well, check if the access is within the vector - if so, we may still be okay.
            __ cmpl_rm(X86::edx, FIELD_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
            m_slowCases.append(SlowCaseEntry(__ jbe(), i));

            // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
            // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff.
            __ cmpl_i8m(0, FIELD_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
            m_slowCases.append(SlowCaseEntry(__ je(), i));

            // All good - put the value into the array.
            __ link(inFastVector, __ label());
            emitGetVirtualRegister(instruction[i + 3].u.operand, X86::eax, i);
            __ movl_rm(X86::eax, FIELD_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
            i += 4;
            break;
        }
        CTI_COMPILE_BINARY_OP(op_lesseq)
        case op_loop_if_true: {
            emitSlowScriptCheck(i);

            unsigned target = instruction[i + 2].u.operand;
            emitGetVirtualRegister(instruction[i + 1].u.operand, X86::eax, i);

            __ cmpl_i32r(asInteger(JSImmediate::zeroImmediate()), X86::eax);
            JmpSrc isZero = __ je();
            __ testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
            m_jmpTable.append(JmpTable(__ jne(), i + 2 + target));

            __ cmpl_i32r(asInteger(JSImmediate::trueImmediate()), X86::eax);
            m_jmpTable.append(JmpTable(__ je(), i + 2 + target));
            __ cmpl_i32r(asInteger(JSImmediate::falseImmediate()), X86::eax);
            m_slowCases.append(SlowCaseEntry(__ jne(), i));

            __ link(isZero, __ label());
            i += 3;
            break;
        }
        case op_resolve_base: {
            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
            emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 0);
            emitCTICall(i, Interpreter::cti_op_resolve_base);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 3;
            break;
        }
        case op_negate: {
            emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 0, X86::ecx);
            emitCTICall(i, Interpreter::cti_op_negate);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 3;
            break;
        }
        case op_resolve_skip: {
            Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
            emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 0);
            emitPutCTIArgConstant(instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain, 4);
            emitCTICall(i, Interpreter::cti_op_resolve_skip);
            emitPutVirtualRegister(instruction[i + 1].u.operand);
            i += 4;
            break;
        }
1585 case op_resolve_global: {
1587 unsigned globalObject = asInteger(instruction[i + 2].u.jsCell);
1588 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1589 void* structureAddress = reinterpret_cast<void*>(instruction + i + 4);
1590 void* offsetAddr = reinterpret_cast<void*>(instruction + i + 5);
1592 // Check Structure of global object
1593 __ movl_i32r(globalObject, X86::eax);
1594 __ movl_mr(structureAddress, X86::edx);
1595 __ cmpl_rm(X86::edx, FIELD_OFFSET(JSCell, m_structure), X86::eax);
1596 JmpSrc noMatch = __ jne(); // Structures don't match
1598 // Load cached property
1599 __ movl_mr(FIELD_OFFSET(JSGlobalObject, m_propertyStorage), X86::eax, X86::eax);
1600 __ movl_mr(offsetAddr, X86::edx);
1601 __ movl_mr(0, X86::eax, X86::edx, sizeof(JSValue*), X86::eax);
1602 emitPutVirtualRegister(instruction[i + 1].u.operand);
1603 JmpSrc end = __ jmp();
1606 __ link(noMatch, __ label());
1607 emitPutCTIArgConstant(globalObject, 0);
1608 emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 4);
1609 emitPutCTIArgConstant(reinterpret_cast<unsigned>(instruction + i), 8);
1610 emitCTICall(i, Interpreter::cti_op_resolve_global);
1611 emitPutVirtualRegister(instruction[i + 1].u.operand);
1612 __ link(end, __ label());
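// (op_resolve_global keeps a two-word inline cache in the instruction stream:
// instruction[i + 4] holds the global object's last known Structure and
// instruction[i + 5] the cached property storage index. The fast path above
// compares the Structure and loads directly out of m_propertyStorage; on a
// miss, cti_op_resolve_global is handed 'instruction + i' so the cache can be
// refilled for subsequent executions.)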
1616 CTI_COMPILE_BINARY_OP(op_div)
1618 int srcDst = instruction[i + 1].u.operand;
1619 emitGetVirtualRegister(srcDst, X86::eax, i);
1620 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1621 __ subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
1622 m_slowCases.append(SlowCaseEntry(__ jo(), i));
1623 emitPutVirtualRegister(srcDst);
1628 unsigned target = instruction[i + 3].u.operand;
1629 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1631 emitGetVirtualRegister(instruction[i + 1].u.operand, X86::edx, i);
1632 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1633 __ cmpl_i32r(asInteger(src2imm), X86::edx);
1634 m_jmpTable.append(JmpTable(__ jge(), i + 3 + target));
1636 emitGetVirtualRegisters(instruction[i + 1].u.operand, X86::eax, instruction[i + 2].u.operand, X86::edx, i);
1637 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1638 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1639 __ cmpl_rr(X86::edx, X86::eax);
1640 m_jmpTable.append(JmpTable(__ jge(), i + 3 + target));
1646 emitGetVirtualRegister(instruction[i + 2].u.operand, X86::eax, i);
1647 __ xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
1648 __ testl_i32r(JSImmediate::FullTagTypeMask, X86::eax); // i8?
1649 m_slowCases.append(SlowCaseEntry(__ jne(), i));
1650 __ xorl_i8r((JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue), X86::eax);
1651 emitPutVirtualRegister(instruction[i + 1].u.operand);
1656 unsigned target = instruction[i + 2].u.operand;
1657 emitGetVirtualRegister(instruction[i + 1].u.operand, X86::eax, i);
1659 __ cmpl_i32r(asInteger(JSImmediate::zeroImmediate()), X86::eax);
1660 m_jmpTable.append(JmpTable(__ je(), i + 2 + target));
1661 __ testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1662 JmpSrc isNonZero = __ jne();
1664 __ cmpl_i32r(asInteger(JSImmediate::falseImmediate()), X86::eax);
1665 m_jmpTable.append(JmpTable(__ je(), i + 2 + target));
1666 __ cmpl_i32r(asInteger(JSImmediate::trueImmediate()), X86::eax);
1667 m_slowCases.append(SlowCaseEntry(__ jne(), i));
1669 __ link(isNonZero, __ label());
1674 unsigned src = instruction[i + 1].u.operand;
1675 unsigned target = instruction[i + 2].u.operand;
1677 emitGetVirtualRegister(src, X86::eax, i);
1678 __ testl_i32r(JSImmediate::TagMask, X86::eax);
1679 JmpSrc isImmediate = __ jnz();
1681 __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::eax, X86::ecx);
1682 __ testl_i32m(MasqueradesAsUndefined, FIELD_OFFSET(Structure, m_typeInfo.m_flags), X86::ecx);
1683 __ setnz_r(X86::eax);
1685 JmpSrc wasNotImmediate = __ jmp();
1687 __ link(isImmediate, __ label());
1689 __ movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
1690 __ andl_rr(X86::eax, X86::ecx);
1691 __ cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
1692 __ sete_r(X86::eax);
1694 __ link(wasNotImmediate, __ label());
1696 __ movzbl_rr(X86::eax, X86::eax);
1697 __ cmpl_i32r(0, X86::eax);
1698 m_jmpTable.append(JmpTable(__ jnz(), i + 2 + target));
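// (The immediate case uses a masking trick: clearing ExtendedTagBitUndefined
// maps both null and undefined onto FullTagTypeNull, so a single compare
// treats them as equal to null. A heap cell only counts as null here if its
// Structure is flagged MasqueradesAsUndefined.)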
1703 case op_jneq_null: {
1704 unsigned src = instruction[i + 1].u.operand;
1705 unsigned target = instruction[i + 2].u.operand;
1707 emitGetVirtualRegister(src, X86::eax, i);
1708 __ testl_i32r(JSImmediate::TagMask, X86::eax);
1709 JmpSrc isImmediate = __ jnz();
1711 __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::eax, X86::ecx);
1712 __ testl_i32m(MasqueradesAsUndefined, FIELD_OFFSET(Structure, m_typeInfo.m_flags), X86::ecx);
1713 __ setz_r(X86::eax);
1715 JmpSrc wasNotImmediate = __ jmp();
1717 __ link(isImmediate, __ label());
1719 __ movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
1720 __ andl_rr(X86::eax, X86::ecx);
1721 __ cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
1722 __ setne_r(X86::eax);
1724 __ link(wasNotImmediate, __ label());
1726 __ movzbl_rr(X86::eax, X86::eax);
1727 __ cmpl_i32r(0, X86::eax);
1728 m_jmpTable.append(JmpTable(__ jnz(), i + 2 + target));
1734 int srcDst = instruction[i + 2].u.operand;
1735 emitGetVirtualRegister(srcDst, X86::eax, i);
1736 __ movl_rr(X86::eax, X86::edx);
1737 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1738 __ addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1739 m_slowCases.append(SlowCaseEntry(__ jo(), i));
1740 emitPutVirtualRegister(srcDst, X86::edx);
1741 emitPutVirtualRegister(instruction[i + 1].u.operand);
1745 case op_unexpected_load: {
1746 JSValue* v = m_codeBlock->unexpectedConstants[instruction[i + 2].u.operand];
1747 __ movl_i32r(asInteger(v), X86::eax);
1748 emitPutVirtualRegister(instruction[i + 1].u.operand);
1753 int retAddrDst = instruction[i + 1].u.operand;
1754 int target = instruction[i + 2].u.operand;
1755 __ movl_i32m(0, sizeof(Register) * retAddrDst, X86::edi);
1756 JmpDst addrPosition = __ label();
1757 m_jmpTable.append(JmpTable(__ jmp(), i + 2 + target));
1758 JmpDst sretTarget = __ label();
1759 m_jsrSites.append(JSRInfo(addrPosition, sretTarget));
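// (op_jsr stores a placeholder return address - the 0 above - into the
// register and records the (addrPosition, sretTarget) pair in m_jsrSites;
// once the code has been copied to its final location, the jsr linking loop
// in privateCompile() uses X86Assembler::linkAbsoluteAddress() to patch the
// real address of sretTarget into that store. op_sret, below, then jumps
// indirectly through the register.)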
1764 __ jmp_m(sizeof(Register) * instruction[i + 1].u.operand, X86::edi);
1769 emitGetVirtualRegisters(instruction[i + 2].u.operand, X86::eax, instruction[i + 3].u.operand, X86::edx, i);
1770 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1771 __ cmpl_rr(X86::edx, X86::eax);
1772 __ sete_r(X86::eax);
1773 __ movzbl_rr(X86::eax, X86::eax);
1774 emitTagAsBoolImmediate(X86::eax);
1775 emitPutVirtualRegister(instruction[i + 1].u.operand);
1780 emitGetVirtualRegisters(instruction[i + 2].u.operand, X86::eax, instruction[i + 3].u.operand, X86::ecx, i);
1781 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1782 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1783 emitFastArithImmToInt(X86::eax);
1784 emitFastArithImmToInt(X86::ecx);
1785 __ shll_CLr(X86::eax);
1786 emitFastArithIntToImmOrSlowCase(X86::eax, i);
1787 emitPutVirtualRegister(instruction[i + 1].u.operand);
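// (The fast shift strips the integer tag from both operands, performs the
// native shll with the count in %cl, and re-tags the result;
// emitFastArithIntToImmOrSlowCase() bails to the slow case if the shifted
// value no longer fits in a tagged 31-bit immediate.)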
1792 unsigned src1 = instruction[i + 2].u.operand;
1793 unsigned src2 = instruction[i + 3].u.operand;
1794 unsigned dst = instruction[i + 1].u.operand;
1795 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
1796 emitGetVirtualRegister(src2, X86::eax, i);
1797 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1798 __ andl_i32r(asInteger(value), X86::eax); // FIXME: make it more obvious this is relying on the format of JSImmediate
1799 emitPutVirtualRegister(dst);
1800 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
1801 emitGetVirtualRegister(src1, X86::eax, i);
1802 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1803 __ andl_i32r(asInteger(value), X86::eax);
1804 emitPutVirtualRegister(dst);
1806 emitGetVirtualRegisters(src1, X86::eax, src2, X86::edx, i);
1807 __ andl_rr(X86::edx, X86::eax);
1808 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1809 emitPutVirtualRegister(dst);
1815 emitGetVirtualRegisters(instruction[i + 2].u.operand, X86::eax, instruction[i + 3].u.operand, X86::ecx, i);
1816 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1817 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1818 emitFastArithImmToInt(X86::ecx);
1819 __ sarl_CLr(X86::eax);
1820 emitFastArithPotentiallyReTagImmediate(X86::eax);
1821 emitPutVirtualRegister(instruction[i + 1].u.operand);
1826 emitGetVirtualRegister(instruction[i + 2].u.operand, X86::eax, i);
1827 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1828 __ xorl_i8r(~JSImmediate::TagBitTypeInteger, X86::eax);
1829 emitPutVirtualRegister(instruction[i + 1].u.operand);
1833 case op_resolve_with_base: {
1834 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1835 emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 0);
1836 emitCTICall(i, Interpreter::cti_op_resolve_with_base);
1837 emitPutVirtualRegister(instruction[i + 2].u.operand, X86::edx);
1838 emitPutVirtualRegister(instruction[i + 1].u.operand);
1842 case op_new_func_exp: {
1843 FuncExprNode* func = (m_codeBlock->functionExpressions[instruction[i + 2].u.operand]).get();
1844 emitPutCTIArgConstant(reinterpret_cast<unsigned>(func), 0);
1845 emitCTICall(i, Interpreter::cti_op_new_func_exp);
1846 emitPutVirtualRegister(instruction[i + 1].u.operand);
1851 emitGetVirtualRegisters(instruction[i + 2].u.operand, X86::eax, instruction[i + 3].u.operand, X86::ecx, i);
1852 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1853 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1854 emitFastArithDeTagImmediate(X86::eax);
1855 m_slowCases.append(SlowCaseEntry(emitFastArithDeTagImmediateJumpIfZero(X86::ecx), i));
1857 __ idivl_r(X86::ecx);
1858 emitFastArithReTagImmediate(X86::edx);
1859 __ movl_rr(X86::edx, X86::eax);
1860 emitPutVirtualRegister(instruction[i + 1].u.operand);
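// (idivl divides %edx:%eax by its operand and leaves the remainder in %edx,
// which is re-tagged and moved to %eax as the result. The detag-and-jump-if-
// zero check on %ecx above routes a zero divisor to the slow case before the
// division could fault.)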
1865 unsigned target = instruction[i + 2].u.operand;
1866 emitGetVirtualRegister(instruction[i + 1].u.operand, X86::eax, i);
1868 __ cmpl_i32r(asInteger(JSImmediate::zeroImmediate()), X86::eax);
1869 JmpSrc isZero = __ je();
1870 __ testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1871 m_jmpTable.append(JmpTable(__ jne(), i + 2 + target));
1873 __ cmpl_i32r(asInteger(JSImmediate::trueImmediate()), X86::eax);
1874 m_jmpTable.append(JmpTable(__ je(), i + 2 + target));
1875 __ cmpl_i32r(asInteger(JSImmediate::falseImmediate()), X86::eax);
1876 m_slowCases.append(SlowCaseEntry(__ jne(), i));
1878 __ link(isZero, __ label());
1882 CTI_COMPILE_BINARY_OP(op_less)
1884 emitGetVirtualRegisters(instruction[i + 2].u.operand, X86::eax, instruction[i + 3].u.operand, X86::edx, i);
1885 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1886 __ cmpl_rr(X86::eax, X86::edx);
1888 __ setne_r(X86::eax);
1889 __ movzbl_rr(X86::eax, X86::eax);
1890 emitTagAsBoolImmediate(X86::eax);
1892 emitPutVirtualRegister(instruction[i + 1].u.operand);
1898 int srcDst = instruction[i + 2].u.operand;
1899 emitGetVirtualRegister(srcDst, X86::eax, i);
1900 __ movl_rr(X86::eax, X86::edx);
1901 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1902 __ subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1903 m_slowCases.append(SlowCaseEntry(__ jo(), i));
1904 emitPutVirtualRegister(srcDst, X86::edx);
1905 emitPutVirtualRegister(instruction[i + 1].u.operand);
1909 CTI_COMPILE_BINARY_OP(op_urshift)
1911 emitGetVirtualRegisters(instruction[i + 2].u.operand, X86::eax, instruction[i + 3].u.operand, X86::edx, i);
1912 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1913 __ xorl_rr(X86::edx, X86::eax);
1914 emitFastArithReTagImmediate(X86::eax);
1915 emitPutVirtualRegister(instruction[i + 1].u.operand);
1919 case op_new_regexp: {
1920 RegExp* regExp = m_codeBlock->regexps[instruction[i + 2].u.operand].get();
1921 emitPutCTIArgConstant(reinterpret_cast<unsigned>(regExp), 0);
1922 emitCTICall(i, Interpreter::cti_op_new_regexp);
1923 emitPutVirtualRegister(instruction[i + 1].u.operand);
1928 emitGetVirtualRegisters(instruction[i + 2].u.operand, X86::eax, instruction[i + 3].u.operand, X86::edx, i);
1929 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1930 __ orl_rr(X86::edx, X86::eax);
1931 emitPutVirtualRegister(instruction[i + 1].u.operand);
1936 emitPutCTIArgFromVirtualRegister(instruction[i + 1].u.operand, 0, X86::ecx);
1937 emitCTICall(i, Interpreter::cti_op_throw);
1938 __ addl_i8r(0x20, X86::esp);
1939 __ popl_r(X86::ebx);
1940 __ popl_r(X86::edi);
1941 __ popl_r(X86::esi);
1946 case op_get_pnames: {
1947 emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 0, X86::ecx);
1948 emitCTICall(i, Interpreter::cti_op_get_pnames);
1949 emitPutVirtualRegister(instruction[i + 1].u.operand);
1953 case op_next_pname: {
1954 emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 0, X86::ecx);
1955 unsigned target = instruction[i + 3].u.operand;
1956 emitCTICall(i, Interpreter::cti_op_next_pname);
1957 __ testl_rr(X86::eax, X86::eax);
1958 JmpSrc endOfIter = __ je();
1959 emitPutVirtualRegister(instruction[i + 1].u.operand);
1960 m_jmpTable.append(JmpTable(__ jmp(), i + 3 + target));
1961 __ link(endOfIter, __ label());
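// (cti_op_next_pname returns the next property name, or 0 once iteration is
// exhausted: a nonzero result is stored into the destination register and the
// unconditional jump loops back to the body, while 0 falls through out of the
// loop.)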
1965 case op_push_scope: {
1966 emitPutCTIArgFromVirtualRegister(instruction[i + 1].u.operand, 0, X86::ecx);
1967 emitCTICall(i, Interpreter::cti_op_push_scope);
1971 case op_pop_scope: {
1972 emitCTICall(i, Interpreter::cti_op_pop_scope);
1976 CTI_COMPILE_UNARY_OP(op_typeof)
1977 CTI_COMPILE_UNARY_OP(op_is_undefined)
1978 CTI_COMPILE_UNARY_OP(op_is_boolean)
1979 CTI_COMPILE_UNARY_OP(op_is_number)
1980 CTI_COMPILE_UNARY_OP(op_is_string)
1981 CTI_COMPILE_UNARY_OP(op_is_object)
1982 CTI_COMPILE_UNARY_OP(op_is_function)
1984 compileOpStrictEq(instruction + i, i, OpStrictEq);
1988 case op_nstricteq: {
1989 compileOpStrictEq(instruction + i, i, OpNStrictEq);
1993 case op_to_jsnumber: {
1994 int srcVReg = instruction[i + 2].u.operand;
1995 emitGetVirtualRegister(srcVReg, X86::eax, i);
1997 __ testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1998 JmpSrc wasImmediate = __ jnz();
2000 emitJumpSlowCaseIfNotJSCell(X86::eax, i, srcVReg);
2002 __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::eax, X86::ecx);
2003 __ cmpl_i32m(NumberType, FIELD_OFFSET(Structure, m_typeInfo.m_type), X86::ecx);
2005 m_slowCases.append(SlowCaseEntry(__ jne(), i));
2007 __ link(wasImmediate, __ label());
2009 emitPutVirtualRegister(instruction[i + 1].u.operand);
2014 emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 0, X86::ecx);
2015 emitPutCTIArgFromVirtualRegister(instruction[i + 3].u.operand, 4, X86::ecx);
2016 emitCTICall(i, Interpreter::cti_op_in);
2017 emitPutVirtualRegister(instruction[i + 1].u.operand);
2021 case op_push_new_scope: {
2022 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
2023 emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 0);
2024 emitPutCTIArgFromVirtualRegister(instruction[i + 3].u.operand, 4, X86::ecx);
2025 emitCTICall(i, Interpreter::cti_op_push_new_scope);
2026 emitPutVirtualRegister(instruction[i + 1].u.operand);
2031 emitGetCTIParam(CTI_ARGS_callFrame, X86::edi); // edi := r
2032 emitPutVirtualRegister(instruction[i + 1].u.operand);
2036 case op_jmp_scopes: {
2037 unsigned count = instruction[i + 1].u.operand;
2038 emitPutCTIArgConstant(count, 0);
2039 emitCTICall(i, Interpreter::cti_op_jmp_scopes);
2040 unsigned target = instruction[i + 2].u.operand;
2041 m_jmpTable.append(JmpTable(__ jmp(), i + 2 + target));
2045 case op_put_by_index: {
2046 emitPutCTIArgFromVirtualRegister(instruction[i + 1].u.operand, 0, X86::ecx);
2047 emitPutCTIArgConstant(instruction[i + 2].u.operand, 4);
2048 emitPutCTIArgFromVirtualRegister(instruction[i + 3].u.operand, 8, X86::ecx);
2049 emitCTICall(i, Interpreter::cti_op_put_by_index);
2053 case op_switch_imm: {
2054 unsigned tableIndex = instruction[i + 1].u.operand;
2055 unsigned defaultOffset = instruction[i + 2].u.operand;
2056 unsigned scrutinee = instruction[i + 3].u.operand;
2058 // Create a jump table for the switch destinations and track this switch statement.
2059 SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTables[tableIndex];
2060 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Immediate));
2061 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
2063 emitPutCTIArgFromVirtualRegister(scrutinee, 0, X86::ecx);
2064 emitPutCTIArgConstant(tableIndex, 4);
2065 emitCTICall(i, Interpreter::cti_op_switch_imm);
2070 case op_switch_char: {
2071 unsigned tableIndex = instruction[i + 1].u.operand;
2072 unsigned defaultOffset = instruction[i + 2].u.operand;
2073 unsigned scrutinee = instruction[i + 3].u.operand;
2075 // Create a jump table for the switch destinations and track this switch statement.
2076 SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTables[tableIndex];
2077 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Character));
2078 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
2080 emitPutCTIArgFromVirtualRegister(scrutinee, 0, X86::ecx);
2081 emitPutCTIArgConstant(tableIndex, 4);
2082 emitCTICall(i, Interpreter::cti_op_switch_char);
2087 case op_switch_string: {
2088 unsigned tableIndex = instruction[i + 1].u.operand;
2089 unsigned defaultOffset = instruction[i + 2].u.operand;
2090 unsigned scrutinee = instruction[i + 3].u.operand;
2092 // Create a jump table for the switch destinations and track this switch statement.
2093 StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTables[tableIndex];
2094 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset));
2096 emitPutCTIArgFromVirtualRegister(scrutinee, 0, X86::ecx);
2097 emitPutCTIArgConstant(tableIndex, 4);
2098 emitCTICall(i, Interpreter::cti_op_switch_string);
2103 case op_del_by_val: {
2104 emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 0, X86::ecx);
2105 emitPutCTIArgFromVirtualRegister(instruction[i + 3].u.operand, 4, X86::ecx);
2106 emitCTICall(i, Interpreter::cti_op_del_by_val);
2107 emitPutVirtualRegister(instruction[i + 1].u.operand);
2111 case op_put_getter: {
2112 emitPutCTIArgFromVirtualRegister(instruction[i + 1].u.operand, 0, X86::ecx);
2113 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
2114 emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 4);
2115 emitPutCTIArgFromVirtualRegister(instruction[i + 3].u.operand, 8, X86::ecx);
2116 emitCTICall(i, Interpreter::cti_op_put_getter);
2120 case op_put_setter: {
2121 emitPutCTIArgFromVirtualRegister(instruction[i + 1].u.operand, 0, X86::ecx);
2122 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
2123 emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 4);
2124 emitPutCTIArgFromVirtualRegister(instruction[i + 3].u.operand, 8, X86::ecx);
2125 emitCTICall(i, Interpreter::cti_op_put_setter);
2129 case op_new_error: {
2130 JSValue* message = m_codeBlock->unexpectedConstants[instruction[i + 3].u.operand];
2131 emitPutCTIArgConstant(instruction[i + 2].u.operand, 0);
2132 emitPutCTIArgConstant(asInteger(message), 4);
2133 emitPutCTIArgConstant(m_codeBlock->lineNumberForVPC(&instruction[i]), 8);
2134 emitCTICall(i, Interpreter::cti_op_new_error);
2135 emitPutVirtualRegister(instruction[i + 1].u.operand);
2140 emitPutCTIArgConstant(instruction[i + 1].u.operand, 0);
2141 emitPutCTIArgConstant(instruction[i + 2].u.operand, 4);
2142 emitPutCTIArgConstant(instruction[i + 3].u.operand, 8);
2143 emitCTICall(i, Interpreter::cti_op_debug);
2148 unsigned dst = instruction[i + 1].u.operand;
2149 unsigned src1 = instruction[i + 2].u.operand;
2151 emitGetVirtualRegister(src1, X86::eax, i);
2152 __ testl_i32r(JSImmediate::TagMask, X86::eax);
2153 JmpSrc isImmediate = __ jnz();
2155 __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::eax, X86::ecx);
2156 __ testl_i32m(MasqueradesAsUndefined, FIELD_OFFSET(Structure, m_typeInfo.m_flags), X86::ecx);
2157 __ setnz_r(X86::eax);
2159 JmpSrc wasNotImmediate = __ jmp();
2161 __ link(isImmediate, __ label());
2163 __ movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
2164 __ andl_rr(X86::eax, X86::ecx);
2165 __ cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
2166 __ sete_r(X86::eax);
2168 __ link(wasNotImmediate, __ label());
2170 __ movzbl_rr(X86::eax, X86::eax);
2171 emitTagAsBoolImmediate(X86::eax);
2172 emitPutVirtualRegister(dst);
2178 unsigned dst = instruction[i + 1].u.operand;
2179 unsigned src1 = instruction[i + 2].u.operand;
2181 emitGetVirtualRegister(src1, X86::eax, i);
2182 __ testl_i32r(JSImmediate::TagMask, X86::eax);
2183 JmpSrc isImmediate = __ jnz();
2185 __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::eax, X86::ecx);
2186 __ testl_i32m(MasqueradesAsUndefined, FIELD_OFFSET(Structure, m_typeInfo.m_flags), X86::ecx);
2187 __ setz_r(X86::eax);
2189 JmpSrc wasNotImmediate = __ jmp();
2191 __ link(isImmediate, __ label());
2193 __ movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
2194 __ andl_rr(X86::eax, X86::ecx);
2195 __ cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
2196 __ setne_r(X86::eax);
2198 __ link(wasNotImmediate, __ label());
2200 __ movzbl_rr(X86::eax, X86::eax);
2201 emitTagAsBoolImmediate(X86::eax);
2202 emitPutVirtualRegister(dst);
2208 // Even though CTI doesn't use them, we initialize our constant
2209 // registers to zap stale pointers, to avoid unnecessarily prolonging
2210 // object lifetime and increasing GC pressure.
2211 size_t count = m_codeBlock->numVars + m_codeBlock->constantRegisters.size();
2212 for (size_t j = 0; j < count; ++j)
2213 emitInitRegister(j);
2218 case op_enter_with_activation: {
2219 // Even though CTI doesn't use them, we initialize our constant
2220 // registers to zap stale pointers, to avoid unnecessarily prolonging
2221 // object lifetime and increasing GC pressure.
2222 size_t count = m_codeBlock->numVars + m_codeBlock->constantRegisters.size();
2223 for (size_t j = 0; j < count; ++j)
2224 emitInitRegister(j);
2226 emitCTICall(i, Interpreter::cti_op_push_activation);
2227 emitPutVirtualRegister(instruction[i + 1].u.operand);
2232 case op_create_arguments: {
2233 emitCTICall(i, (m_codeBlock->numParameters == 1) ? Interpreter::cti_op_create_arguments_no_params : Interpreter::cti_op_create_arguments);
2237 case op_convert_this: {
2238 emitGetVirtualRegister(instruction[i + 1].u.operand, X86::eax, i);
2240 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
2241 __ movl_mr(FIELD_OFFSET(JSCell, m_structure), X86::eax, X86::edx);
2242 __ testl_i32m(NeedsThisConversion, FIELD_OFFSET(Structure, m_typeInfo.m_flags), X86::edx);
2243 m_slowCases.append(SlowCaseEntry(__ jnz(), i));
2248 case op_profile_will_call: {
2249 emitGetCTIParam(CTI_ARGS_profilerReference, X86::eax);
2250 __ cmpl_i32m(0, X86::eax);
2251 JmpSrc noProfiler = __ je();
2252 emitPutCTIArgFromVirtualRegister(instruction[i + 1].u.operand, 0, X86::eax);
2253 emitCTICall(i, Interpreter::cti_op_profile_will_call);
2254 __ link(noProfiler, __ label());
2259 case op_profile_did_call: {
2260 emitGetCTIParam(CTI_ARGS_profilerReference, X86::eax);
2261 __ cmpl_i32m(0, X86::eax);
2262 JmpSrc noProfiler = __ je();
2263 emitPutCTIArgFromVirtualRegister(instruction[i + 1].u.operand, 0, X86::eax);
2264 emitCTICall(i, Interpreter::cti_op_profile_did_call);
2265 __ link(noProfiler, __ label());
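// (Both profiling opcodes guard their stub call on a profiler actually being
// installed: CTI_ARGS_profilerReference is a Profiler**, so the cmpl against 0
// tests the Profiler* it points at and skips the call entirely when profiling
// is disabled.)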
2270 case op_get_array_length:
2271 case op_get_by_id_chain:
2272 case op_get_by_id_generic:
2273 case op_get_by_id_proto:
2274 case op_get_by_id_proto_list:
2275 case op_get_by_id_self:
2276 case op_get_by_id_self_list:
2277 case op_get_string_length:
2278 case op_put_by_id_generic:
2279 case op_put_by_id_replace:
2280 case op_put_by_id_transition:
2281 ASSERT_NOT_REACHED();
2285 ASSERT(propertyAccessInstructionIndex == m_codeBlock->propertyAccessInstructions.size());
2286 ASSERT(callLinkInfoIndex == m_codeBlock->callLinkInfos.size());
2290 void JIT::privateCompileLinkPass()
2292 unsigned jmpTableCount = m_jmpTable.size();
2293 for (unsigned i = 0; i < jmpTableCount; ++i)
2294 __ link(m_jmpTable[i].from, m_labels[m_jmpTable[i].to]);
2298 #define CTI_COMPILE_BINARY_OP_SLOW_CASE(name) \
2300 __ link(iter->from, __ label()); \
2301 emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 0, X86::ecx); \
2302 emitPutCTIArgFromVirtualRegister(instruction[i + 3].u.operand, 4, X86::ecx); \
2303 emitCTICall(i, Interpreter::cti_##name); \
2304 emitPutVirtualRegister(instruction[i + 1].u.operand); \
2309 #define CTI_COMPILE_BINARY_OP_SLOW_CASE_DOUBLE_ENTRY(name) \
2311 __ link(iter->from, __ label()); \
2312 __ link((++iter)->from, __ label()); \
2313 emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 0, X86::ecx); \
2314 emitPutCTIArgFromVirtualRegister(instruction[i + 3].u.operand, 4, X86::ecx); \
2315 emitCTICall(i, Interpreter::cti_##name); \
2316 emitPutVirtualRegister(instruction[i + 1].u.operand); \
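// (The DOUBLE_ENTRY variant is for opcodes whose hot path registers two slow
// case jumps - typically one type check per operand - so both entries have to
// be linked to the same stub call.)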
2321 void JIT::privateCompileSlowCases()
2323 unsigned propertyAccessInstructionIndex = 0;
2324 unsigned callLinkInfoIndex = 0;
2326 Instruction* instruction = m_codeBlock->instructions.begin();
2327 for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end(); ++iter) {
2328 // FIXME: enable peephole optimizations for slow cases when applicable
2329 killLastResultRegister();
2331 unsigned i = iter->to;
2333 unsigned firstTo = i;
2336 switch (OpcodeID opcodeID = m_interpreter->getOpcodeID(instruction[i].u.opcode)) {
2337 case op_convert_this: {
2338 __ link(iter->from, __ label());
2339 __ link((++iter)->from, __ label());
2340 emitPutCTIArg(X86::eax, 0);
2341 emitCTICall(i, Interpreter::cti_op_convert_this);
2342 emitPutVirtualRegister(instruction[i + 1].u.operand);
2347 unsigned dst = instruction[i + 1].u.operand;
2348 unsigned src1 = instruction[i + 2].u.operand;
2349 unsigned src2 = instruction[i + 3].u.operand;
2350 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
2351 JmpSrc notImm = iter->from;
2352 __ link((++iter)->from, __ label());
2353 __ subl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
2354 __ link(notImm, __ label());
2355 emitPutCTIArgFromVirtualRegister(src1, 0, X86::ecx);
2356 emitPutCTIArg(X86::eax, 4);
2357 emitCTICall(i, Interpreter::cti_op_add);
2358 emitPutVirtualRegister(dst);
2359 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
2360 JmpSrc notImm = iter->from;
2361 __ link((++iter)->from, __ label());
2362 __ subl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
2363 __ link(notImm, __ label());
2364 emitPutCTIArg(X86::eax, 0);
2365 emitPutCTIArgFromVirtualRegister(src2, 4, X86::ecx);
2366 emitCTICall(i, Interpreter::cti_op_add);
2367 emitPutVirtualRegister(dst);
2369 OperandTypes types = OperandTypes::fromInt(instruction[i + 4].u.operand);
2370 if (types.first().mightBeNumber() && types.second().mightBeNumber())
2371 compileBinaryArithOpSlowCase(op_add, iter, dst, src1, src2, types, i);
2373 ASSERT_NOT_REACHED();
2379 case op_get_by_val: {
2380 // The slow case that handles accesses to arrays (below) may jump back up to here.
2381 JmpDst beginGetByValSlow = __ label();
2383 JmpSrc notImm = iter->from;
2384 __ link((++iter)->from, __ label());
2385 __ link((++iter)->from, __ label());
2386 emitFastArithIntToImmNoCheck(X86::edx);
2387 __ link(notImm, __ label());
2388 emitPutCTIArg(X86::eax, 0);
2389 emitPutCTIArg(X86::edx, 4);
2390 emitCTICall(i, Interpreter::cti_op_get_by_val);
2391 emitPutVirtualRegister(instruction[i + 1].u.operand);
2392 __ link(__ jmp(), m_labels[i + 4]);
2394 // This is the slow case that handles accesses to arrays above the fast cut-off.
2395 // First, check if this is an access to the vector
2396 __ link((++iter)->from, __ label());
2397 __ cmpl_rm(X86::edx, FIELD_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
2398 __ link(__ jbe(), beginGetByValSlow);
2400 // okay, missed the fast region, but it is still in the vector. Get the value.
2401 __ movl_mr(FIELD_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::ecx);
2402 // Check whether the value loaded is zero; if so we need to return undefined.
2403 __ testl_rr(X86::ecx, X86::ecx);
2404 __ link(__ je(), beginGetByValSlow);
2405 __ movl_rr(X86::ecx, X86::eax);
2406 emitPutVirtualRegister(instruction[i + 1].u.operand, X86::eax);
2412 compileBinaryArithOpSlowCase(op_sub, iter, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
2417 __ link(iter->from, __ label());
2418 __ link((++iter)->from, __ label());
2419 emitPutCTIArg(X86::eax, 0);
2420 emitPutCTIArg(X86::ecx, 4);
2421 emitCTICall(i, Interpreter::cti_op_rshift);
2422 emitPutVirtualRegister(instruction[i + 1].u.operand);
2427 JmpSrc notImm1 = iter->from;
2428 JmpSrc notImm2 = (++iter)->from;
2429 __ link((++iter)->from, __ label());
2430 emitGetVirtualRegisters(instruction[i + 2].u.operand, X86::eax, instruction[i + 3].u.operand, X86::ecx, i);
2431 __ link(notImm1, __ label());
2432 __ link(notImm2, __ label());
2433 emitPutCTIArg(X86::eax, 0);
2434 emitPutCTIArg(X86::ecx, 4);
2435 emitCTICall(i, Interpreter::cti_op_lshift);
2436 emitPutVirtualRegister(instruction[i + 1].u.operand);
2440 case op_loop_if_less: {
2441 emitSlowScriptCheck(i);
2443 unsigned target = instruction[i + 3].u.operand;
2444 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
2446 __ link(iter->from, __ label());
2447 emitPutCTIArg(X86::edx, 0);
2448 emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 4, X86::ecx);
2449 emitCTICall(i, Interpreter::cti_op_loop_if_less);
2450 __ testl_rr(X86::eax, X86::eax);
2451 __ link(__ jne(), m_labels[i + 3 + target]);
2453 __ link(iter->from, __ label());
2454 __ link((++iter)->from, __ label());
2455 emitPutCTIArg(X86::eax, 0);
2456 emitPutCTIArg(X86::edx, 4);
2457 emitCTICall(i, Interpreter::cti_op_loop_if_less);
2458 __ testl_rr(X86::eax, X86::eax);
2459 __ link(__ jne(), m_labels[i + 3 + target]);
2464 case op_put_by_id: {
2465 if (linkSlowCaseIfNotJSCell(iter, instruction[i + 1].u.operand))
2467 __ link(iter->from, __ label());
2469 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
2470 emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 4);
2471 emitPutCTIArg(X86::eax, 0);
2472 emitPutCTIArg(X86::edx, 8);
2473 JmpSrc call = emitCTICall(i, Interpreter::cti_op_put_by_id);
2475 // Track the location of the call; this will be used to recover repatch information.
2476 ASSERT(m_codeBlock->propertyAccessInstructions[propertyAccessInstructionIndex].bytecodeIndex == i);
2477 m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
2478 ++propertyAccessInstructionIndex;
2483 case op_get_by_id: {
2484 // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
2485 // so that we only need to track one pointer into the slow case code - we track a pointer to the location
2486 // of the call (which we can use to look up the repatch information), but should an array-length or
2487 // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
2488 // the distance from the call to the head of the slow case.
2490 if (linkSlowCaseIfNotJSCell(iter, instruction[i + 2].u.operand))
2492 __ link(iter->from, __ label());
2495 JmpDst coldPathBegin = __ label();
2497 emitPutCTIArg(X86::eax, 0);
2498 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
2499 emitPutCTIArgConstant(reinterpret_cast<unsigned>(ident), 4);
2500 JmpSrc call = emitCTICall(i, Interpreter::cti_op_get_by_id);
2501 ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
2502 emitPutVirtualRegister(instruction[i + 1].u.operand);
2504 // Track the location of the call; this will be used to recover repatch information.
2505 ASSERT(m_codeBlock->propertyAccessInstructions[propertyAccessInstructionIndex].bytecodeIndex == i);
2506 m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
2507 ++propertyAccessInstructionIndex;
2512 case op_loop_if_lesseq: {
2513 emitSlowScriptCheck(i);
2515 unsigned target = instruction[i + 3].u.operand;
2516 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
2518 __ link(iter->from, __ label());
2519 emitPutCTIArg(X86::edx, 0);
2520 emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 4, X86::ecx);
2521 emitCTICall(i, Interpreter::cti_op_loop_if_lesseq);
2522 __ testl_rr(X86::eax, X86::eax);
2523 __ link(__ jne(), m_labels[i + 3 + target]);
2525 __ link(iter->from, __ label());
2526 __ link((++iter)->from, __ label());
2527 emitPutCTIArg(X86::eax, 0);
2528 emitPutCTIArg(X86::edx, 4);
2529 emitCTICall(i, Interpreter::cti_op_loop_if_lesseq);
2530 __ testl_rr(X86::eax, X86::eax);
2531 __ link(__ jne(), m_labels[i + 3 + target]);
2537 unsigned srcDst = instruction[i + 1].u.operand;
2538 JmpSrc notImm = iter->from;
2539 __ link((++iter)->from, __ label());
2540 __ subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
2541 __ link(notImm, __ label());
2542 emitPutCTIArg(X86::eax, 0);
2543 emitCTICall(i, Interpreter::cti_op_pre_inc);
2544 emitPutVirtualRegister(srcDst);
2548 case op_put_by_val: {
2549 // Normal slow cases - either the index is not an immediate int, or the base is not an array.
2550 JmpSrc notImm = iter->from;
2551 __ link((++iter)->from, __ label());
2552 __ link((++iter)->from, __ label());
2553 emitFastArithIntToImmNoCheck(X86::edx);
2554 __ link(notImm, __ label());
2555 emitGetVirtualRegister(instruction[i + 3].u.operand, X86::ecx, i);
2556 emitPutCTIArg(X86::eax, 0);
2557 emitPutCTIArg(X86::edx, 4);
2558 emitPutCTIArg(X86::ecx, 8);
2559 emitCTICall(i, Interpreter::cti_op_put_by_val);
2560 __ link(__ jmp(), m_labels[i + 4]);
2562 // slow cases for immediate int accesses to arrays
2563 __ link((++iter)->from, __ label());
2564 __ link((++iter)->from, __ label());
2565 emitGetVirtualRegister(instruction[i + 3].u.operand, X86::ecx, i);
2566 emitPutCTIArg(X86::eax, 0);
2567 emitPutCTIArg(X86::edx, 4);
2568 emitPutCTIArg(X86::ecx, 8);
2569 emitCTICall(i, Interpreter::cti_op_put_by_val_array);
2574 case op_loop_if_true: {
2575 emitSlowScriptCheck(i);
2577 __ link(iter->from, __ label());
2578 emitPutCTIArg(X86::eax, 0);
2579 emitCTICall(i, Interpreter::cti_op_jtrue);
2580 __ testl_rr(X86::eax, X86::eax);
2581 unsigned target = instruction[i + 2].u.operand;
2582 __ link(__ jne(), m_labels[i + 2 + target]);
2587 unsigned srcDst = instruction[i + 1].u.operand;
2588 JmpSrc notImm = iter->from;
2589 __ link((++iter)->from, __ label());
2590 __ addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
2591 __ link(notImm, __ label());
2592 emitPutCTIArg(X86::eax, 0);
2593 emitCTICall(i, Interpreter::cti_op_pre_dec);
2594 emitPutVirtualRegister(srcDst);
2599 unsigned target = instruction[i + 3].u.operand;
2600 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
2602 __ link(iter->from, __ label());
2603 emitPutCTIArg(X86::edx, 0);
2604 emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 4, X86::ecx);
2605 emitCTICall(i, Interpreter::cti_op_jless);
2606 __ testl_rr(X86::eax, X86::eax);
2607 __ link(__ je(), m_labels[i + 3 + target]);
2609 __ link(iter->from, __ label());
2610 __ link((++iter)->from, __ label());
2611 emitPutCTIArg(X86::eax, 0);
2612 emitPutCTIArg(X86::edx, 4);
2613 emitCTICall(i, Interpreter::cti_op_jless);
2614 __ testl_rr(X86::eax, X86::eax);
2615 __ link(__ je(), m_labels[i + 3 + target]);
2621 __ link(iter->from, __ label());
2622 __ xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
2623 emitPutCTIArg(X86::eax, 0);
2624 emitCTICall(i, Interpreter::cti_op_not);
2625 emitPutVirtualRegister(instruction[i + 1].u.operand);
2630 __ link(iter->from, __ label());
2631 emitPutCTIArg(X86::eax, 0);
2632 emitCTICall(i, Interpreter::cti_op_jtrue);
2633 __ testl_rr(X86::eax, X86::eax);
2634 unsigned target = instruction[i + 2].u.operand;
2635 __ link(__ je(), m_labels[i + 2 + target]); // inverted!
2640 unsigned srcDst = instruction[i + 2].u.operand;
2641 __ link(iter->from, __ label());
2642 __ link((++iter)->from, __ label());
2643 emitPutCTIArg(X86::eax, 0);
2644 emitCTICall(i, Interpreter::cti_op_post_inc);
2645 emitPutVirtualRegister(srcDst, X86::edx);
2646 emitPutVirtualRegister(instruction[i + 1].u.operand);
2651 __ link(iter->from, __ label());
2652 emitPutCTIArg(X86::eax, 0);
2653 emitCTICall(i, Interpreter::cti_op_bitnot);
2654 emitPutVirtualRegister(instruction[i + 1].u.operand);
2659 unsigned src1 = instruction[i + 2].u.operand;
2660 unsigned src2 = instruction[i + 3].u.operand;
2661 unsigned dst = instruction[i + 1].u.operand;
2662 if (getConstantImmediateNumericArg(src1)) {
2663 __ link(iter->from, __ label());
2664 emitPutCTIArgFromVirtualRegister(src1, 0, X86::ecx);
2665 emitPutCTIArg(X86::eax, 4);
2666 emitCTICall(i, Interpreter::cti_op_bitand);
2667 emitPutVirtualRegister(dst);
2668 } else if (getConstantImmediateNumericArg(src2)) {
2669 __ link(iter->from, __ label());
2670 emitPutCTIArg(X86::eax, 0);
2671 emitPutCTIArgFromVirtualRegister(src2, 4, X86::ecx);
2672 emitCTICall(i, Interpreter::cti_op_bitand);
2673 emitPutVirtualRegister(dst);
2675 __ link(iter->from, __ label());
2676 emitPutCTIArgFromVirtualRegister(src1, 0, X86::ecx);
2677 emitPutCTIArg(X86::edx, 4);
2678 emitCTICall(i, Interpreter::cti_op_bitand);
2679 emitPutVirtualRegister(dst);
2685 __ link(iter->from, __ label());
2686 emitPutCTIArg(X86::eax, 0);
2687 emitCTICall(i, Interpreter::cti_op_jtrue);
2688 __ testl_rr(X86::eax, X86::eax);
2689 unsigned target = instruction[i + 2].u.operand;
2690 __ link(__ jne(), m_labels[i + 2 + target]);
2695 unsigned srcDst = instruction[i + 2].u.operand;
2696 __ link(iter->from, __ label());
2697 __ link((++iter)->from, __ label());
2698 emitPutCTIArg(X86::eax, 0);
2699 emitCTICall(i, Interpreter::cti_op_post_dec);
2700 emitPutVirtualRegister(srcDst, X86::edx);
2701 emitPutVirtualRegister(instruction[i + 1].u.operand);
2706 __ link(iter->from, __ label());
2707 emitPutCTIArg(X86::eax, 0);
2708 emitPutCTIArg(X86::edx, 4);
2709 emitCTICall(i, Interpreter::cti_op_bitxor);
2710 emitPutVirtualRegister(instruction[i + 1].u.operand);
2715 __ link(iter->from, __ label());
2716 emitPutCTIArg(X86::eax, 0);
2717 emitPutCTIArg(X86::edx, 4);
2718 emitCTICall(i, Interpreter::cti_op_bitor);
2719 emitPutVirtualRegister(instruction[i + 1].u.operand);
2724 __ link(iter->from, __ label());
2725 emitPutCTIArg(X86::eax, 0);
2726 emitPutCTIArg(X86::edx, 4);
2727 emitCTICall(i, Interpreter::cti_op_eq);
2728 emitPutVirtualRegister(instruction[i + 1].u.operand);
2733 __ link(iter->from, __ label());
2734 emitPutCTIArg(X86::eax, 0);
2735 emitPutCTIArg(X86::edx, 4);
2736 emitCTICall(i, Interpreter::cti_op_neq);
2737 emitPutVirtualRegister(instruction[i + 1].u.operand);
2741 CTI_COMPILE_BINARY_OP_SLOW_CASE_DOUBLE_ENTRY(op_stricteq);
2742 CTI_COMPILE_BINARY_OP_SLOW_CASE_DOUBLE_ENTRY(op_nstricteq);
2743 case op_instanceof: {
2744 __ link(iter->from, __ label());
2745 __ link((++iter)->from, __ label());
2746 __ link((++iter)->from, __ label());
2747 emitPutCTIArgFromVirtualRegister(instruction[i + 2].u.operand, 0, X86::ecx);
2748 emitPutCTIArgFromVirtualRegister(instruction[i + 3].u.operand, 4, X86::ecx);
2749 emitPutCTIArgFromVirtualRegister(instruction[i + 4].u.operand, 8, X86::ecx);
2750 emitCTICall(i, Interpreter::cti_op_instanceof);
2751 emitPutVirtualRegister(instruction[i + 1].u.operand);
2756 JmpSrc notImm1 = iter->from;
2757 JmpSrc notImm2 = (++iter)->from;
2758 __ link((++iter)->from, __ label());
2759 emitFastArithReTagImmediate(X86::eax);
2760 emitFastArithReTagImmediate(X86::ecx);
2761 __ link(notImm1, __ label());
2762 __ link(notImm2, __ label());
2763 emitPutCTIArg(X86::eax, 0);
2764 emitPutCTIArg(X86::ecx, 4);
2765 emitCTICall(i, Interpreter::cti_op_mod);
2766 emitPutVirtualRegister(instruction[i + 1].u.operand);
2771 int dst = instruction[i + 1].u.operand;
2772 int src1 = instruction[i + 2].u.operand;
2773 int src2 = instruction[i + 3].u.operand;
2774 JSValue* src1Value = getConstantImmediateNumericArg(src1);
2775 JSValue* src2Value = getConstantImmediateNumericArg(src2);
2777 if (src1Value && ((value = JSImmediate::intValue(src1Value)) > 0)) {
2778 __ link(iter->from, __ label());
2779 __ link((++iter)->from, __ label());
2780 // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
2781 emitPutCTIArgFromVirtualRegister(src1, 0, X86::ecx);
2782 emitPutCTIArgFromVirtualRegister(src2, 4, X86::ecx);
2783 emitCTICall(i, Interpreter::cti_op_mul);
2784 emitPutVirtualRegister(dst);
2785 } else if (src2Value && ((value = JSImmediate::intValue(src2Value)) > 0)) {
2786 __ link(iter->from, __ label());
2787 __ link((++iter)->from, __ label());
2788 // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
2789 emitPutCTIArgFromVirtualRegister(src1, 0, X86::ecx);
2790 emitPutCTIArgFromVirtualRegister(src2, 4, X86::ecx);
2791 emitCTICall(i, Interpreter::cti_op_mul);
2792 emitPutVirtualRegister(dst);
2794 compileBinaryArithOpSlowCase(op_mul, iter, dst, src1, src2, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
2801 case op_construct: {
2802 int dst = instruction[i + 1].u.operand;
2803 int callee = instruction[i + 2].u.operand;
2804 int argCount = instruction[i + 3].u.operand;
2805 int registerOffset = instruction[i + 4].u.operand;
2807 __ link(iter->from, __ label());
2809 // The arguments have been set up on the hot path for op_call_eval
2810 if (opcodeID == op_call)
2811 compileOpCallSetupArgs(instruction + i);
2812 else if (opcodeID == op_construct)
2813 compileOpConstructSetupArgs(instruction + i);
2815 // Fast check for JS function.
2816 __ testl_i32r(JSImmediate::TagMask, X86::ecx);
2817 JmpSrc callLinkFailNotObject = __ jne();
2818 __ cmpl_i32m(reinterpret_cast<unsigned>(m_interpreter->m_jsFunctionVptr), X86::ecx);
2819 JmpSrc callLinkFailNotJSFunction = __ jne();
2821 // First, in the case of a construct, allocate the new object.
2822 if (opcodeID == op_construct) {
2823 emitCTICall(i, Interpreter::cti_op_construct_JSConstruct);
2824 emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
2825 emitGetVirtualRegister(callee, X86::ecx, i);
2828 __ movl_i32r(argCount, X86::edx);
2830 // Speculatively roll the callframe, assuming argCount will match the arity.
2831 __ movl_rm(X86::edi, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register)), X86::edi);
2832 __ addl_i32r(registerOffset * static_cast<int>(sizeof(Register)), X86::edi);
2834 m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation =
2835 emitNakedCall(i, m_interpreter->m_ctiVirtualCallPreLink);
2837 JmpSrc storeResultForFirstRun = __ jmp();
2839 // This is the address for the cold path *after* the first run (which tries to link the call).
2840 m_callStructureStubCompilationInfo[callLinkInfoIndex].coldPathOther = __ label();
2842 // The arguments have been set up on the hot path for op_call_eval
2843 if (opcodeID == op_call)
2844 compileOpCallSetupArgs(instruction + i);
2845 else if (opcodeID == op_construct)
2846 compileOpConstructSetupArgs(instruction + i);
2848 // Check for JSFunctions.
2849 __ testl_i32r(JSImmediate::TagMask, X86::ecx);
2850 JmpSrc isNotObject = __ jne();
2851 __ cmpl_i32m(reinterpret_cast<unsigned>(m_interpreter->m_jsFunctionVptr), X86::ecx);
2852 JmpSrc isJSFunction = __ je();
2854 // This handles host functions
2855 JmpDst notJSFunctionlabel = __ label();
2856 __ link(isNotObject, notJSFunctionlabel);
2857 __ link(callLinkFailNotObject, notJSFunctionlabel);
2858 __ link(callLinkFailNotJSFunction, notJSFunctionlabel);
2859 emitCTICall(i, ((opcodeID == op_construct) ? Interpreter::cti_op_construct_NotJSConstruct : Interpreter::cti_op_call_NotJSFunction));
2860 JmpSrc wasNotJSFunction = __ jmp();
2862 // Next, handle JSFunctions...
2863 __ link(isJSFunction, __ label());
2865 // First, in the case of a construct, allocate the new object.
2866 if (opcodeID == op_construct) {
2867 emitCTICall(i, Interpreter::cti_op_construct_JSConstruct);
2868 emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
2869 emitGetVirtualRegister(callee, X86::ecx, i);
2872 // Speculatively roll the callframe, assuming argCount will match the arity.
2873 __ movl_rm(X86::edi, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register)), X86::edi);
2874 __ addl_i32r(registerOffset * static_cast<int>(sizeof(Register)), X86::edi);
2875 __ movl_i32r(argCount, X86::edx);
2877 emitNakedCall(i, m_interpreter->m_ctiVirtualCall);
2879 // Put the return value in dst. In the interpreter, op_ret does this.
2880 JmpDst storeResult = __ label();
2881 __ link(wasNotJSFunction, storeResult);
2882 __ link(storeResultForFirstRun, storeResult);
2883 emitPutVirtualRegister(dst);
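// (Two cold paths converge on this store: the very first execution calls
// through m_ctiVirtualCallPreLink, which attempts to link the call site
// directly to the callee, while subsequent misses re-enter at coldPathOther
// above and go through the plain m_ctiVirtualCall trampoline.)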
2885 #if ENABLE(CODEBLOCK_SAMPLING)
2886 __ movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
2888 ++callLinkInfoIndex;
2890 i += (opcodeID == op_construct ? 7 : 5);
2893 case op_to_jsnumber: {
2894 if (linkSlowCaseIfNotJSCell(iter, instruction[i + 2].u.operand))
2896 __ link(iter->from, __ label());
2898 emitPutCTIArg(X86::eax, 0);
2899 emitCTICall(i, Interpreter::cti_op_to_jsnumber);
2901 emitPutVirtualRegister(instruction[i + 1].u.operand);
2907 ASSERT_NOT_REACHED();
2911 ASSERT_WITH_MESSAGE((iter + 1) == m_slowCases.end() || firstTo != (iter + 1)->to, "Not enough jumps linked in slow case codegen.");
2912 ASSERT_WITH_MESSAGE(firstTo == iter->to, "Too many jumps linked in slow case codegen.");
2914 __ link(__ jmp(), m_labels[i]);
2917 ASSERT(propertyAccessInstructionIndex == m_codeBlock->propertyAccessInstructions.size());
2918 ASSERT(callLinkInfoIndex == m_codeBlock->callLinkInfos.size());
2921 void JIT::privateCompile()
2923 #if ENABLE(CODEBLOCK_SAMPLING)
2924 __ movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
2926 #if ENABLE(OPCODE_SAMPLING)
2927 __ movl_i32m(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions.begin()), m_interpreter->sampler()->sampleSlot());
2930 // Could use a popl_m, but would need to offset the following instruction if so.
2931 __ popl_r(X86::ecx);
2932 emitPutToCallFrameHeader(X86::ecx, RegisterFile::ReturnPC);
2934 JmpSrc slowRegisterFileCheck;
2935 JmpDst afterRegisterFileCheck;
2936 if (m_codeBlock->codeType == FunctionCode) {
2937 // In the case of a fast linked call, we do not set this up in the caller.
2938 __ movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), RegisterFile::CodeBlock * static_cast<int>(sizeof(Register)), X86::edi);
2940 emitGetCTIParam(CTI_ARGS_registerFile, X86::eax);
2941 __ leal_mr(m_codeBlock->numCalleeRegisters * sizeof(Register), X86::edi, X86::edx);
2942 __ cmpl_mr(FIELD_OFFSET(RegisterFile, m_end), X86::eax, X86::edx);
2943 slowRegisterFileCheck = __ jg();
2944 afterRegisterFileCheck = __ label();
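// (Roughly: newEnd = %edi + numCalleeRegisters * sizeof(Register); if newEnd
// exceeds registerFile->m_end, the slow path linked below calls
// cti_register_file_check to grow the register file - or throw on overflow -
// before jumping back to afterRegisterFileCheck.)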
2947 privateCompileMainPass();
2948 privateCompileLinkPass();
2949 privateCompileSlowCases();
2951 if (m_codeBlock->codeType == FunctionCode) {
2952 __ link(slowRegisterFileCheck, __ label());
2953 emitCTICall(0, Interpreter::cti_register_file_check);
2954 JmpSrc backToBody = __ jmp();
2955 __ link(backToBody, afterRegisterFileCheck);
2958 ASSERT(m_jmpTable.isEmpty());
2960 void* code = __ executableCopy();
2962 // Translate vPC offsets into addresses in JIT generated code, for switch tables.
2963 for (unsigned i = 0; i < m_switches.size(); ++i) {
2964 SwitchRecord record = m_switches[i];
2965 unsigned bytecodeIndex = record.bytecodeIndex;
2967 if (record.type != SwitchRecord::String) {
2968 ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
2969 ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());
2971 record.jumpTable.simpleJumpTable->ctiDefault = __ getRelocatedAddress(code, m_labels[bytecodeIndex + 3 + record.defaultOffset]);
2973 for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
2974 unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
2975 record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? __ getRelocatedAddress(code, m_labels[bytecodeIndex + 3 + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
2978 ASSERT(record.type == SwitchRecord::String);
2980 record.jumpTable.stringJumpTable->ctiDefault = __ getRelocatedAddress(code, m_labels[bytecodeIndex + 3 + record.defaultOffset]);
2982 StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
2983 for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
2984 unsigned offset = it->second.branchOffset;
2985 it->second.ctiOffset = offset ? __ getRelocatedAddress(code, m_labels[bytecodeIndex + 3 + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
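// (The ctiOffsets/ctiOffset tables parallel the bytecode branch offsets; a
// recorded branch offset of 0 marks a value with no case, so those slots fall
// back to ctiDefault rather than to a relocated label.)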
2990 for (Vector<HandlerInfo>::iterator iter = m_codeBlock->exceptionHandlers.begin(); iter != m_codeBlock->exceptionHandlers.end(); ++iter)
2991 iter->nativeCode = __ getRelocatedAddress(code, m_labels[iter->target]);
2993 for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
2995 X86Assembler::link(code, iter->from, iter->to);
2996 m_codeBlock->ctiReturnAddressVPCMap.add(__ getRelocatedAddress(code, iter->from), iter->bytecodeIndex);
2999 // Link absolute addresses for jsr
3000 for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
3001 X86Assembler::linkAbsoluteAddress(code, iter->addrPosition, iter->target);
3003 for (unsigned i = 0; i < m_codeBlock->propertyAccessInstructions.size(); ++i) {
3004 StructureStubInfo& info = m_codeBlock->propertyAccessInstructions[i];
3005 info.callReturnLocation = X86Assembler::getRelocatedAddress(code, m_propertyAccessCompilationInfo[i].callReturnLocation);
3006 info.hotPathBegin = X86Assembler::getRelocatedAddress(code, m_propertyAccessCompilationInfo[i].hotPathBegin);
3008 for (unsigned i = 0; i < m_codeBlock->callLinkInfos.size(); ++i) {
3009 CallLinkInfo& info = m_codeBlock->callLinkInfos[i];
3010 info.callReturnLocation = X86Assembler::getRelocatedAddress(code, m_callStructureStubCompilationInfo[i].callReturnLocation);
3011 info.hotPathBegin = X86Assembler::getRelocatedAddress(code, m_callStructureStubCompilationInfo[i].hotPathBegin);
3012 info.hotPathOther = X86Assembler::getRelocatedAddress(code, m_callStructureStubCompilationInfo[i].hotPathOther);
3013 info.coldPathOther = X86Assembler::getRelocatedAddress(code, m_callStructureStubCompilationInfo[i].coldPathOther);
3016 m_codeBlock->ctiCode = code;
3019 void JIT::privateCompileGetByIdSelf(Structure* structure, size_t cachedOffset, void* returnAddress)
3021 // Check eax is an object of the right Structure.
3022 __ testl_i32r(JSImmediate::TagMask, X86::eax);
3023 JmpSrc failureCases1 = __ jne();
3024 JmpSrc failureCases2 = checkStructure(X86::eax, structure);
3026 // Checks out okay! - getDirectOffset
3027 __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
3028 __ movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
3031 void* code = __ executableCopy();
3033 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
3034 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
3036 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
3038 ctiRepatchCallByReturnAddress(returnAddress, code);
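// (In rough C terms the stub generated above is:
//     if (!isCell(value) || value->m_structure != structure)
//         goto cti_op_get_by_id_self_fail;
//     result = value->m_propertyStorage[cachedOffset];
// ctiRepatchCallByReturnAddress() then redirects the original call site so
// later executions enter the stub directly.)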
3041 void JIT::privateCompileGetByIdProto(Structure* structure, Structure* prototypeStructure, size_t cachedOffset, void* returnAddress, CallFrame* callFrame)
3043 #if USE(CTI_REPATCH_PIC)
3044 StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
3046 // We don't want to repatch more than once - subsequent misses go through cti_op_get_by_id_proto_list.
3047 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));
3049 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
3050 // referencing the prototype object - let's speculatively load its table nice and early!)
3051 JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
3052 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
3053 __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
3055 // Check eax is an object of the right Structure.
3056 JmpSrc failureCases1 = checkStructure(X86::eax, structure);
3058 // Check that the prototype object's Structure has not changed.
3059 Structure** prototypeStructureAddress = &(protoObject->m_structure);
3060 __ cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructure), prototypeStructureAddress);
3061 JmpSrc failureCases2 = __ jne();
3063 // Checks out okay! - getDirectOffset
3064 __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
3066 JmpSrc success = __ jmp();
3068 void* code = __ executableCopy();
3070 // Use the repatch information to link the failure cases back to the original slow case routine.
3071 void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
3072 X86Assembler::link(code, failureCases1, slowCaseBegin);
3073 X86Assembler::link(code, failureCases2, slowCaseBegin);
3075 // On success return to the hot path code, at a point where it will perform the store to dest for us.
3076 intptr_t successDest = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
3077 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
3079 // Track the stub we have created so that it will be deleted later.
3080 info.stubRoutine = code;
3082 // Finally, repatch the jump to the slow case back in the hot path to jump here instead.
3083 intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
3084 X86Assembler::repatchBranchOffset(jmpLocation, code);
3086 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
3087 // referencing the prototype object - let's speculatively load its table nice and early!)
3088 JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
3089 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
3090 __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
3092 // Check eax is an object of the right Structure.
3093 __ testl_i32r(JSImmediate::TagMask, X86::eax);
3094 JmpSrc failureCases1 = __ jne();
3095 JmpSrc failureCases2 = checkStructure(X86::eax, structure);
3097 // Check that the prototype object's Structure has not changed.
3098 Structure** prototypeStructureAddress = &(protoObject->m_structure);
3099 __ cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructure), prototypeStructureAddress);
3100 JmpSrc failureCases3 = __ jne();
3102 // Checks out okay! - getDirectOffset
3103 __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
3107 void* code = __ executableCopy();
3109 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
3110 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
3111 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
3113 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
3115 ctiRepatchCallByReturnAddress(returnAddress, code);
3119 #if USE(CTI_REPATCH_PIC)
3120 void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
3122 JmpSrc failureCase = checkStructure(X86::eax, structure);
3123 __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
3124 __ movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
3125 JmpSrc success = __ jmp();
3127 void* code = __ executableCopy();
3130 // Use the repatch information to link the failure cases back to the original slow case routine.
3131 void* lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
3132 if (!lastProtoBegin)
3133 lastProtoBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
3135 X86Assembler::link(code, failureCase, lastProtoBegin);
3137 // On success return to the hot path code, at a point where it will perform the store to dest for us.
3138 intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
3139 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
3142 polymorphicStructures->list[currentIndex].set(cachedOffset, code, structure);
3144 // Finally, repatch the jump to the slow case back in the hot path to jump here instead.
3145 intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
3146 X86Assembler::repatchBranchOffset(jmpLocation, code);
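// (Self-list stubs chain linearly: each new stub's failure case jumps to the
// previously generated stub - lastProtoBegin - and the oldest entry falls back
// to the generic slow case call, so a polymorphic site tests one Structure per
// stub until one hits or the list is exhausted.)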
3149 void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
3151 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
3152 // referencing the prototype object - let's speculatively load its table nice and early!)
3153 JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
3154 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
3155 __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    // Check eax is an object of the right Structure.
    JmpSrc failureCases1 = checkStructure(X86::eax, structure);

    // Check the prototype object's Structure has not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
    __ cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructure), prototypeStructureAddress);
    JmpSrc failureCases2 = __ jne();

    // Checks out okay! - getDirectOffset
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);

    JmpSrc success = __ jmp();

    void* code = __ executableCopy();

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
    X86Assembler::link(code, failureCases1, lastProtoBegin);
    X86Assembler::link(code, failureCases2, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    structure->ref();
    prototypeStructure->ref();
    prototypeStructures->list[currentIndex].set(cachedOffset, code, structure, prototypeStructure);
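    // (The list entry takes its own refs on the Structures: the stub embeds raw Structure
    // pointers, so they must stay alive until the entry is destroyed and the refs dropped.)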

    // Finally repatch the jump to slow case back in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
}

void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
{
    ASSERT(count);

    Vector<JmpSrc> bucketsOfFail;

    // Check eax is an object of the right Structure.
    bucketsOfFail.append(checkStructure(X86::eax, structure));

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure has not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
        __ cmpl_i32m(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
        bucketsOfFail.append(__ jne());
    }
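    // (Note the ordering in the loop above: protoObject is fetched via currStructure before
    // currStructure advances to chainEntries[i], so each emitted compare checks the fetched
    // prototype's Structure against the entry recorded for that hop in the chain.)
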
    ASSERT(protoObject);

    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    JmpSrc success = __ jmp();

    void* code = __ executableCopy();

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;

    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
        X86Assembler::link(code, bucketsOfFail[i], lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    structure->ref();
    chain->ref();
    prototypeStructures->list[currentIndex].set(cachedOffset, code, structure, chain);

    // Finally repatch the jump to slow case back in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
}

void JIT::privateCompileGetByIdChain(Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, void* returnAddress, CallFrame* callFrame)
{
#if USE(CTI_REPATCH_PIC)
    StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);

    // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));

    ASSERT(count);

    Vector<JmpSrc> bucketsOfFail;

    // Check eax is an object of the right Structure.
    bucketsOfFail.append(checkStructure(X86::eax, structure));

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure has not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
        __ cmpl_i32m(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
        bucketsOfFail.append(__ jne());
    }
    ASSERT(protoObject);

    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    JmpSrc success = __ jmp();

    void* code = __ executableCopy();

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;

    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
        X86Assembler::link(code, bucketsOfFail[i], slowCaseBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    intptr_t successDest = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    info.stubRoutine = code;

    // Finally repatch the jump to slow case back in the hot path to jump here instead.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
#else
    ASSERT(count);

    Vector<JmpSrc> bucketsOfFail;

    // Check eax is an object of the right Structure.
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    bucketsOfFail.append(__ jne());
    bucketsOfFail.append(checkStructure(X86::eax, structure));

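    // (Unlike the CTI_REPATCH_PIC stubs above, this standalone stub re-checks the immediate
    // tag: it is entered via a repatched call from the slow path rather than from the hot
    // path's structure check, so eax may still be an immediate here.)
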
    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure has not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
        __ cmpl_i32m(reinterpret_cast<uint32_t>(currStructure), prototypeStructureAddress);
        bucketsOfFail.append(__ jne());
    }
    ASSERT(protoObject);

    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    __ movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    __ movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    __ ret();

    void* code = __ executableCopy();

    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
        X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));

    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
#endif
}

void JIT::privateCompilePutByIdReplace(Structure* structure, size_t cachedOffset, void* returnAddress)
{
    // Check eax is an object of the right Structure.
    __ testl_i32r(JSImmediate::TagMask, X86::eax);
    JmpSrc failureCases1 = __ jne();
    JmpSrc failureCases2 = checkStructure(X86::eax, structure);

    // checks out okay! - putDirectOffset
    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    __ movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
    __ ret();
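
    // Taken together, the code emitted above behaves roughly like this sketch (not emitted source):
    //
    //     if (eax & JSImmediate::TagMask)             // immediate, not a cell
    //         goto cti_op_put_by_id_fail;
    //     if (eax->m_structure != structure)          // shape changed since we cached
    //         goto cti_op_put_by_id_fail;
    //     eax->m_propertyStorage[cachedOffset] = edx; // value to store arrives in edx
    //     return;
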
    void* code = __ executableCopy();

    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));

    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);