2 * Copyright (C) 2008 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "CodeBlock.h"
33 #include "JSFunction.h"
35 #include "wrec/WREC.h"
36 #include "ResultType.h"
39 #include <sys/sysctl.h>
// Build-time variant of isSSE2Present(): no runtime detection needed, since the
// target (per the inline comment below) is guaranteed to support SSE2.
// NOTE(review): this excerpt elides the function's braces and the surrounding
// #if that selects this variant over the cpuid-based one below.
48 static inline bool isSSE2Present()
50 return true; // All X86 Macs are guaranteed to support at least SSE2
// Runtime variant of isSSE2Present(): executes cpuid function 1 and tests the
// SSE2 feature bit (bit 26) in the returned feature flags.
// NOTE(review): the excerpt elides the asm block's braces, the read of the
// flags register into 'flags', and the caching of 'present' — confirm against
// the full source before relying on exact control flow.
55 static bool isSSE2Present()
57 static const int SSE2FeatureBit = 1 << 26;
64 mov eax, 1 // cpuid function 1 gives us the standard feature set
// GCC builds have no equivalent inline asm yet (see FIXME below).
70 // FIXME: Add GCC code to do above asm
72 present = (flags & SSE2FeatureBit) != 0;
// Static checker object — presumably runs the cpuid probe once at startup;
// TODO confirm SSE2Check's definition lies in the elided lines.
76 static SSE2Check check;
// The trampolines below hard-code stack offsets for the CTI argument slots;
// these asserts pin the enum values those offsets are derived from (0x0C and
// 0x0E, referenced as 0x30 = 0x0C*4 and 0x38 = 0x0E*4 in the asm).
82 COMPILE_ASSERT(CTI_ARGS_code == 0xC, CTI_ARGS_code_is_C);
83 COMPILE_ASSERT(CTI_ARGS_callFrame == 0xE, CTI_ARGS_callFrame_is_E);
// GCC/x86 inline-assembly trampolines. Symbol names need a leading underscore
// on some platforms (first SYMBOL_STRING variant) but not others (second);
// the selecting #if/#else lines are elided in this excerpt.
85 #if COMPILER(GCC) && PLATFORM(X86)
88 #define SYMBOL_STRING(name) "_" #name
90 #define SYMBOL_STRING(name) #name
// ctiTrampoline: entry point from C++ into JIT-generated code. Reserves 0x24
// bytes of outgoing-argument space, seeds esi with 512 (presumably the timeout
// tick counter consumed by emitSlowScriptCheck — TODO confirm), loads the
// CallFrame argument into edi, then calls the code pointer argument.
94 ".globl " SYMBOL_STRING(ctiTrampoline) "\n"
95 SYMBOL_STRING(ctiTrampoline) ":" "\n"
98 "subl $0x24, %esp" "\n"
99 "movl $512, %esi" "\n"
100 "movl 0x38(%esp), %edi" "\n" // Ox38 = 0x0E * 4, 0x0E = CTI_ARGS_callFrame (see assertion above)
101 "call *0x30(%esp)" "\n" // Ox30 = 0x0C * 4, 0x0C = CTI_ARGS_code (see assertion above)
102 "addl $0x24, %esp" "\n"
// ctiVMThrowTrampoline: routes a VM exception to Machine::cti_vm_throw. The
// two mangled-name call targets correspond to the two CTI_ARGUMENT calling
// conventions selected by the #if/#else below (fastcall passes args in ecx;
// the other variant stores esp into the first stack slot).
109 ".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
110 SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
111 #if USE(CTI_ARGUMENT)
112 #if USE(FAST_CALL_CTI_ARGUMENT)
113 "movl %esp, %ecx" "\n"
115 "movl %esp, 0(%esp)" "\n"
117 "call " SYMBOL_STRING(_ZN3JSC7Machine12cti_vm_throwEPPv) "\n"
119 "call " SYMBOL_STRING(_ZN3JSC7Machine12cti_vm_throwEPvz) "\n"
121 "addl $0x24, %esp" "\n"
// MSVC equivalents of the GCC trampolines above, written as naked functions so
// the compiler emits no prologue/epilogue around the hand-written asm.
// NOTE(review): most of the asm bodies (prologue, esi seeding, ret) are elided
// in this excerpt; only the argument load and indirect call are visible.
131 __declspec(naked) JSValue* ctiTrampoline(void* code, RegisterFile*, CallFrame*, JSValue** exception, Profiler**, JSGlobalData*)
// Load the CallFrame argument into edi (0x38 = CTI_ARGS_callFrame * 4, per the
// COMPILE_ASSERTs above), then call the JIT code pointer argument.
139 mov edi, [esp + 0x38];
140 call [esp + 0x30]; // Ox30 = 0x0C * 4, 0x0C = CTI_ARGS_code (see assertion above)
148 __declspec(naked) void ctiVMThrowTrampoline()
152 call JSC::Machine::cti_vm_throw;
// Returns true if virtual register 'src' refers to a constant: constants are
// laid out immediately after the numVars local-variable registers.
164 ALWAYS_INLINE bool CTI::isConstant(int src)
166 return src >= m_codeBlock->numVars && src < m_codeBlock->numVars + m_codeBlock->numConstants;
// Returns the JSValue* for a constant register index (valid only when
// isConstant(src) holds; no bounds check is performed here).
169 ALWAYS_INLINE JSValue* CTI::getConstant(CallFrame* callFrame, int src)
171 return m_codeBlock->constantRegisters[src - m_codeBlock->numVars].jsValue(callFrame);
174 // get arg puts an arg from the SF register array into a h/w register
// Constants are materialized directly as 32-bit immediates; other operands are
// loaded from the register file, whose base pointer lives in edi.
175 ALWAYS_INLINE void CTI::emitGetArg(int src, X86Assembler::RegisterID dst)
177 // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
178 if (isConstant(src)) {
179 JSValue* js = getConstant(m_callFrame, src);
180 m_jit.movl_i32r(reinterpret_cast<unsigned>(js), dst);
// Non-constant path (the 'else' line is elided in this excerpt): load
// src * sizeof(Register) bytes off the callFrame base in edi.
182 m_jit.movl_mr(src * sizeof(Register), X86::edi, dst);
185 // get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
// 'offset' is the outgoing-argument slot; the extra sizeof(void*) skips the
// return-address slot at the top of the stack. Constants are stored as
// immediates; other operands go through 'scratch'.
186 ALWAYS_INLINE void CTI::emitGetPutArg(unsigned src, unsigned offset, X86Assembler::RegisterID scratch)
188 if (isConstant(src)) {
189 JSValue* js = getConstant(m_callFrame, src);
190 m_jit.movl_i32m(reinterpret_cast<unsigned>(js), offset + sizeof(void*), X86::esp);
// Non-constant path (the 'else' line is elided): register file -> scratch -> stack.
192 m_jit.movl_mr(src * sizeof(Register), X86::edi, scratch);
193 m_jit.movl_rm(scratch, offset + sizeof(void*), X86::esp);
197 // puts an arg onto the stack, as an arg to a context threaded function.
// Stores a hardware register into outgoing-argument slot 'offset'
// (offset + sizeof(void*) skips the return-address slot).
198 ALWAYS_INLINE void CTI::emitPutArg(X86Assembler::RegisterID src, unsigned offset)
200 m_jit.movl_rm(src, offset + sizeof(void*), X86::esp);
// Same as emitPutArg, but stores a compile-time-known 32-bit constant.
203 ALWAYS_INLINE void CTI::emitPutArgConstant(unsigned value, unsigned offset)
205 m_jit.movl_i32m(value, offset + sizeof(void*), X86::esp);
// If 'src' is a constant register holding an immediate number, returns that
// JSValue*; otherwise returns 0 (the non-constant/non-number fallthrough
// return is elided in this excerpt).
208 ALWAYS_INLINE JSValue* CTI::getConstantImmediateNumericArg(unsigned src)
210 if (isConstant(src)) {
211 JSValue* js = getConstant(m_callFrame, src);
212 return JSImmediate::isNumber(js) ? js : 0;
// CTI parameters live in the outgoing-argument area on the machine stack;
// 'name' is a slot index (e.g. CTI_ARGS_callFrame) scaled by sizeof(void*).
// Store a constant pointer value into a CTI parameter slot.
217 ALWAYS_INLINE void CTI::emitPutCTIParam(void* value, unsigned name)
219 m_jit.movl_i32m(reinterpret_cast<intptr_t>(value), name * sizeof(void*), X86::esp);
// Store a register into a CTI parameter slot.
222 ALWAYS_INLINE void CTI::emitPutCTIParam(X86Assembler::RegisterID from, unsigned name)
224 m_jit.movl_rm(from, name * sizeof(void*), X86::esp);
// Load a CTI parameter slot into a register.
227 ALWAYS_INLINE void CTI::emitGetCTIParam(unsigned name, X86Assembler::RegisterID to)
229 m_jit.movl_mr(name * sizeof(void*), X86::esp, to);
// The call-frame header is addressed off edi (the current CallFrame base);
// 'entry' is a RegisterFile::CallFrameHeaderEntry index scaled by sizeof(Register).
232 ALWAYS_INLINE void CTI::emitPutToCallFrameHeader(X86Assembler::RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
234 m_jit.movl_rm(from, entry * sizeof(Register), X86::edi);
237 ALWAYS_INLINE void CTI::emitGetFromCallFrameHeader(RegisterFile::CallFrameHeaderEntry entry, X86Assembler::RegisterID to)
239 m_jit.movl_mr(entry * sizeof(Register), X86::edi, to);
// Store a hardware register into virtual register 'dst' in the register file.
242 ALWAYS_INLINE void CTI::emitPutResult(unsigned dst, X86Assembler::RegisterID from)
244 m_jit.movl_rm(from, dst * sizeof(Register), X86::edi);
245 // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
// Initializes virtual register 'dst' to the undefined value (stored as an
// immediate pointer into the register file off edi).
248 ALWAYS_INLINE void CTI::emitInitRegister(unsigned dst)
250 m_jit.movl_i32m(reinterpret_cast<unsigned>(jsUndefined()), dst * sizeof(Register), X86::edi);
251 // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
// Sampling-tool flag: set to 1 around calls into C++ helpers (see emitCTICall)
// so the sampler can attribute time to helper code vs. JIT code.
254 #if ENABLE(SAMPLING_TOOL)
255 unsigned inCalledCode = 0;
// Overwrites a return address on the stack (body elided in this excerpt).
258 void ctiSetReturnAddress(void** where, void* what)
// Repatches the target of a call instruction given the return address that the
// call pushed: the 4 bytes immediately before 'where' are the call's relative
// displacement, so the new value is the pc-relative distance to 'what'.
263 void ctiRepatchCallByReturnAddress(void* where, void* what)
265 (static_cast<void**>(where))[-1] = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(what) - reinterpret_cast<uintptr_t>(where));
// Debug aid: prints a one-character type classification for each of two
// operands when at least one is a known constant. Codes: i=immediate number,
// b=boolean, u=undefined, n=null, s=string, o=object, ?=other; non-constant
// operands presumably stay '*' (the which1/which2 initializations and the
// assignment lines are elided in this excerpt — TODO confirm).
270 void CTI::printOpcodeOperandTypes(unsigned src1, unsigned src2)
273 if (isConstant(src1)) {
274 JSValue* js = getConstant(m_callFrame, src1);
276 JSImmediate::isImmediate(js) ?
277 (JSImmediate::isNumber(js) ? 'i' :
278 JSImmediate::isBoolean(js) ? 'b' :
279 js->isUndefined() ? 'u' :
280 js->isNull() ? 'n' : '?')
282 (js->isString() ? 's' :
283 js->isObject() ? 'o' :
287 if (isConstant(src2)) {
288 JSValue* js = getConstant(m_callFrame, src2);
290 JSImmediate::isImmediate(js) ?
291 (JSImmediate::isNumber(js) ? 'i' :
292 JSImmediate::isBoolean(js) ? 'b' :
293 js->isUndefined() ? 'u' :
294 js->isNull() ? 'n' : '?')
296 (js->isString() ? 's' :
297 js->isObject() ? 'o' :
// Bitwise | (not ||) is used deliberately or incidentally here — both operands
// are side-effect-free comparisons, so behavior is equivalent.
300 if ((which1 != '*') | (which2 != '*'))
301 fprintf(stderr, "Types %c %c\n", which1, which2);
// Emits an indirect call through register 'r', recording it in m_calls so the
// link phase can associate it with the opcode at 'opcodeIndex'.
306 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, X86::RegisterID r)
308 m_jit.emitRestoreArgumentReference();
309 X86Assembler::JmpSrc call = m_jit.emitCall(r);
310 m_calls.append(CallRecord(call, opcodeIndex));
// The six emitCTICall overloads below are identical except for the helper
// function type (CTIHelper_j/p/b/v/s/2), which determines the CallRecord
// flavor appended for later linking. Each one: optionally flags the sampling
// tool that we are entering C++ helper code, restores the argument reference,
// spills the current CallFrame (edi) into its CTI parameter slot, emits the
// (to-be-linked) call, records it, then clears the sampling flag.
// NOTE(review): the #endif lines, the 'return call;' statements, and the
// function braces are elided throughout this excerpt.
315 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCTICall(unsigned opcodeIndex, CTIHelper_j helper)
317 #if ENABLE(SAMPLING_TOOL)
318 m_jit.movl_i32m(1, &inCalledCode);
320 m_jit.emitRestoreArgumentReference();
321 emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
322 X86Assembler::JmpSrc call = m_jit.emitCall();
323 m_calls.append(CallRecord(call, helper, opcodeIndex));
324 #if ENABLE(SAMPLING_TOOL)
325 m_jit.movl_i32m(0, &inCalledCode);
331 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCTICall(unsigned opcodeIndex, CTIHelper_p helper)
333 #if ENABLE(SAMPLING_TOOL)
334 m_jit.movl_i32m(1, &inCalledCode);
336 m_jit.emitRestoreArgumentReference();
337 emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
338 X86Assembler::JmpSrc call = m_jit.emitCall();
339 m_calls.append(CallRecord(call, helper, opcodeIndex));
340 #if ENABLE(SAMPLING_TOOL)
341 m_jit.movl_i32m(0, &inCalledCode);
347 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCTICall(unsigned opcodeIndex, CTIHelper_b helper)
349 #if ENABLE(SAMPLING_TOOL)
350 m_jit.movl_i32m(1, &inCalledCode);
352 m_jit.emitRestoreArgumentReference();
353 emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
354 X86Assembler::JmpSrc call = m_jit.emitCall();
355 m_calls.append(CallRecord(call, helper, opcodeIndex));
356 #if ENABLE(SAMPLING_TOOL)
357 m_jit.movl_i32m(0, &inCalledCode);
363 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCTICall(unsigned opcodeIndex, CTIHelper_v helper)
365 #if ENABLE(SAMPLING_TOOL)
366 m_jit.movl_i32m(1, &inCalledCode);
368 m_jit.emitRestoreArgumentReference();
369 emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
370 X86Assembler::JmpSrc call = m_jit.emitCall();
371 m_calls.append(CallRecord(call, helper, opcodeIndex));
372 #if ENABLE(SAMPLING_TOOL)
373 m_jit.movl_i32m(0, &inCalledCode);
379 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCTICall(unsigned opcodeIndex, CTIHelper_s helper)
381 #if ENABLE(SAMPLING_TOOL)
382 m_jit.movl_i32m(1, &inCalledCode);
384 m_jit.emitRestoreArgumentReference();
385 emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
386 X86Assembler::JmpSrc call = m_jit.emitCall();
387 m_calls.append(CallRecord(call, helper, opcodeIndex));
388 #if ENABLE(SAMPLING_TOOL)
389 m_jit.movl_i32m(0, &inCalledCode);
395 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCTICall(unsigned opcodeIndex, CTIHelper_2 helper)
397 #if ENABLE(SAMPLING_TOOL)
398 m_jit.movl_i32m(1, &inCalledCode);
400 m_jit.emitRestoreArgumentReference();
401 emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
402 X86Assembler::JmpSrc call = m_jit.emitCall();
403 m_calls.append(CallRecord(call, helper, opcodeIndex));
404 #if ENABLE(SAMPLING_TOOL)
405 m_jit.movl_i32m(0, &inCalledCode);
// Jump to a slow case unless 'reg' holds a JSCell: immediates have tag bits
// set, so a nonzero (TagMask & reg) test means "not a cell" -> jne to slow case.
411 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotJSCell(X86Assembler::RegisterID reg, unsigned opcodeIndex)
413 m_jit.testl_i32r(JSImmediate::TagMask, reg);
414 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), opcodeIndex));
// Jump to a slow case unless 'reg' holds an immediate number: the integer tag
// bit must be set, so a zero test result (je) means "not an immediate number".
417 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImmNum(X86Assembler::RegisterID reg, unsigned opcodeIndex)
419 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, reg);
420 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), opcodeIndex));
// Checks two registers at once: AND the values into ecx — the integer tag bit
// survives only if set in both — then reuse the single-register check.
423 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImmNums(X86Assembler::RegisterID reg1, X86Assembler::RegisterID reg2, unsigned opcodeIndex)
425 m_jit.movl_rr(reg1, X86::ecx);
426 m_jit.andl_rr(reg2, X86::ecx);
427 emitJumpSlowCaseIfNotImmNum(X86::ecx, opcodeIndex);
// Strips the integer tag bit from a compile-time-known immediate number,
// yielding a raw value usable directly in tagged-immediate arithmetic.
430 ALWAYS_INLINE unsigned CTI::getDeTaggedConstantImmediate(JSValue* imm)
432 ASSERT(JSImmediate::isNumber(imm));
433 return reinterpret_cast<unsigned>(imm) & ~JSImmediate::TagBitTypeInteger;
// Runtime de-tag: subtract the tag bit (known set for immediate numbers).
436 ALWAYS_INLINE void CTI::emitFastArithDeTagImmediate(X86Assembler::RegisterID reg)
438 m_jit.subl_i8r(JSImmediate::TagBitTypeInteger, reg);
// De-tag and return a jump taken when the de-tagged value is zero (used by
// op_mul to detect a zero operand — see compileBinaryArithOp).
441 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitFastArithDeTagImmediateJumpIfZero(X86Assembler::RegisterID reg)
443 m_jit.subl_i8r(JSImmediate::TagBitTypeInteger, reg);
444 return m_jit.emitUnlinkedJe();
// Re-tag by adding the tag bit back (valid when the bit is known clear).
447 ALWAYS_INLINE void CTI::emitFastArithReTagImmediate(X86Assembler::RegisterID reg)
449 m_jit.addl_i8r(JSImmediate::TagBitTypeInteger, reg);
// Re-tag with OR instead of ADD — safe whether or not the tag bit is already set.
452 ALWAYS_INLINE void CTI::emitFastArithPotentiallyReTagImmediate(X86Assembler::RegisterID reg)
454 m_jit.orl_i32r(JSImmediate::TagBitTypeInteger, reg);
// Convert a tagged immediate to a machine int: arithmetic shift right by 1
// (immediates store the integer in the upper 31 bits, per this encoding).
457 ALWAYS_INLINE void CTI::emitFastArithImmToInt(X86Assembler::RegisterID reg)
459 m_jit.sarl_i8r(1, reg);
// Convert a machine int to a tagged immediate: shift left via add-to-self,
// branching to the slow case on signed overflow (value won't fit in 31 bits).
462 ALWAYS_INLINE void CTI::emitFastArithIntToImmOrSlowCase(X86Assembler::RegisterID reg, unsigned opcodeIndex)
464 m_jit.addl_rr(reg, reg);
465 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), opcodeIndex));
466 emitFastArithReTagImmediate(reg);
// Same conversion without the overflow check — caller guarantees the range.
469 ALWAYS_INLINE void CTI::emitFastArithIntToImmNoCheck(X86Assembler::RegisterID reg)
471 m_jit.addl_rr(reg, reg);
472 emitFastArithReTagImmediate(reg);
// Convert a 0/1 machine value into a tagged boolean immediate: shift the bit
// into the extended-payload position and OR in the full boolean tag.
475 ALWAYS_INLINE void CTI::emitTagAsBoolImmediate(X86Assembler::RegisterID reg)
477 m_jit.shl_i8r(JSImmediate::ExtendedPayloadShift, reg);
478 m_jit.orl_i32r(JSImmediate::FullTagTypeBool, reg);
// CTI compiler instance for one CodeBlock. Sizes the per-instruction label
// table and the structure-stub compilation info up front; both tolerate a
// null codeBlock (used, presumably, for stand-alone stub generation — TODO
// confirm against callers).
481 CTI::CTI(Machine* machine, CallFrame* callFrame, CodeBlock* codeBlock)
482 : m_jit(machine->jitCodeBuffer())
484 , m_callFrame(callFrame)
485 , m_codeBlock(codeBlock)
486 , m_labels(codeBlock ? codeBlock->instructions.size() : 0)
487 , m_structureStubCompilationInfo(codeBlock ? codeBlock->structureIDInstructions.size() : 0)
// Boilerplate expanders used inside privateCompileMainPass's opcode switch:
// push the operand(s) as helper arguments, call the cti_<name> C++ helper,
// and store its result into the destination virtual register.
491 #define CTI_COMPILE_BINARY_OP(name) \
493 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
494 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
495 emitCTICall(i, Machine::cti_##name); \
496 emitPutResult(instruction[i + 1].u.operand); \
501 #define CTI_COMPILE_UNARY_OP(name) \
503 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
504 emitCTICall(i, Machine::cti_##name); \
505 emitPutResult(instruction[i + 1].u.operand); \
// Sampling-tool bookkeeping: the opcode currently executing in JIT code.
510 #if ENABLE(SAMPLING_TOOL)
511 OpcodeID currentOpcodeID = static_cast<OpcodeID>(-1);
// Fills in the header of the callee's new call frame (base in edx):
// CodeBlock (returned by the preceding helper in eax), a null
// OptionalCalleeArguments, the callee JSFunction, its scope chain node,
// the argument count, and the caller frame (current edi).
514 void CTI::compileOpCallInitializeCallFrame(unsigned callee, unsigned argCount)
516 emitGetArg(callee, X86::ecx); // Load callee JSFunction into ecx
517 m_jit.movl_rm(X86::eax, RegisterFile::CodeBlock * static_cast<int>(sizeof(Register)), X86::edx); // callee CodeBlock was returned in eax
518 m_jit.movl_i32m(reinterpret_cast<unsigned>(nullJSValue), RegisterFile::OptionalCalleeArguments * static_cast<int>(sizeof(Register)), X86::edx);
519 m_jit.movl_rm(X86::ecx, RegisterFile::Callee * static_cast<int>(sizeof(Register)), X86::edx);
521 m_jit.movl_mr(OBJECT_OFFSET(JSFunction, m_scopeChain) + OBJECT_OFFSET(ScopeChain, m_node), X86::ecx, X86::ecx); // newScopeChain
522 m_jit.movl_i32m(argCount, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register)), X86::edx);
523 m_jit.movl_rm(X86::edi, RegisterFile::CallerFrame * static_cast<int>(sizeof(Register)), X86::edx);
524 m_jit.movl_rm(X86::ecx, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register)), X86::edx);
// Compiles op_call / op_call_eval / op_construct. Overall shape:
//  1. marshal helper arguments (layout differs for construct vs. call);
//  2. set up the 'this' value in the first argument register;
//  3. for eval, try cti_op_call_eval first and skip everything on success;
//  4. inline-check whether the callee is a JSFunction (vptr compare) and
//     dispatch to either the JS-function or host-function C++ helper;
//  5. for the JS-function path, build the new call frame and call the
//     callee's generated ctiCode (slow case if not yet generated).
// NOTE(review): several #if/#else/#endif lines and braces are elided here, so
// exact preprocessor grouping must be confirmed against the full source.
527 void CTI::compileOpCall(Instruction* instruction, unsigned i, CompileOpCallType type)
529 int dst = instruction[i + 1].u.operand;
530 int callee = instruction[i + 2].u.operand;
531 int firstArg = instruction[i + 4].u.operand;
532 int argCount = instruction[i + 5].u.operand;
533 int registerOffset = instruction[i + 6].u.operand;
535 if (type == OpCallEval)
536 emitGetPutArg(instruction[i + 3].u.operand, 16, X86::ecx);
538 if (type == OpConstruct) {
539 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 20);
540 emitPutArgConstant(argCount, 16);
541 emitPutArgConstant(registerOffset, 12);
542 emitPutArgConstant(firstArg, 8);
543 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
// Non-construct argument layout (the 'else' line is elided).
545 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 12);
546 emitPutArgConstant(argCount, 8);
547 emitPutArgConstant(registerOffset, 4);
549 int thisVal = instruction[i + 3].u.operand;
550 if (thisVal == missingThisObjectMarker()) {
551 // FIXME: should this be loaded dynamically off m_callFrame?
552 m_jit.movl_i32m(reinterpret_cast<unsigned>(m_callFrame->globalThisValue()), firstArg * sizeof(Register), X86::edi);
554 emitGetArg(thisVal, X86::ecx);
555 emitPutResult(firstArg, X86::ecx);
559 X86Assembler::JmpSrc wasEval;
560 if (type == OpCallEval) {
561 emitGetPutArg(callee, 0, X86::ecx);
562 emitCTICall(i, Machine::cti_op_call_eval);
// impossibleValue() is the sentinel cti_op_call_eval returns when the target
// was not actually an eval; jne below skips the generic call path otherwise.
564 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(JSImmediate::impossibleValue()), X86::eax);
565 wasEval = m_jit.emitUnlinkedJne();
567 // this sets up the first arg to op_cti_call (func), and explicitly leaves the value in ecx (checked just below).
568 emitGetArg(callee, X86::ecx);
570 // this sets up the first arg to op_cti_call (func), and explicitly leaves the value in ecx (checked just below).
571 emitGetPutArg(callee, 0, X86::ecx);
574 // Fast check for JS function.
575 m_jit.testl_i32r(JSImmediate::TagMask, X86::ecx);
576 X86Assembler::JmpSrc isNotObject = m_jit.emitUnlinkedJne();
577 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsFunctionVptr), X86::ecx);
578 X86Assembler::JmpSrc isJSFunction = m_jit.emitUnlinkedJe();
579 m_jit.link(isNotObject, m_jit.label());
581 // This handles host functions
582 emitCTICall(i, ((type == OpConstruct) ? Machine::cti_op_construct_NotJSConstruct : Machine::cti_op_call_NotJSFunction));
584 X86Assembler::JmpSrc wasNotJSFunction = m_jit.emitUnlinkedJmp();
585 m_jit.link(isJSFunction, m_jit.label());
587 // This handles JSFunctions
588 emitCTICall(i, (type == OpConstruct) ? Machine::cti_op_construct_JSConstruct : Machine::cti_op_call_JSFunction);
590 compileOpCallInitializeCallFrame(callee, argCount);
592 // load ctiCode from the new codeBlock.
593 m_jit.movl_mr(OBJECT_OFFSET(CodeBlock, ctiCode), X86::eax, X86::eax);
595 // Put the new value of 'callFrame' into edi and onto the stack, too.
596 m_jit.movl_rr(X86::edx, X86::edi)
598 // Check the ctiCode has been generated - if not, this is handled in a slow case.
599 m_jit.testl_rr(X86::eax, X86::eax);
600 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
601 emitCall(i, X86::eax);
603 X86Assembler::JmpDst end = m_jit.label();
604 m_jit.link(wasNotJSFunction, end);
605 if (type == OpCallEval)
606 m_jit.link(wasEval, end);
608 // Put the return value in dst. In the interpreter, op_ret does this.
// Compiles op_stricteq / op_nstricteq ('negated' flips the comparison).
// Fast path: when both operands are immediates a raw pointer compare suffices,
// except that the zero immediate is ambiguous and must fall to the slow case.
// Mixed immediate/cell pairs can be decided immediately: a cell is never
// strict-equal to a (nonzero) immediate, so the answer is jsBoolean(negated).
612 void CTI::compileOpStrictEq(Instruction* instruction, unsigned i, CompileOpStrictEqType type)
614 bool negated = (type == OpNStrictEq);
616 unsigned dst = instruction[i + 1].u.operand;
617 unsigned src1 = instruction[i + 2].u.operand;
618 unsigned src2 = instruction[i + 3].u.operand;
620 emitGetArg(src1, X86::eax);
621 emitGetArg(src2, X86::edx);
623 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
624 X86Assembler::JmpSrc firstNotImmediate = m_jit.emitUnlinkedJe();
625 m_jit.testl_i32r(JSImmediate::TagMask, X86::edx);
626 X86Assembler::JmpSrc secondNotImmediate = m_jit.emitUnlinkedJe();
// Both immediates: compare, materialize 0/1 via setne/sete, then tag as bool.
628 m_jit.cmpl_rr(X86::edx, X86::eax);
630 m_jit.setne_r(X86::eax);
632 m_jit.sete_r(X86::eax);
633 m_jit.movzbl_rr(X86::eax, X86::eax);
634 emitTagAsBoolImmediate(X86::eax);
636 X86Assembler::JmpSrc bothWereImmediates = m_jit.emitUnlinkedJmp();
638 m_jit.link(firstNotImmediate, m_jit.label());
640 // check that edx is immediate but not the zero immediate
641 m_jit.testl_i32r(JSImmediate::TagMask, X86::edx);
642 m_jit.setz_r(X86::ecx);
643 m_jit.movzbl_rr(X86::ecx, X86::ecx); // ecx is now 1 if edx was nonimmediate
644 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::edx);
645 m_jit.sete_r(X86::edx);
646 m_jit.movzbl_rr(X86::edx, X86::edx); // edx is now 1 if edx was the 0 immediate
647 m_jit.orl_rr(X86::ecx, X86::edx);
// Either condition (nonimmediate, or zero immediate) forces the slow case.
649 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJnz(), i));
// Cell vs. nonzero immediate: strict equality is false, so stricteq produces
// jsBoolean(false) and nstricteq jsBoolean(true) — i.e. jsBoolean(negated).
651 m_jit.movl_i32r(reinterpret_cast<uint32_t>(jsBoolean(negated)), X86::eax);
653 X86Assembler::JmpSrc firstWasNotImmediate = m_jit.emitUnlinkedJmp();
655 m_jit.link(secondNotImmediate, m_jit.label());
656 // check that eax is not the zero immediate (we know it must be immediate)
657 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
658 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
660 m_jit.movl_i32r(reinterpret_cast<uint32_t>(jsBoolean(negated)), X86::eax);
662 m_jit.link(bothWereImmediates, m_jit.label());
663 m_jit.link(firstWasNotImmediate, m_jit.label());
// Emitted at loop back-edges: decrements the tick counter in esi and, when it
// reaches zero, calls cti_timeout_check, then reloads the counter from
// machine->m_ticksUntilNextTimeoutCheck (machine reached via the globalData
// CTI parameter).
668 void CTI::emitSlowScriptCheck(unsigned opcodeIndex)
670 m_jit.subl_i8r(1, X86::esi);
671 X86Assembler::JmpSrc skipTimeout = m_jit.emitUnlinkedJne();
672 emitCTICall(opcodeIndex, Machine::cti_timeout_check);
674 emitGetCTIParam(CTI_ARGS_globalData, X86::ecx);
675 m_jit.movl_mr(OBJECT_OFFSET(JSGlobalData, machine), X86::ecx, X86::ecx);
676 m_jit.movl_mr(OBJECT_OFFSET(Machine, m_ticksUntilNextTimeoutCheck), X86::ecx, X86::esi);
677 m_jit.link(skipTimeout, m_jit.label());
// (The comment-block delimiters around the following description are elided
// in this excerpt; the text documents the function declared at line 689.)
681 This is required since number representation is canonical - values representable as a JSImmediate should not be stored in a JSNumberCell.
683 In the common case, the double value from 'xmmSource' is written to the reusable JSNumberCell pointed to by 'jsNumberCell', then 'jsNumberCell'
684 is written to the output SF Register 'dst', and then a jump is planted (stored into *wroteJSNumberCell).
686 However if the value from xmmSource is representable as a JSImmediate, then the JSImmediate value will be written to the output, and flow
687 control will fall through from the code planted.
689 void CTI::putDoubleResultToJSNumberCellOrJSImmediate(X86::XMMRegisterID xmmSource, X86::RegisterID jsNumberCell, unsigned dst, X86Assembler::JmpSrc* wroteJSNumberCell, X86::XMMRegisterID tempXmm, X86::RegisterID tempReg1, X86::RegisterID tempReg2)
691 // convert (double -> JSImmediate -> double), and check if the value is unchanged - in which case the value is representable as a JSImmediate.
// Round-trip: truncate to int, simulate the immediate encode/decode
// (shift left then arithmetic shift right), and convert back to double.
692 m_jit.cvttsd2si_rr(xmmSource, tempReg1);
693 m_jit.addl_rr(tempReg1, tempReg1);
694 m_jit.sarl_i8r(1, tempReg1);
695 m_jit.cvtsi2sd_rr(tempReg1, tempXmm);
696 // Compare & branch if immediate.
697 m_jit.ucomis_rr(tempXmm, xmmSource);
698 X86Assembler::JmpSrc resultIsImm = m_jit.emitUnlinkedJe();
699 X86Assembler::JmpDst resultLookedLikeImmButActuallyIsnt = m_jit.label();
701 // Store the result to the JSNumberCell and jump.
702 m_jit.movsd_rm(xmmSource, OBJECT_OFFSET(JSNumberCell, m_value), jsNumberCell);
703 emitPutResult(dst, jsNumberCell);
704 *wroteJSNumberCell = m_jit.emitUnlinkedJmp();
706 m_jit.link(resultIsImm, m_jit.label());
707 // value == (double)(JSImmediate)value... or at least, it looks that way...
708 // ucomi will report that (0 == -0), and will report true if either input in NaN (result is unordered).
// PF set by ucomisd means unordered (NaN): not representable as an immediate.
709 m_jit.link(m_jit.emitUnlinkedJp(), resultLookedLikeImmButActuallyIsnt); // Actually was a NaN
// pextrw of word 3 grabs the double's sign+exponent bits; 0x8000 with a zero
// integer result identifies -0.0, which has no immediate representation.
710 m_jit.pextrw_irr(3, xmmSource, tempReg2);
711 m_jit.cmpl_i32r(0x8000, tempReg2);
712 m_jit.link(m_jit.emitUnlinkedJe(), resultLookedLikeImmButActuallyIsnt); // Actually was -0
713 // Yes it really really really is representable as a JSImmediate.
714 emitFastArithIntToImmNoCheck(tempReg1)
// NOTE(review): tempReg1 is converted above, but ecx is stored here — this
// excerpt elides lines that may reconcile the two; confirm against full source.
715 emitPutResult(dst, X86::ecx);
// Compiles op_add / op_sub / op_mul. Three strategies, chosen from static
// operand-type info (OperandTypes) and SSE2 availability:
//   - src2 is a reusable JSNumberCell: do the arithmetic in xmm0 and reuse
//     src2's cell for the result (via putDoubleResultToJSNumberCellOrJSImmediate);
//   - else src1 is reusable: symmetric, reusing src1's cell;
//   - else: pure tagged-immediate integer arithmetic with overflow checks.
// Slow cases are appended for non-number operands, overflow, and the -0
// multiplication result (not representable as an immediate).
718 void CTI::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types, unsigned i)
720 StructureID* numberStructureID = m_callFrame->globalData().numberStructureID.get();
721 X86Assembler::JmpSrc wasJSNumberCell1, wasJSNumberCell1b, wasJSNumberCell2, wasJSNumberCell2b;
723 emitGetArg(src1, X86::eax);
724 emitGetArg(src2, X86::edx);
726 if (types.second().isReusable() && isSSE2Present()) {
727 ASSERT(types.second().mightBeNumber());
729 // Check op2 is a number
730 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::edx);
731 X86Assembler::JmpSrc op2imm = m_jit.emitUnlinkedJne();
732 if (!types.second().definitelyIsNumber()) {
733 emitJumpSlowCaseIfNotJSCell(X86::edx, i);
734 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::edx);
735 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
738 // (1) In this case src2 is a reusable number cell.
739 // Slow case if src1 is not a number type.
740 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
741 X86Assembler::JmpSrc op1imm = m_jit.emitUnlinkedJne();
742 if (!types.first().definitelyIsNumber()) {
743 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
744 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
745 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
748 // (1a) if we get here, src1 is also a number cell
749 m_jit.movsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
750 X86Assembler::JmpSrc loadedDouble = m_jit.emitUnlinkedJmp();
751 // (1b) if we get here, src1 is an immediate
752 m_jit.link(op1imm, m_jit.label());
753 emitFastArithImmToInt(X86::eax);
754 m_jit.cvtsi2sd_rr(X86::eax, X86::xmm0);
756 m_jit.link(loadedDouble, m_jit.label());
757 if (opcodeID == op_add)
758 m_jit.addsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
759 else if (opcodeID == op_sub)
760 m_jit.subsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
762 ASSERT(opcodeID == op_mul);
763 m_jit.mulsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
// Reuse src2's cell (edx) for the result; jumps stored for the join below.
766 putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::edx, dst, &wasJSNumberCell2, X86::xmm1, X86::ecx, X86::eax);
767 wasJSNumberCell2b = m_jit.emitUnlinkedJmp();
769 // (2) This handles cases where src2 is an immediate number.
770 // Two slow cases - either src1 isn't an immediate, or the subtract overflows.
771 m_jit.link(op2imm, m_jit.label());
772 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
773 } else if (types.first().isReusable() && isSSE2Present()) {
774 ASSERT(types.first().mightBeNumber());
776 // Check op1 is a number
777 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
778 X86Assembler::JmpSrc op1imm = m_jit.emitUnlinkedJne();
779 if (!types.first().definitelyIsNumber()) {
780 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
781 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
782 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
785 // (1) In this case src1 is a reusable number cell.
786 // Slow case if src2 is not a number type.
787 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::edx);
788 X86Assembler::JmpSrc op2imm = m_jit.emitUnlinkedJne();
789 if (!types.second().definitelyIsNumber()) {
790 emitJumpSlowCaseIfNotJSCell(X86::edx, i);
791 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::edx);
792 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
795 // (1a) if we get here, src2 is also a number cell
796 m_jit.movsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm1);
797 X86Assembler::JmpSrc loadedDouble = m_jit.emitUnlinkedJmp();
798 // (1b) if we get here, src2 is an immediate
799 m_jit.link(op2imm, m_jit.label());
800 emitFastArithImmToInt(X86::edx);
801 m_jit.cvtsi2sd_rr(X86::edx, X86::xmm1);
803 m_jit.link(loadedDouble, m_jit.label());
804 m_jit.movsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
805 if (opcodeID == op_add)
806 m_jit.addsd_rr(X86::xmm1, X86::xmm0);
807 else if (opcodeID == op_sub)
808 m_jit.subsd_rr(X86::xmm1, X86::xmm0);
810 ASSERT(opcodeID == op_mul);
811 m_jit.mulsd_rr(X86::xmm1, X86::xmm0);
813 m_jit.movsd_rm(X86::xmm0, OBJECT_OFFSET(JSNumberCell, m_value), X86::eax);
// Reuse src1's cell (eax) for the result.
816 putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::eax, dst, &wasJSNumberCell1, X86::xmm1, X86::ecx, X86::edx);
817 wasJSNumberCell1b = m_jit.emitUnlinkedJmp();
819 // (2) This handles cases where src1 is an immediate number.
820 // Two slow cases - either src2 isn't an immediate, or the subtract overflows.
821 m_jit.link(op1imm, m_jit.label());
822 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
// Integer-only path: both operands must be immediate numbers.
824 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
826 if (opcodeID == op_add) {
827 emitFastArithDeTagImmediate(X86::eax);
828 m_jit.addl_rr(X86::edx, X86::eax);
829 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
830 } else if (opcodeID == op_sub) {
831 m_jit.subl_rr(X86::edx, X86::eax);
832 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
833 emitFastArithReTagImmediate(X86::eax);
835 ASSERT(opcodeID == op_mul);
836 // convert eax & edx from JSImmediates to ints, and check if either are zero
837 emitFastArithImmToInt(X86::edx);
838 X86Assembler::JmpSrc op1Zero = emitFastArithDeTagImmediateJumpIfZero(X86::eax);
839 m_jit.testl_rr(X86::edx, X86::edx);
840 X86Assembler::JmpSrc op2NonZero = m_jit.emitUnlinkedJne();
841 m_jit.link(op1Zero, m_jit.label());
842 // if either input is zero, add the two together, and check if the result is < 0.
843 // If it is, we have a problem (N < 0), (N * 0) == -0, not representatble as a JSImmediate.
844 m_jit.movl_rr(X86::eax, X86::ecx);
845 m_jit.addl_rr(X86::edx, X86::ecx);
846 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJs(), i));
847 // Skip the above check if neither input is zero
848 m_jit.link(op2NonZero, m_jit.label());
849 m_jit.imull_rr(X86::edx, X86::eax);
850 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
851 emitFastArithReTagImmediate(X86::eax);
// Join point: link the jumps planted by the number-cell fast paths above.
855 if (types.second().isReusable() && isSSE2Present()) {
856 m_jit.link(wasJSNumberCell2, m_jit.label());
857 m_jit.link(wasJSNumberCell2b, m_jit.label());
859 else if (types.first().isReusable() && isSSE2Present()) {
860 m_jit.link(wasJSNumberCell1, m_jit.label());
861 m_jit.link(wasJSNumberCell1b, m_jit.label());
// Slow-case companion to compileBinaryArithOp: links every SlowCaseEntry that
// the fast path appended (the count depends on which strategy was emitted and
// which operands were not definitely numbers — the ++iter sequence must mirror
// the append sequence exactly), then falls back to the generic C++ helper
// (cti_op_add / cti_op_sub / cti_op_mul) and stores its result.
865 void CTI::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types, unsigned i)
867 X86Assembler::JmpDst here = m_jit.label();
868 m_jit.link(iter->from, here);
869 if (types.second().isReusable() && isSSE2Present()) {
870 if (!types.first().definitelyIsNumber()) {
871 m_jit.link((++iter)->from, here);
872 m_jit.link((++iter)->from, here);
874 if (!types.second().definitelyIsNumber()) {
875 m_jit.link((++iter)->from, here);
876 m_jit.link((++iter)->from, here);
878 m_jit.link((++iter)->from, here);
879 } else if (types.first().isReusable() && isSSE2Present()) {
880 if (!types.first().definitelyIsNumber()) {
881 m_jit.link((++iter)->from, here);
882 m_jit.link((++iter)->from, here);
884 if (!types.second().definitelyIsNumber()) {
885 m_jit.link((++iter)->from, here);
886 m_jit.link((++iter)->from, here);
888 m_jit.link((++iter)->from, here);
890 m_jit.link((++iter)->from, here);
892 // additional entry point to handle -0 cases.
893 if (opcodeID == op_mul)
894 m_jit.link((++iter)->from, here);
// Generic fallback: pass both operands to the matching C++ arithmetic helper.
896 emitGetPutArg(src1, 0, X86::ecx);
897 emitGetPutArg(src2, 4, X86::ecx);
898 if (opcodeID == op_add)
899 emitCTICall(i, Machine::cti_op_add);
900 else if (opcodeID == op_sub)
901 emitCTICall(i, Machine::cti_op_sub);
903 ASSERT(opcodeID == op_mul);
904 emitCTICall(i, Machine::cti_op_mul);
909 void CTI::privateCompileMainPass()
911 Instruction* instruction = m_codeBlock->instructions.begin();
912 unsigned instructionCount = m_codeBlock->instructions.size();
914 unsigned structureIDInstructionIndex = 0;
916 for (unsigned i = 0; i < instructionCount; ) {
917 m_labels[i] = m_jit.label();
919 #if ENABLE(SAMPLING_TOOL)
920 m_jit.movl_i32m(m_machine->getOpcodeID(instruction[i].u.opcode), ¤tOpcodeID);
923 ASSERT_WITH_MESSAGE(m_machine->isOpcode(instruction[i].u.opcode), "privateCompileMainPass gone bad @ %d", i);
924 switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
926 unsigned src = instruction[i + 2].u.operand;
928 m_jit.movl_i32r(reinterpret_cast<unsigned>(getConstant(m_callFrame, src)), X86::edx);
930 emitGetArg(src, X86::edx);
931 emitPutResult(instruction[i + 1].u.operand, X86::edx);
936 unsigned dst = instruction[i + 1].u.operand;
937 unsigned src1 = instruction[i + 2].u.operand;
938 unsigned src2 = instruction[i + 3].u.operand;
940 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
941 emitGetArg(src2, X86::edx);
942 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
943 m_jit.addl_i32r(getDeTaggedConstantImmediate(value), X86::edx);
944 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
945 emitPutResult(dst, X86::edx);
946 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
947 emitGetArg(src1, X86::eax);
948 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
949 m_jit.addl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
950 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
953 OperandTypes types = OperandTypes::fromInt(instruction[i + 4].u.operand);
954 if (types.first().mightBeNumber() && types.second().mightBeNumber())
955 compileBinaryArithOp(op_add, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
957 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
958 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
959 emitCTICall(i, Machine::cti_op_add);
960 emitPutResult(instruction[i + 1].u.operand);
968 if (m_codeBlock->needsFullScopeChain)
969 emitCTICall(i, Machine::cti_op_end);
970 emitGetArg(instruction[i + 1].u.operand, X86::eax);
971 #if ENABLE(SAMPLING_TOOL)
972 m_jit.movl_i32m(-1, ¤tOpcodeID);
974 m_jit.pushl_m(RegisterFile::ReturnPC * static_cast<int>(sizeof(Register)), X86::edi);
980 unsigned target = instruction[i + 1].u.operand;
981 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
986 int srcDst = instruction[i + 1].u.operand;
987 emitGetArg(srcDst, X86::eax);
988 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
989 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
990 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
991 emitPutResult(srcDst, X86::eax);
996 emitSlowScriptCheck(i);
998 unsigned target = instruction[i + 1].u.operand;
999 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
1003 case op_loop_if_less: {
1004 emitSlowScriptCheck(i);
1006 unsigned target = instruction[i + 3].u.operand;
1007 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1009 emitGetArg(instruction[i + 1].u.operand, X86::edx);
1010 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1011 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
1012 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
1014 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1015 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1016 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1017 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1018 m_jit.cmpl_rr(X86::edx, X86::eax);
1019 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
1024 case op_loop_if_lesseq: {
1025 emitSlowScriptCheck(i);
1027 unsigned target = instruction[i + 3].u.operand;
1028 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1030 emitGetArg(instruction[i + 1].u.operand, X86::edx);
1031 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1032 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
1033 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
1035 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1036 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1037 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1038 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1039 m_jit.cmpl_rr(X86::edx, X86::eax);
1040 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
1045 case op_new_object: {
1046 emitCTICall(i, Machine::cti_op_new_object);
1047 emitPutResult(instruction[i + 1].u.operand);
1051 case op_put_by_id: {
1052 // In order to be able to repatch both the StructureID, and the object offset, we store one pointer,
1053 // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
1054 // such that the StructureID & offset are always at the same distance from this.
1056 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1057 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1059 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
1060 X86Assembler::JmpDst hotPathBegin = m_jit.label();
1061 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
1062 ++structureIDInstructionIndex;
1064 // Jump to a slow case if either the base object is an immediate, or if the StructureID does not match.
1065 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
1066 // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
1067 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1068 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdStructureID);
1069 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1071 // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
1072 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
1073 m_jit.movl_rm(X86::edx, repatchGetByIdDefaultOffset, X86::eax);
1074 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdPropertyMapOffset);
1079 case op_get_by_id: {
1080 // As for put_by_id, get_by_id requires the offset of the StructureID and the offset of the access to be repatched.
1081 // Additionally, for get_by_id we need to repatch the offset of the branch to the slow case (we repatch this to jump
1082 // to array-length / prototype access trampolines), and finally we also store the property-map access offset as a label
1083 // to jump back to if one of these trampolines finds a match.
1085 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1087 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
1089 X86Assembler::JmpDst hotPathBegin = m_jit.label();
1090 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
1091 ++structureIDInstructionIndex;
1093 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
1094 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1095 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdStructureID);
1096 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1097 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdBranchToSlowCase);
1099 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
1100 m_jit.movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::ecx);
1101 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdPropertyMapOffset);
1102 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
1107 case op_instanceof: {
1108 emitGetArg(instruction[i + 2].u.operand, X86::eax); // value
1109 emitGetArg(instruction[i + 3].u.operand, X86::ecx); // baseVal
1110 emitGetArg(instruction[i + 4].u.operand, X86::edx); // proto
1112 // check if any are immediates
1113 m_jit.orl_rr(X86::eax, X86::ecx);
1114 m_jit.orl_rr(X86::edx, X86::ecx);
1115 m_jit.testl_i32r(JSImmediate::TagMask, X86::ecx);
1117 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJnz(), i));
1119 // check that all are object type - this is a bit of a bithack to avoid excess branching;
1120 // we check that the sum of the three type codes from StructureIDs is exactly 3 * ObjectType,
1121 // this works because NumberType and StringType are smaller
1122 m_jit.movl_i32r(3 * ObjectType, X86::ecx);
1123 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::eax);
1124 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::edx, X86::edx);
1125 m_jit.subl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::eax, X86::ecx);
1126 m_jit.subl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::edx, X86::ecx);
1127 emitGetArg(instruction[i + 3].u.operand, X86::edx); // reload baseVal
1128 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::edx, X86::edx);
1129 m_jit.cmpl_rm(X86::ecx, OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::edx);
1131 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1133 // check that baseVal's flags include ImplementsHasInstance but not OverridesHasInstance
1134 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::edx, X86::ecx);
1135 m_jit.andl_i32r(ImplementsHasInstance | OverridesHasInstance, X86::ecx);
1136 m_jit.cmpl_i32r(ImplementsHasInstance, X86::ecx);
1138 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1140 emitGetArg(instruction[i + 2].u.operand, X86::ecx); // reload value
1141 emitGetArg(instruction[i + 4].u.operand, X86::edx); // reload proto
1143 // optimistically load true result
1144 m_jit.movl_i32r(reinterpret_cast<int32_t>(jsBoolean(true)), X86::eax);
1146 X86Assembler::JmpDst loop = m_jit.label();
1148 // load value's prototype
1149 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::ecx, X86::ecx);
1150 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
1152 m_jit.cmpl_rr(X86::ecx, X86::edx);
1153 X86Assembler::JmpSrc exit = m_jit.emitUnlinkedJe();
1155 m_jit.cmpl_i32r(reinterpret_cast<int32_t>(jsNull()), X86::ecx);
1156 X86Assembler::JmpSrc goToLoop = m_jit.emitUnlinkedJne();
1157 m_jit.link(goToLoop, loop);
1159 m_jit.movl_i32r(reinterpret_cast<int32_t>(jsBoolean(false)), X86::eax);
1161 m_jit.link(exit, m_jit.label());
1163 emitPutResult(instruction[i + 1].u.operand);
1168 case op_del_by_id: {
1169 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1170 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1171 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1172 emitCTICall(i, Machine::cti_op_del_by_id);
1173 emitPutResult(instruction[i + 1].u.operand);
1178 unsigned dst = instruction[i + 1].u.operand;
1179 unsigned src1 = instruction[i + 2].u.operand;
1180 unsigned src2 = instruction[i + 3].u.operand;
1182 // For now, only plant a fast int case if the constant operand is greater than zero.
1183 JSValue* src1Value = getConstantImmediateNumericArg(src1);
1184 JSValue* src2Value = getConstantImmediateNumericArg(src2);
1186 if (src1Value && ((value = JSImmediate::intValue(src1Value)) > 0)) {
1187 emitGetArg(src2, X86::eax);
1188 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1189 emitFastArithDeTagImmediate(X86::eax);
1190 m_jit.imull_i32r(X86::eax, value, X86::eax);
1191 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1192 emitFastArithReTagImmediate(X86::eax);
1194 } else if (src2Value && ((value = JSImmediate::intValue(src2Value)) > 0)) {
1195 emitGetArg(src1, X86::eax);
1196 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1197 emitFastArithDeTagImmediate(X86::eax);
1198 m_jit.imull_i32r(X86::eax, value, X86::eax);
1199 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1200 emitFastArithReTagImmediate(X86::eax);
1203 compileBinaryArithOp(op_mul, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
1209 FuncDeclNode* func = (m_codeBlock->functions[instruction[i + 2].u.operand]).get();
1210 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
1211 emitCTICall(i, Machine::cti_op_new_func);
1212 emitPutResult(instruction[i + 1].u.operand);
1217 compileOpCall(instruction, i);
1221 case op_get_global_var: {
1222 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 2].u.jsCell);
1223 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
1224 emitGetVariableObjectRegister(X86::eax, instruction[i + 3].u.operand, X86::eax);
1225 emitPutResult(instruction[i + 1].u.operand, X86::eax);
1229 case op_put_global_var: {
1230 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 1].u.jsCell);
1231 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
1232 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1233 emitPutVariableObjectRegister(X86::edx, X86::eax, instruction[i + 2].u.operand);
1237 case op_get_scoped_var: {
1238 int skip = instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain;
1240 emitGetArg(RegisterFile::ScopeChain, X86::eax);
1242 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::eax, X86::eax);
1244 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::eax, X86::eax);
1245 emitGetVariableObjectRegister(X86::eax, instruction[i + 2].u.operand, X86::eax);
1246 emitPutResult(instruction[i + 1].u.operand);
1250 case op_put_scoped_var: {
1251 int skip = instruction[i + 2].u.operand + m_codeBlock->needsFullScopeChain;
1253 emitGetArg(RegisterFile::ScopeChain, X86::edx);
1254 emitGetArg(instruction[i + 3].u.operand, X86::eax);
1256 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::edx, X86::edx);
1258 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::edx, X86::edx);
1259 emitPutVariableObjectRegister(X86::eax, X86::edx, instruction[i + 1].u.operand);
1263 case op_tear_off_activation: {
1264 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1265 emitCTICall(i, Machine::cti_op_tear_off_activation);
1269 case op_tear_off_arguments: {
1270 emitCTICall(i, Machine::cti_op_tear_off_arguments);
1275 // Check for a profiler - if there is one, jump to the hook below.
1276 emitGetCTIParam(CTI_ARGS_profilerReference, X86::eax);
1277 m_jit.cmpl_i32m(0, X86::eax);
1278 X86Assembler::JmpSrc profile = m_jit.emitUnlinkedJne();
1279 X86Assembler::JmpDst profiled = m_jit.label();
1281 // We could JIT generate the deref, only calling out to C when the refcount hits zero.
1282 if (m_codeBlock->needsFullScopeChain)
1283 emitCTICall(i, Machine::cti_op_ret_scopeChain);
1285 // Return the result in %eax.
1286 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1288 // Grab the return address.
1289 emitGetArg(RegisterFile::ReturnPC, X86::edx);
1291 // Restore our caller's "r".
1292 emitGetArg(RegisterFile::CallerFrame, X86::edi);
1295 m_jit.pushl_r(X86::edx);
1299 m_jit.link(profile, m_jit.label());
1300 emitCTICall(i, Machine::cti_op_ret_profiler);
1301 m_jit.link(m_jit.emitUnlinkedJmp(), profiled);
1306 case op_new_array: {
1307 m_jit.leal_mr(sizeof(Register) * instruction[i + 2].u.operand, X86::edi, X86::edx);
1308 emitPutArg(X86::edx, 0);
1309 emitPutArgConstant(instruction[i + 3].u.operand, 4);
1310 emitCTICall(i, Machine::cti_op_new_array);
1311 emitPutResult(instruction[i + 1].u.operand);
1316 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1317 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1318 emitCTICall(i, Machine::cti_op_resolve);
1319 emitPutResult(instruction[i + 1].u.operand);
1323 case op_construct: {
1324 compileOpCall(instruction, i, OpConstruct);
1328 case op_construct_verify: {
1329 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1331 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1332 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJne();
1333 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1334 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
1335 X86Assembler::JmpSrc isObject = m_jit.emitUnlinkedJe();
1337 m_jit.link(isImmediate, m_jit.label());
1338 emitGetArg(instruction[i + 2].u.operand, X86::ecx);
1339 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
1340 m_jit.link(isObject, m_jit.label());
1345 case op_get_by_val: {
1346 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1347 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1348 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1349 emitFastArithImmToInt(X86::edx);
1350 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1351 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1352 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
1353 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1355 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
1356 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
1357 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
1358 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
1360 // Get the value from the vector
1361 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::eax);
1362 emitPutResult(instruction[i + 1].u.operand);
1366 case op_resolve_func: {
1367 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1368 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1369 emitCTICall(i, Machine::cti_op_resolve_func);
1370 emitPutResult(instruction[i + 1].u.operand);
1371 emitPutResult(instruction[i + 2].u.operand, X86::edx);
1376 compileBinaryArithOp(op_sub, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
1380 case op_put_by_val: {
1381 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1382 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1383 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1384 emitFastArithImmToInt(X86::edx);
1385 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1386 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1387 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
1388 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1390 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
1391 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
1392 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
1393 X86Assembler::JmpSrc inFastVector = m_jit.emitUnlinkedJa();
1394 // No; oh well, check if the access is within the vector - if so, we may still be okay.
1395 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
1396 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
1398 // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
1399 // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff.
1400 m_jit.cmpl_i8m(0, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
1401 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
1403 // All good - put the value into the array.
1404 m_jit.link(inFastVector, m_jit.label());
1405 emitGetArg(instruction[i + 3].u.operand, X86::eax);
1406 m_jit.movl_rm(X86::eax, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
1410 CTI_COMPILE_BINARY_OP(op_lesseq)
1411 case op_loop_if_true: {
1412 emitSlowScriptCheck(i);
1414 unsigned target = instruction[i + 2].u.operand;
1415 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1417 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1418 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
1419 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1420 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
1422 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1423 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1424 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1425 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1427 m_jit.link(isZero, m_jit.label());
1431 case op_resolve_base: {
1432 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1433 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1434 emitCTICall(i, Machine::cti_op_resolve_base);
1435 emitPutResult(instruction[i + 1].u.operand);
1440 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1441 emitCTICall(i, Machine::cti_op_negate);
1442 emitPutResult(instruction[i + 1].u.operand);
1446 case op_resolve_skip: {
1447 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1448 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1449 emitPutArgConstant(instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain, 4);
1450 emitCTICall(i, Machine::cti_op_resolve_skip);
1451 emitPutResult(instruction[i + 1].u.operand);
1455 case op_resolve_global: {
1457 unsigned globalObject = reinterpret_cast<unsigned>(instruction[i + 2].u.jsCell);
1458 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1459 void* structureIDAddr = reinterpret_cast<void*>(instruction + i + 4);
1460 void* offsetAddr = reinterpret_cast<void*>(instruction + i + 5);
1462 // Check StructureID of global object
1463 m_jit.movl_i32r(globalObject, X86::eax);
1464 m_jit.movl_mr(structureIDAddr, X86::edx);
1465 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1466 X86Assembler::JmpSrc slowCase = m_jit.emitUnlinkedJne(); // StructureIDs don't match
1467 m_slowCases.append(SlowCaseEntry(slowCase, i));
1469 // Load cached property
1470 m_jit.movl_mr(OBJECT_OFFSET(JSGlobalObject, m_propertyStorage), X86::eax, X86::eax);
1471 m_jit.movl_mr(offsetAddr, X86::edx);
1472 m_jit.movl_mr(0, X86::eax, X86::edx, sizeof(JSValue*), X86::eax);
1473 emitPutResult(instruction[i + 1].u.operand);
1474 X86Assembler::JmpSrc end = m_jit.emitUnlinkedJmp();
1477 m_jit.link(slowCase, m_jit.label());
1478 emitPutArgConstant(globalObject, 0);
1479 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1480 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 8);
1481 emitCTICall(i, Machine::cti_op_resolve_global);
1482 emitPutResult(instruction[i + 1].u.operand);
1483 m_jit.link(end, m_jit.label());
1485 ++structureIDInstructionIndex;
1488 CTI_COMPILE_BINARY_OP(op_div)
1490 int srcDst = instruction[i + 1].u.operand;
1491 emitGetArg(srcDst, X86::eax);
1492 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1493 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
1494 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1495 emitPutResult(srcDst, X86::eax);
1500 unsigned target = instruction[i + 3].u.operand;
1501 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1503 emitGetArg(instruction[i + 1].u.operand, X86::edx);
1504 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1505 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
1506 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
1508 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1509 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1510 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1511 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1512 m_jit.cmpl_rr(X86::edx, X86::eax);
1513 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
1519 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1520 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
1521 m_jit.testl_i32r(JSImmediate::FullTagTypeMask, X86::eax); // i8?
1522 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1523 m_jit.xorl_i8r((JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue), X86::eax);
1524 emitPutResult(instruction[i + 1].u.operand);
1529 unsigned target = instruction[i + 2].u.operand;
1530 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1532 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1533 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1534 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1535 X86Assembler::JmpSrc isNonZero = m_jit.emitUnlinkedJne();
1537 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1538 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1539 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1540 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1542 m_jit.link(isNonZero, m_jit.label());
1547 int srcDst = instruction[i + 2].u.operand;
1548 emitGetArg(srcDst, X86::eax);
1549 m_jit.movl_rr(X86::eax, X86::edx);
1550 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1551 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1552 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1553 emitPutResult(srcDst, X86::edx);
1554 emitPutResult(instruction[i + 1].u.operand);
1558 case op_unexpected_load: {
1559 JSValue* v = m_codeBlock->unexpectedConstants[instruction[i + 2].u.operand];
1560 m_jit.movl_i32r(reinterpret_cast<unsigned>(v), X86::eax);
1561 emitPutResult(instruction[i + 1].u.operand);
1566 int retAddrDst = instruction[i + 1].u.operand;
1567 int target = instruction[i + 2].u.operand;
1568 m_jit.movl_i32m(0, sizeof(Register) * retAddrDst, X86::edi);
1569 X86Assembler::JmpDst addrPosition = m_jit.label();
1570 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1571 X86Assembler::JmpDst sretTarget = m_jit.label();
1572 m_jsrSites.append(JSRInfo(addrPosition, sretTarget));
1577 m_jit.jmp_m(sizeof(Register) * instruction[i + 1].u.operand, X86::edi);
1582 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1583 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1584 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1585 m_jit.cmpl_rr(X86::edx, X86::eax);
1586 m_jit.sete_r(X86::eax);
1587 m_jit.movzbl_rr(X86::eax, X86::eax);
1588 emitTagAsBoolImmediate(X86::eax);
1589 emitPutResult(instruction[i + 1].u.operand);
1594 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1595 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1596 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1597 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1598 emitFastArithImmToInt(X86::eax);
1599 emitFastArithImmToInt(X86::ecx);
1600 m_jit.shll_CLr(X86::eax);
1601 emitFastArithIntToImmOrSlowCase(X86::eax, i);
1602 emitPutResult(instruction[i + 1].u.operand);
1607 unsigned src1 = instruction[i + 2].u.operand;
1608 unsigned src2 = instruction[i + 3].u.operand;
1609 unsigned dst = instruction[i + 1].u.operand;
1610 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
1611 emitGetArg(src2, X86::eax);
1612 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1613 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax); // FIXME: make it more obvious this is relying on the format of JSImmediate
1615 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
1616 emitGetArg(src1, X86::eax);
1617 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1618 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax);
1621 emitGetArg(src1, X86::eax);
1622 emitGetArg(src2, X86::edx);
1623 m_jit.andl_rr(X86::edx, X86::eax);
1624 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1631 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1632 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1633 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1634 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1635 emitFastArithImmToInt(X86::ecx);
1636 m_jit.sarl_CLr(X86::eax);
1637 emitFastArithPotentiallyReTagImmediate(X86::eax);
1638 emitPutResult(instruction[i + 1].u.operand);
1643 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1644 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1645 m_jit.xorl_i8r(~JSImmediate::TagBitTypeInteger, X86::eax);
1646 emitPutResult(instruction[i + 1].u.operand);
1650 case op_resolve_with_base: {
1651 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1652 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1653 emitCTICall(i, Machine::cti_op_resolve_with_base);
1654 emitPutResult(instruction[i + 1].u.operand);
1655 emitPutResult(instruction[i + 2].u.operand, X86::edx);
1659 case op_new_func_exp: {
1660 FuncExprNode* func = (m_codeBlock->functionExpressions[instruction[i + 2].u.operand]).get();
1661 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
1662 emitCTICall(i, Machine::cti_op_new_func_exp);
1663 emitPutResult(instruction[i + 1].u.operand);
1668 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1669 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1670 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1671 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1672 emitFastArithDeTagImmediate(X86::eax);
1673 m_slowCases.append(SlowCaseEntry(emitFastArithDeTagImmediateJumpIfZero(X86::ecx), i));
1675 m_jit.idivl_r(X86::ecx);
1676 emitFastArithReTagImmediate(X86::edx);
1677 m_jit.movl_rr(X86::edx, X86::eax);
1678 emitPutResult(instruction[i + 1].u.operand);
1683 unsigned target = instruction[i + 2].u.operand;
1684 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1686 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1687 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
1688 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1689 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
1691 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1692 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1693 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1694 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1696 m_jit.link(isZero, m_jit.label());
1700 CTI_COMPILE_BINARY_OP(op_less)
1702 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1703 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1704 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1705 m_jit.cmpl_rr(X86::eax, X86::edx);
1707 m_jit.setne_r(X86::eax);
1708 m_jit.movzbl_rr(X86::eax, X86::eax);
1709 emitTagAsBoolImmediate(X86::eax);
1711 emitPutResult(instruction[i + 1].u.operand);
1717 int srcDst = instruction[i + 2].u.operand;
1718 emitGetArg(srcDst, X86::eax);
1719 m_jit.movl_rr(X86::eax, X86::edx);
1720 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1721 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1722 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1723 emitPutResult(srcDst, X86::edx);
1724 emitPutResult(instruction[i + 1].u.operand);
1728 CTI_COMPILE_BINARY_OP(op_urshift)
1730 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1731 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1732 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1733 m_jit.xorl_rr(X86::edx, X86::eax);
1734 emitFastArithReTagImmediate(X86::eax);
1735 emitPutResult(instruction[i + 1].u.operand);
1739 case op_new_regexp: {
1740 RegExp* regExp = m_codeBlock->regexps[instruction[i + 2].u.operand].get();
1741 emitPutArgConstant(reinterpret_cast<unsigned>(regExp), 0);
1742 emitCTICall(i, Machine::cti_op_new_regexp);
1743 emitPutResult(instruction[i + 1].u.operand);
1748 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1749 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1750 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1751 m_jit.orl_rr(X86::edx, X86::eax);
1752 emitPutResult(instruction[i + 1].u.operand);
1756 case op_call_eval: {
1757 compileOpCall(instruction, i, OpCallEval);
1762 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1763 emitCTICall(i, Machine::cti_op_throw);
1764 m_jit.addl_i8r(0x24, X86::esp);
1765 m_jit.popl_r(X86::edi);
1766 m_jit.popl_r(X86::esi);
1771 case op_get_pnames: {
1772 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1773 emitCTICall(i, Machine::cti_op_get_pnames);
1774 emitPutResult(instruction[i + 1].u.operand);
1778 case op_next_pname: {
1779 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1780 unsigned target = instruction[i + 3].u.operand;
1781 emitCTICall(i, Machine::cti_op_next_pname);
1782 m_jit.testl_rr(X86::eax, X86::eax);
1783 X86Assembler::JmpSrc endOfIter = m_jit.emitUnlinkedJe();
1784 emitPutResult(instruction[i + 1].u.operand);
1785 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 3 + target));
1786 m_jit.link(endOfIter, m_jit.label());
1790 case op_push_scope: {
1791 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1792 emitCTICall(i, Machine::cti_op_push_scope);
1796 case op_pop_scope: {
1797 emitCTICall(i, Machine::cti_op_pop_scope);
1801 CTI_COMPILE_UNARY_OP(op_typeof)
1802 CTI_COMPILE_UNARY_OP(op_is_undefined)
1803 CTI_COMPILE_UNARY_OP(op_is_boolean)
1804 CTI_COMPILE_UNARY_OP(op_is_number)
1805 CTI_COMPILE_UNARY_OP(op_is_string)
1806 CTI_COMPILE_UNARY_OP(op_is_object)
1807 CTI_COMPILE_UNARY_OP(op_is_function)
1809 compileOpStrictEq(instruction, i, OpStrictEq);
1813 case op_nstricteq: {
1814 compileOpStrictEq(instruction, i, OpNStrictEq);
1818 case op_to_jsnumber: {
1819 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1821 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1822 X86Assembler::JmpSrc wasImmediate = m_jit.emitUnlinkedJnz();
1824 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
1826 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1827 m_jit.cmpl_i32m(NumberType, OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::ecx);
1829 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1831 m_jit.link(wasImmediate, m_jit.label());
1833 emitPutResult(instruction[i + 1].u.operand);
1838 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1839 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1840 emitCTICall(i, Machine::cti_op_in);
1841 emitPutResult(instruction[i + 1].u.operand);
1845 case op_push_new_scope: {
1846 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1847 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1848 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1849 emitCTICall(i, Machine::cti_op_push_new_scope);
1850 emitPutResult(instruction[i + 1].u.operand);
1855 emitGetCTIParam(CTI_ARGS_callFrame, X86::edi); // edi := r
1856 emitPutResult(instruction[i + 1].u.operand);
1860 case op_jmp_scopes: {
1861 unsigned count = instruction[i + 1].u.operand;
1862 emitPutArgConstant(count, 0);
1863 emitCTICall(i, Machine::cti_op_jmp_scopes);
1864 unsigned target = instruction[i + 2].u.operand;
1865 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1869 case op_put_by_index: {
1870 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1871 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1872 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1873 emitCTICall(i, Machine::cti_op_put_by_index);
1877 case op_switch_imm: {
1878 unsigned tableIndex = instruction[i + 1].u.operand;
1879 unsigned defaultOffset = instruction[i + 2].u.operand;
1880 unsigned scrutinee = instruction[i + 3].u.operand;
1882 // create jump table for switch destinations, track this switch statement.
1883 SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTables[tableIndex];
1884 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Immediate));
1885 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
1887 emitGetPutArg(scrutinee, 0, X86::ecx);
1888 emitPutArgConstant(tableIndex, 4);
1889 emitCTICall(i, Machine::cti_op_switch_imm);
1890 m_jit.jmp_r(X86::eax);
1894 case op_switch_char: {
1895 unsigned tableIndex = instruction[i + 1].u.operand;
1896 unsigned defaultOffset = instruction[i + 2].u.operand;
1897 unsigned scrutinee = instruction[i + 3].u.operand;
1899 // create jump table for switch destinations, track this switch statement.
1900 SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTables[tableIndex];
1901 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Character));
1902 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
1904 emitGetPutArg(scrutinee, 0, X86::ecx);
1905 emitPutArgConstant(tableIndex, 4);
1906 emitCTICall(i, Machine::cti_op_switch_char);
1907 m_jit.jmp_r(X86::eax);
1911 case op_switch_string: {
1912 unsigned tableIndex = instruction[i + 1].u.operand;
1913 unsigned defaultOffset = instruction[i + 2].u.operand;
1914 unsigned scrutinee = instruction[i + 3].u.operand;
1916 // create jump table for switch destinations, track this switch statement.
1917 StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTables[tableIndex];
1918 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset));
1920 emitGetPutArg(scrutinee, 0, X86::ecx);
1921 emitPutArgConstant(tableIndex, 4);
1922 emitCTICall(i, Machine::cti_op_switch_string);
1923 m_jit.jmp_r(X86::eax);
1927 case op_del_by_val: {
1928 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1929 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1930 emitCTICall(i, Machine::cti_op_del_by_val);
1931 emitPutResult(instruction[i + 1].u.operand);
1935 case op_put_getter: {
1936 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1937 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1938 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1939 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1940 emitCTICall(i, Machine::cti_op_put_getter);
1944 case op_put_setter: {
1945 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1946 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1947 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1948 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1949 emitCTICall(i, Machine::cti_op_put_setter);
1953 case op_new_error: {
1954 JSValue* message = m_codeBlock->unexpectedConstants[instruction[i + 3].u.operand];
1955 emitPutArgConstant(instruction[i + 2].u.operand, 0);
1956 emitPutArgConstant(reinterpret_cast<unsigned>(message), 4);
1957 emitPutArgConstant(m_codeBlock->lineNumberForVPC(&instruction[i]), 8);
1958 emitCTICall(i, Machine::cti_op_new_error);
1959 emitPutResult(instruction[i + 1].u.operand);
1964 emitPutArgConstant(instruction[i + 1].u.operand, 0);
1965 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1966 emitPutArgConstant(instruction[i + 3].u.operand, 8);
1967 emitCTICall(i, Machine::cti_op_debug);
1972 unsigned dst = instruction[i + 1].u.operand;
1973 unsigned src1 = instruction[i + 2].u.operand;
1975 emitGetArg(src1, X86::eax);
1976 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1977 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJnz();
1979 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1980 m_jit.testl_i32m(MasqueradesAsUndefined, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::ecx);
1981 m_jit.setnz_r(X86::eax);
1983 X86Assembler::JmpSrc wasNotImmediate = m_jit.emitUnlinkedJmp();
1985 m_jit.link(isImmediate, m_jit.label());
1987 m_jit.movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
1988 m_jit.andl_rr(X86::eax, X86::ecx);
1989 m_jit.cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
1990 m_jit.sete_r(X86::eax);
1992 m_jit.link(wasNotImmediate, m_jit.label());
1994 m_jit.movzbl_rr(X86::eax, X86::eax);
1995 emitTagAsBoolImmediate(X86::eax);
2002 unsigned dst = instruction[i + 1].u.operand;
2003 unsigned src1 = instruction[i + 2].u.operand;
2005 emitGetArg(src1, X86::eax);
2006 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2007 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJnz();
2009 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
2010 m_jit.testl_i32m(MasqueradesAsUndefined, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::ecx);
2011 m_jit.setz_r(X86::eax);
2013 X86Assembler::JmpSrc wasNotImmediate = m_jit.emitUnlinkedJmp();
2015 m_jit.link(isImmediate, m_jit.label());
2017 m_jit.movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
2018 m_jit.andl_rr(X86::eax, X86::ecx);
2019 m_jit.cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
2020 m_jit.setne_r(X86::eax);
2022 m_jit.link(wasNotImmediate, m_jit.label());
2024 m_jit.movzbl_rr(X86::eax, X86::eax);
2025 emitTagAsBoolImmediate(X86::eax);
2032 // Even though CTI doesn't use them, we initialize our constant
2033 // registers to zap stale pointers, to avoid unnecessarily prolonging
2034 // object lifetime and increasing GC pressure.
2035 size_t count = m_codeBlock->numVars + m_codeBlock->constantRegisters.size();
2036 for (size_t j = 0; j < count; ++j)
2037 emitInitRegister(j);
2042 case op_enter_with_activation: {
2043 // Even though CTI doesn't use them, we initialize our constant
2044 // registers to zap stale pointers, to avoid unnecessarily prolonging
2045 // object lifetime and increasing GC pressure.
2046 size_t count = m_codeBlock->numVars + m_codeBlock->constantRegisters.size();
2047 for (size_t j = 0; j < count; ++j)
2048 emitInitRegister(j);
2050 emitCTICall(i, Machine::cti_op_push_activation);
2051 emitPutResult(instruction[i + 1].u.operand);
2056 case op_create_arguments: {
2057 emitCTICall(i, Machine::cti_op_create_arguments);
2061 case op_convert_this: {
2062 emitGetArg(instruction[i + 1].u.operand, X86::eax);
2064 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
2065 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::edx);
2066 m_jit.testl_i32m(NeedsThisConversion, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::edx);
2067 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJnz(), i));
2072 case op_get_array_length:
2073 case op_get_by_id_chain:
2074 case op_get_by_id_generic:
2075 case op_get_by_id_proto:
2076 case op_get_by_id_self:
2077 case op_get_string_length:
2078 case op_put_by_id_generic:
2079 case op_put_by_id_replace:
2080 case op_put_by_id_transition:
2081 ASSERT_NOT_REACHED();
2085 ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
// Second compilation pass: resolve intra-block jumps recorded during the main
// pass. Each JmpTable entry pairs an unlinked jump (from) with the bytecode
// index of its target (to); m_labels maps bytecode indices to the machine-code
// labels emitted by the main pass.
2089 void CTI::privateCompileLinkPass()
2091 unsigned jmpTableCount = m_jmpTable.size();
2092 for (unsigned i = 0; i < jmpTableCount; ++i)
2093 m_jit.link(m_jmpTable[i].from, m_labels[m_jmpTable[i].to]);
// Generic slow case for a binary operator: link the fast path's single
// bail-out jump here, pass both operands (arg slots 0 and 4) to the
// Machine::cti_<name> helper, and store the helper's result.
// (Comments cannot be placed inside the macro body: a '//' comment before a
// backslash-continuation would swallow the next line during line splicing.)
2097 #define CTI_COMPILE_BINARY_OP_SLOW_CASE(name) \
2099 m_jit.link(iter->from, m_jit.label()); \
2100 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
2101 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
2102 emitCTICall(i, Machine::cti_##name); \
2103 emitPutResult(instruction[i + 1].u.operand); \
// Third compilation pass: emit out-of-line slow-case code for every
// SlowCaseEntry recorded by the main (fast-path) pass. For each entry we
// re-dispatch on the originating opcode, link the fast path's bail-out
// jump(s) to the code emitted here, marshal operands into the CTI argument
// area (emitPutArg / emitGetPutArg), call the matching Machine::cti_* C++
// helper, store its result, and jump back into the fast-path instruction
// stream. NOTE(review): this listing is elided (some case labels,
// "i += n; break;" lines and braces are not visible); comments below describe
// only what the visible lines establish.
2108 void CTI::privateCompileSlowCases()
2110 unsigned structureIDInstructionIndex = 0;
2112 Instruction* instruction = m_codeBlock->instructions.begin();
2113 for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end(); ++iter) {
// iter->to is the bytecode index of the instruction whose fast path bailed out.
2114 unsigned i = iter->to;
2115 switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
// Two slow-case jumps were recorded for op_convert_this in the main pass
// (not-a-JSCell, and NeedsThisConversion set in the type info); both land here.
2116 case op_convert_this: {
2117 m_jit.link(iter->from, m_jit.label());
2118 m_jit.link((++iter)->from, m_jit.label());
2119 emitPutArg(X86::eax, 0);
2120 emitCTICall(i, Machine::cti_op_convert_this);
2121 emitPutResult(instruction[i + 1].u.operand);
// op_add (elided case label). When one operand is a constant immediate, the
// fast path presumably performed a speculative tagged add; the subl of the
// de-tagged constant below undoes it before calling the generic helper —
// TODO confirm against the (not visible) fast-path emission.
2126 unsigned dst = instruction[i + 1].u.operand;
2127 unsigned src1 = instruction[i + 2].u.operand;
2128 unsigned src2 = instruction[i + 3].u.operand;
2129 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
2130 X86Assembler::JmpSrc notImm = iter->from;
2131 m_jit.link((++iter)->from, m_jit.label());
2132 m_jit.subl_i32r(getDeTaggedConstantImmediate(value), X86::edx);
2133 m_jit.link(notImm, m_jit.label());
2134 emitGetPutArg(src1, 0, X86::ecx);
2135 emitPutArg(X86::edx, 4);
2136 emitCTICall(i, Machine::cti_op_add);
2138 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
2139 X86Assembler::JmpSrc notImm = iter->from;
2140 m_jit.link((++iter)->from, m_jit.label());
2141 m_jit.subl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
2142 m_jit.link(notImm, m_jit.label());
2143 emitPutArg(X86::eax, 0);
2144 emitGetPutArg(src2, 4, X86::ecx);
2145 emitCTICall(i, Machine::cti_op_add);
// Neither operand constant: delegate to the shared binary-arith slow case,
// but only when the recorded operand types say both might be numbers.
2148 OperandTypes types = OperandTypes::fromInt(instruction[i + 4].u.operand);
2149 if (types.first().mightBeNumber() && types.second().mightBeNumber())
2150 compileBinaryArithOpSlowCase(op_add, iter, dst, src1, src2, types, i);
2152 ASSERT_NOT_REACHED();
2158 case op_get_by_val: {
2159 // The slow case that handles accesses to arrays (below) may jump back up to here.
2160 X86Assembler::JmpDst beginGetByValSlow = m_jit.label();
// Three slow-case jumps recorded: base-not-immediate, plus two others
// (elided fast path); the subscript in edx is re-tagged as an immediate
// int before calling the generic helper.
2162 X86Assembler::JmpSrc notImm = iter->from;
2163 m_jit.link((++iter)->from, m_jit.label());
2164 m_jit.link((++iter)->from, m_jit.label());
2165 emitFastArithIntToImmNoCheck(X86::edx);
2166 m_jit.link(notImm, m_jit.label());
2167 emitPutArg(X86::eax, 0);
2168 emitPutArg(X86::edx, 4);
2169 emitCTICall(i, Machine::cti_op_get_by_val);
2170 emitPutResult(instruction[i + 1].u.operand);
// Jump back to the opcode after op_get_by_val (length 4).
2171 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
2173 // This is slow case that handles accesses to arrays above the fast cut-off.
2174 // First, check if this is an access to the vector
2175 m_jit.link((++iter)->from, m_jit.label());
2176 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
// Out of vector bounds: fall back to the fully generic path above.
2177 m_jit.link(m_jit.emitUnlinkedJbe(), beginGetByValSlow);
2179 // okay, missed the fast region, but it is still in the vector. Get the value.
2180 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::ecx);
2181 // Check whether the value loaded is zero; if so we need to return undefined.
2182 m_jit.testl_rr(X86::ecx, X86::ecx);
2183 m_jit.link(m_jit.emitUnlinkedJe(), beginGetByValSlow);
2184 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
// op_sub (elided case label): shared binary-arith slow case.
2190 compileBinaryArithOpSlowCase(op_sub, iter, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
// op_rshift (elided case label): two recorded slow cases; operands already
// in eax/ecx from the fast path.
2195 m_jit.link(iter->from, m_jit.label());
2196 m_jit.link((++iter)->from, m_jit.label());
2197 emitPutArg(X86::eax, 0);
2198 emitPutArg(X86::ecx, 4);
2199 emitCTICall(i, Machine::cti_op_rshift);
2200 emitPutResult(instruction[i + 1].u.operand);
// op_lshift (elided case label): three recorded slow cases; the registers may
// have been clobbered by the fast path, so the operands are reloaded before
// the first two bail-out points are linked in.
2205 X86Assembler::JmpSrc notImm1 = iter->from;
2206 X86Assembler::JmpSrc notImm2 = (++iter)->from;
2207 m_jit.link((++iter)->from, m_jit.label());
2208 emitGetArg(instruction[i + 2].u.operand, X86::eax);
2209 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
2210 m_jit.link(notImm1, m_jit.label());
2211 m_jit.link(notImm2, m_jit.label());
2212 emitPutArg(X86::eax, 0);
2213 emitPutArg(X86::ecx, 4);
2214 emitCTICall(i, Machine::cti_op_lshift);
2215 emitPutResult(instruction[i + 1].u.operand);
2219 case op_loop_if_less: {
// Loop back-edges also perform the slow-script (timeout) check.
2220 emitSlowScriptCheck(i);
2222 unsigned target = instruction[i + 3].u.operand;
2223 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
// First branch (elided "if (src2imm)"): constant RHS variant — one slow case.
2225 m_jit.link(iter->from, m_jit.label());
2226 emitPutArg(X86::edx, 0);
2227 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
2228 emitCTICall(i, Machine::cti_op_loop_if_less);
2229 m_jit.testl_rr(X86::eax, X86::eax);
2230 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
// Else branch (elided): general variant — two slow cases.
2232 m_jit.link(iter->from, m_jit.label());
2233 m_jit.link((++iter)->from, m_jit.label());
2234 emitPutArg(X86::eax, 0);
2235 emitPutArg(X86::edx, 4);
2236 emitCTICall(i, Machine::cti_op_loop_if_less);
2237 m_jit.testl_rr(X86::eax, X86::eax);
2238 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
2243 case op_put_by_id: {
2244 m_jit.link(iter->from, m_jit.label());
2245 m_jit.link((++iter)->from, m_jit.label());
2247 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
2248 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
2249 emitPutArg(X86::eax, 0);
2250 emitPutArg(X86::edx, 8);
2251 X86Assembler::JmpSrc call = emitCTICall(i, Machine::cti_op_put_by_id);
2253 // Track the location of the call; this will be used to recover repatch information.
2254 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
2255 m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
2256 ++structureIDInstructionIndex;
2261 case op_get_by_id: {
2262 // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
2263 // so that we only need track one pointer into the slow case code - we track a pointer to the location
2264 // of the call (which we can use to look up the repatch information), but should a array-length or
2265 // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
2266 // the distance from the call to the head of the slow case.
2268 m_jit.link(iter->from, m_jit.label());
2269 m_jit.link((++iter)->from, m_jit.label());
2272 X86Assembler::JmpDst coldPathBegin = m_jit.label();
2274 emitPutArg(X86::eax, 0);
2275 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
2276 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
2277 X86Assembler::JmpSrc call = emitCTICall(i, Machine::cti_op_get_by_id);
// The call must sit at a fixed, known distance from coldPathBegin so stubs
// can compute the slow-case entry from the recorded call location.
2278 ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
2279 emitPutResult(instruction[i + 1].u.operand);
2281 // Track the location of the call; this will be used to recover repatch information.
2282 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
2283 m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
2284 ++structureIDInstructionIndex;
// op_resolve_global consumes a structure-stub slot but emits no slow-case
// code of its own here.
2289 case op_resolve_global: {
2290 ++structureIDInstructionIndex;
2294 case op_loop_if_lesseq: {
// Same shape as op_loop_if_less above: constant-RHS variant, then the
// general two-slow-case variant (branch structure elided).
2295 emitSlowScriptCheck(i);
2297 unsigned target = instruction[i + 3].u.operand;
2298 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
2300 m_jit.link(iter->from, m_jit.label());
2301 emitPutArg(X86::edx, 0);
2302 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
2303 emitCTICall(i, Machine::cti_op_loop_if_lesseq);
2304 m_jit.testl_rr(X86::eax, X86::eax);
2305 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
2307 m_jit.link(iter->from, m_jit.label());
2308 m_jit.link((++iter)->from, m_jit.label());
2309 emitPutArg(X86::eax, 0);
2310 emitPutArg(X86::edx, 4);
2311 emitCTICall(i, Machine::cti_op_loop_if_lesseq);
2312 m_jit.testl_rr(X86::eax, X86::eax);
2313 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
// op_pre_inc (elided case label): the overflow slow case must first undo the
// speculative tagged increment (subtract de-tagged 1) before calling out.
2319 unsigned srcDst = instruction[i + 1].u.operand;
2320 X86Assembler::JmpSrc notImm = iter->from;
2321 m_jit.link((++iter)->from, m_jit.label());
2322 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
2323 m_jit.link(notImm, m_jit.label());
2324 emitPutArg(X86::eax, 0);
2325 emitCTICall(i, Machine::cti_op_pre_inc);
2326 emitPutResult(srcDst);
2330 case op_put_by_val: {
2331 // Normal slow cases - either is not an immediate imm, or is an array.
2332 X86Assembler::JmpSrc notImm = iter->from;
2333 m_jit.link((++iter)->from, m_jit.label());
2334 m_jit.link((++iter)->from, m_jit.label());
2335 emitFastArithIntToImmNoCheck(X86::edx);
2336 m_jit.link(notImm, m_jit.label());
2337 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
2338 emitPutArg(X86::eax, 0);
2339 emitPutArg(X86::edx, 4);
2340 emitPutArg(X86::ecx, 8);
2341 emitCTICall(i, Machine::cti_op_put_by_val);
// Jump back to the opcode after op_put_by_val (length 4).
2342 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
2344 // slow cases for immediate int accesses to arrays
2345 m_jit.link((++iter)->from, m_jit.label());
2346 m_jit.link((++iter)->from, m_jit.label());
2347 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
2348 emitPutArg(X86::eax, 0);
2349 emitPutArg(X86::edx, 4);
2350 emitPutArg(X86::ecx, 8);
2351 emitCTICall(i, Machine::cti_op_put_by_val_array);
2356 case op_loop_if_true: {
2357 emitSlowScriptCheck(i);
2359 m_jit.link(iter->from, m_jit.label());
2360 emitPutArg(X86::eax, 0);
// Helper returns a boolean in eax; branch to the loop target when true.
2361 emitCTICall(i, Machine::cti_op_jtrue);
2362 m_jit.testl_rr(X86::eax, X86::eax);
2363 unsigned target = instruction[i + 2].u.operand;
2364 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
// op_pre_dec (elided case label): mirror of op_pre_inc — undo the
// speculative tagged decrement (add de-tagged 1) before calling out.
2369 unsigned srcDst = instruction[i + 1].u.operand;
2370 X86Assembler::JmpSrc notImm = iter->from;
2371 m_jit.link((++iter)->from, m_jit.label());
2372 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
2373 m_jit.link(notImm, m_jit.label());
2374 emitPutArg(X86::eax, 0);
2375 emitCTICall(i, Machine::cti_op_pre_dec);
2376 emitPutResult(srcDst);
// op_jnless (elided case label): note the Je links — the branch sense is
// inverted relative to op_loop_if_less (jump taken when NOT less).
2381 unsigned target = instruction[i + 3].u.operand;
2382 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
2384 m_jit.link(iter->from, m_jit.label());
2385 emitPutArg(X86::edx, 0);
2386 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
2387 emitCTICall(i, Machine::cti_op_jless);
2388 m_jit.testl_rr(X86::eax, X86::eax);
2389 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
2391 m_jit.link(iter->from, m_jit.label());
2392 m_jit.link((++iter)->from, m_jit.label());
2393 emitPutArg(X86::eax, 0);
2394 emitPutArg(X86::edx, 4);
2395 emitCTICall(i, Machine::cti_op_jless);
2396 m_jit.testl_rr(X86::eax, X86::eax);
2397 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
// op_not (elided case label): the fast path XORed out the bool tag; restore
// it before handing the original value to the helper.
2403 m_jit.link(iter->from, m_jit.label());
2404 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
2405 emitPutArg(X86::eax, 0);
2406 emitCTICall(i, Machine::cti_op_not);
2407 emitPutResult(instruction[i + 1].u.operand);
// op_jfalse (elided case label): uses cti_op_jtrue and inverts the branch.
2412 m_jit.link(iter->from, m_jit.label());
2413 emitPutArg(X86::eax, 0);
2414 emitCTICall(i, Machine::cti_op_jtrue);
2415 m_jit.testl_rr(X86::eax, X86::eax);
2416 unsigned target = instruction[i + 2].u.operand;
2417 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 2 + target]); // inverted!
// op_post_inc (elided case label): helper returns the old value in eax and
// the incremented value in edx (stored back to srcDst below).
2422 unsigned srcDst = instruction[i + 2].u.operand;
2423 m_jit.link(iter->from, m_jit.label());
2424 m_jit.link((++iter)->from, m_jit.label());
2425 emitPutArg(X86::eax, 0);
2426 emitCTICall(i, Machine::cti_op_post_inc);
2427 emitPutResult(instruction[i + 1].u.operand);
2428 emitPutResult(srcDst, X86::edx);
// op_bitnot (elided case label).
2433 m_jit.link(iter->from, m_jit.label());
2434 emitPutArg(X86::eax, 0);
2435 emitCTICall(i, Machine::cti_op_bitnot);
2436 emitPutResult(instruction[i + 1].u.operand);
// op_bitand (elided case label): three variants depending on which operand
// (if any) is a constant immediate; the non-constant operand's current
// register differs per variant.
2441 unsigned src1 = instruction[i + 2].u.operand;
2442 unsigned src2 = instruction[i + 3].u.operand;
2443 unsigned dst = instruction[i + 1].u.operand;
2444 if (getConstantImmediateNumericArg(src1)) {
2445 m_jit.link(iter->from, m_jit.label());
2446 emitGetPutArg(src1, 0, X86::ecx);
2447 emitPutArg(X86::eax, 4);
2448 emitCTICall(i, Machine::cti_op_bitand);
2450 } else if (getConstantImmediateNumericArg(src2)) {
2451 m_jit.link(iter->from, m_jit.label());
2452 emitPutArg(X86::eax, 0);
2453 emitGetPutArg(src2, 4, X86::ecx);
2454 emitCTICall(i, Machine::cti_op_bitand);
2457 m_jit.link(iter->from, m_jit.label());
2458 emitGetPutArg(src1, 0, X86::ecx);
2459 emitPutArg(X86::edx, 4);
2460 emitCTICall(i, Machine::cti_op_bitand);
// op_jtrue (elided case label).
2467 m_jit.link(iter->from, m_jit.label());
2468 emitPutArg(X86::eax, 0);
2469 emitCTICall(i, Machine::cti_op_jtrue);
2470 m_jit.testl_rr(X86::eax, X86::eax);
2471 unsigned target = instruction[i + 2].u.operand;
2472 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
// op_post_dec (elided case label): mirror of op_post_inc.
2477 unsigned srcDst = instruction[i + 2].u.operand;
2478 m_jit.link(iter->from, m_jit.label());
2479 m_jit.link((++iter)->from, m_jit.label());
2480 emitPutArg(X86::eax, 0);
2481 emitCTICall(i, Machine::cti_op_post_dec);
2482 emitPutResult(instruction[i + 1].u.operand);
2483 emitPutResult(srcDst, X86::edx);
// op_bitxor, op_bitor, op_eq, op_neq (elided case labels): uniform
// two-operand slow cases with operands already in eax/edx.
2488 m_jit.link(iter->from, m_jit.label());
2489 emitPutArg(X86::eax, 0);
2490 emitPutArg(X86::edx, 4);
2491 emitCTICall(i, Machine::cti_op_bitxor);
2492 emitPutResult(instruction[i + 1].u.operand);
2497 m_jit.link(iter->from, m_jit.label());
2498 emitPutArg(X86::eax, 0);
2499 emitPutArg(X86::edx, 4);
2500 emitCTICall(i, Machine::cti_op_bitor);
2501 emitPutResult(instruction[i + 1].u.operand);
2506 m_jit.link(iter->from, m_jit.label());
2507 emitPutArg(X86::eax, 0);
2508 emitPutArg(X86::edx, 4);
2509 emitCTICall(i, Machine::cti_op_eq);
2510 emitPutResult(instruction[i + 1].u.operand);
2515 m_jit.link(iter->from, m_jit.label());
2516 emitPutArg(X86::eax, 0);
2517 emitPutArg(X86::edx, 4);
2518 emitCTICall(i, Machine::cti_op_neq);
2519 emitPutResult(instruction[i + 1].u.operand);
2523 CTI_COMPILE_BINARY_OP_SLOW_CASE(op_stricteq);
2524 CTI_COMPILE_BINARY_OP_SLOW_CASE(op_nstricteq);
2525 case op_instanceof: {
2526 m_jit.link(iter->from, m_jit.label());
2527 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
2528 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
2529 emitGetPutArg(instruction[i + 4].u.operand, 8, X86::ecx);
2530 emitCTICall(i, Machine::cti_op_instanceof);
2531 emitPutResult(instruction[i + 1].u.operand);
// op_mod (elided case label): three recorded slow cases; the first two
// bail out before the operands were de-tagged, the third after — so the
// operands are re-tagged only on the third path.
2536 X86Assembler::JmpSrc notImm1 = iter->from;
2537 X86Assembler::JmpSrc notImm2 = (++iter)->from;
2538 m_jit.link((++iter)->from, m_jit.label());
2539 emitFastArithReTagImmediate(X86::eax);
2540 emitFastArithReTagImmediate(X86::ecx);
2541 m_jit.link(notImm1, m_jit.label());
2542 m_jit.link(notImm2, m_jit.label());
2543 emitPutArg(X86::eax, 0);
2544 emitPutArg(X86::ecx, 4);
2545 emitCTICall(i, Machine::cti_op_mod);
2546 emitPutResult(instruction[i + 1].u.operand);
// op_mul (elided case label): positive-constant operand variants take the
// single-slow-case path; anything else delegates to the shared helper.
2551 int dst = instruction[i + 1].u.operand;
2552 int src1 = instruction[i + 2].u.operand;
2553 int src2 = instruction[i + 3].u.operand;
2554 JSValue* src1Value = getConstantImmediateNumericArg(src1);
2555 JSValue* src2Value = getConstantImmediateNumericArg(src2);
2557 if (src1Value && ((value = JSImmediate::intValue(src1Value)) > 0)) {
2558 m_jit.link(iter->from, m_jit.label());
2559 // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
2560 emitGetPutArg(src1, 0, X86::ecx);
2561 emitGetPutArg(src2, 4, X86::ecx);
2562 emitCTICall(i, Machine::cti_op_mul);
2564 } else if (src2Value && ((value = JSImmediate::intValue(src2Value)) > 0)) {
2565 m_jit.link(iter->from, m_jit.label());
2566 // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
2567 emitGetPutArg(src1, 0, X86::ecx);
2568 emitGetPutArg(src2, 4, X86::ecx);
2569 emitCTICall(i, Machine::cti_op_mul);
2572 compileBinaryArithOpSlowCase(op_mul, iter, dst, src1, src2, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
// op_call / op_construct slow path: the callee's ctiCode was not yet
// generated, so compile it via cti_vm_compile, then call the resulting code.
2579 case op_construct: {
2580 m_jit.link(iter->from, m_jit.label());
2582 // We jump to this slow case if the ctiCode for the codeBlock has not yet been generated; compile it now.
2583 emitCTICall(i, Machine::cti_vm_compile);
2584 emitCall(i, X86::eax);
2586 // Instead of checking for 0 we could initialize the CodeBlock::ctiCode to point to a trampoline that would trigger the translation.
2588 // Put the return value in dst. In the interpreter, op_ret does this.
2589 emitPutResult(instruction[i + 1].u.operand);
2593 case op_to_jsnumber: {
2594 m_jit.link(iter->from, m_jit.label());
// NOTE(review): suspected bug — this links the SAME jump (iter->from) a
// second time. The main pass records two slow cases for op_to_jsnumber
// (not-a-cell, and not-NumberType), so this line should almost certainly
// read m_jit.link((++iter)->from, ...), as every other two-slow-case
// opcode above does; otherwise the second jump is never linked and the
// iterator falls out of sync. Verify against the fast-path emission.
2595 m_jit.link(iter->from, m_jit.label());
2597 emitPutArg(X86::eax, 0);
2598 emitCTICall(i, Machine::cti_op_to_jsnumber);
2600 emitPutResult(instruction[i + 1].u.operand);
2606 ASSERT_NOT_REACHED();
// i was advanced past the current instruction inside the case (elided
// "i += n" lines), so this jumps back to the next opcode's fast path.
2610 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i]);
// Every structure-stub slot must have been consumed exactly once.
2613 ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
// Top-level driver: emits the prologue (spill return PC, register-file bounds
// check for function code), runs the three compilation passes, copies the
// generated buffer into executable memory, and then performs all relocation:
// switch jump tables, exception handlers, outgoing calls, jsr sites, and
// property-access stub bookkeeping.
2616 void CTI::privateCompile()
2618 // Could use a popl_m, but would need to offset the following instruction if so.
2619 m_jit.popl_r(X86::ecx);
// Stash the native return address in the call frame header.
2620 emitPutToCallFrameHeader(X86::ecx, RegisterFile::ReturnPC)<!-- -->;
2622 X86Assembler::JmpSrc slowRegisterFileCheck;
2623 X86Assembler::JmpDst afterRegisterFileCheck;
2624 if (m_codeBlock->codeType == FunctionCode) {
// Check that edi (callFrame) plus this function's register usage stays
// within the register file; overflow takes the out-of-line path below.
2625 emitGetCTIParam(CTI_ARGS_registerFile, X86::eax);
2626 m_jit.leal_mr(m_codeBlock->numCalleeRegisters * sizeof(Register), X86::edi, X86::edx);
2627 m_jit.cmpl_mr(OBJECT_OFFSET(RegisterFile, m_end), X86::eax, X86::edx);
2628 slowRegisterFileCheck = m_jit.emitUnlinkedJg();
2629 afterRegisterFileCheck = m_jit.label();
2632 privateCompileMainPass();
2633 privateCompileLinkPass();
2634 privateCompileSlowCases();
// Out-of-line register-file grow path; rejoins the body afterwards.
2636 if (m_codeBlock->codeType == FunctionCode) {
2637 m_jit.link(slowRegisterFileCheck, m_jit.label());
2638 emitCTICall(0, Machine::cti_register_file_check);
2639 X86Assembler::JmpSrc backToBody = m_jit.emitUnlinkedJmp();
2640 m_jit.link(backToBody, afterRegisterFileCheck);
2643 ASSERT(m_jmpTable.isEmpty());
// Copy the assembled buffer into its final (executable) location; all
// relocation below is performed against this final address.
2645 void* code = m_jit.copy();
2648 // Translate vPC offsets into addresses in JIT generated code, for switch tables.
2649 for (unsigned i = 0; i < m_switches.size(); ++i) {
2650 SwitchRecord record = m_switches[i];
2651 unsigned opcodeIndex = record.m_opcodeIndex;
2653 if (record.m_type != SwitchRecord::String) {
2654 ASSERT(record.m_type == SwitchRecord::Immediate || record.m_type == SwitchRecord::Character);
2655 ASSERT(record.m_jumpTable.m_simpleJumpTable->branchOffsets.size() == record.m_jumpTable.m_simpleJumpTable->ctiOffsets.size());
// "+ 3" skips past the 3-operand switch opcode; offset 0 means "use default".
2657 record.m_jumpTable.m_simpleJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
2659 for (unsigned j = 0; j < record.m_jumpTable.m_simpleJumpTable->branchOffsets.size(); ++j) {
2660 unsigned offset = record.m_jumpTable.m_simpleJumpTable->branchOffsets[j];
2661 record.m_jumpTable.m_simpleJumpTable->ctiOffsets[j] = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_simpleJumpTable->ctiDefault;
2664 ASSERT(record.m_type == SwitchRecord::String);
2666 record.m_jumpTable.m_stringJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
2668 StringJumpTable::StringOffsetTable::iterator end = record.m_jumpTable.m_stringJumpTable->offsetTable.end();
2669 for (StringJumpTable::StringOffsetTable::iterator it = record.m_jumpTable.m_stringJumpTable->offsetTable.begin(); it != end; ++it) {
2670 unsigned offset = it->second.branchOffset;
2671 it->second.ctiOffset = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_stringJumpTable->ctiDefault;
// Resolve exception-handler targets to native addresses.
2676 for (Vector<HandlerInfo>::iterator iter = m_codeBlock->exceptionHandlers.begin(); iter != m_codeBlock->exceptionHandlers.end(); ++iter)
2677 iter->nativeCode = m_jit.getRelocatedAddress(code, m_labels[iter->target]);
// Link every emitted call and record its return address -> vPC mapping
// (used to recover the bytecode location from a native return address).
2679 for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
2681 X86Assembler::link(code, iter->from, iter->to);
2682 m_codeBlock->ctiReturnAddressVPCMap.add(m_jit.getRelocatedAddress(code, iter->from), iter->opcodeIndex);
2685 // Link absolute addresses for jsr
2686 for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
2687 X86Assembler::linkAbsoluteAddress(code, iter->addrPosition, iter->target);
// Finalize the per-property-access repatch info gathered by the passes.
2689 for (unsigned i = 0; i < m_codeBlock->structureIDInstructions.size(); ++i) {
2690 StructureStubInfo& info = m_codeBlock->structureIDInstructions[i];
2691 info.callReturnLocation = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].callReturnLocation);
2692 info.hotPathBegin = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].hotPathBegin);
// Publish the finished code on the CodeBlock.
2695 m_codeBlock->ctiCode = code;
// Generates a monomorphic "self" get_by_id stub: if the base (in eax) is a
// cell with the expected StructureID, load the property directly from its
// storage at cachedOffset; otherwise bail to cti_op_get_by_id_fail. The stub
// is installed by repatching the original call site (returnAddress).
2698 void CTI::privateCompileGetByIdSelf(StructureID* structureID, size_t cachedOffset, void* returnAddress)
2700 // Check eax is an object of the right StructureID.
2701 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2702 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2703 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2704 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2706 // Checks out okay! - getDirectOffset
2707 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2708 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
2711 void* code = m_jit.copy();
// Both failure paths fall back to the generic (non-repatching) fail handler.
2714 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2715 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
// Record the stub so it can be freed later, then swing the original call
// at returnAddress over to this stub.
2717 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2719 ctiRepatchCallByReturnAddress(returnAddress, code);
// Compile a get_by_id stub for a property found on the base object's direct
// prototype.  Guards: the base is a cell with the expected StructureID, and
// the prototype's StructureID is unchanged; on success, load the cached slot
// from the prototype's property storage.
// With CTI_REPATCH_PIC the stub is wired into the existing inline cache
// (failures jump back to the original slow case, success jumps back into the
// hot path); otherwise a standalone stub is installed via call repatching.
void CTI::privateCompileGetByIdProto(StructureID* structureID, StructureID* prototypeStructureID, size_t cachedOffset, void* returnAddress)
#if USE(CTI_REPATCH_PIC)
    StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);

    // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));

    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_callFrame));
    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);

    // check eax is an object of the right StructureID.
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();

    // Check the prototype object's StructureID had not changed.
    StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
    X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();

    // Checks out okay! - getDirectOffset
    m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::ecx);

    X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();

    void* code = m_jit.copy();

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
    X86Assembler::link(code, failureCases1, slowCaseBegin);
    X86Assembler::link(code, failureCases2, slowCaseBegin);
    X86Assembler::link(code, failureCases3, slowCaseBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;

    // Finally repatch the jump to slow case back in the hot path to jump here instead.
    // FIXME: should revert this repatching, on failure.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);

    // Non-PIC variant (the '#else' line falls outside this excerpt): build a
    // standalone stub whose failures go straight to the generic slow case.
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_callFrame));
    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);

    // check eax is an object of the right StructureID.
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();

    // Check the prototype object's StructureID had not changed.
    StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
    X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();

    // Checks out okay! - getDirectOffset
    m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);

    void* code = m_jit.copy();

    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));

    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
// Compile a get_by_id stub for a property found 'count' links up the
// prototype chain: guard the base object's StructureID and each prototype's
// StructureID along the chain, then load the cached slot from the final
// prototype's property storage.  All guard failures funnel to the generic
// cti_op_get_by_id_fail slow case.
void CTI::privateCompileGetByIdChain(StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset, void* returnAddress)
    // Collects every failure jump; all are linked to the same slow case below.
    Vector<X86Assembler::JmpSrc> bucketsOfFail;

    // Check eax is an object of the right StructureID.
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    bucketsOfFail.append(m_jit.emitUnlinkedJne());
    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
    bucketsOfFail.append(m_jit.emitUnlinkedJne());

    // Walk the prototype chain, guarding each prototype's StructureID in turn.
    StructureID* currStructureID = structureID;
    RefPtr<StructureID>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i<count; ++i) {
        protoObject = static_cast<JSObject*>(currStructureID->prototypeForLookup(m_callFrame));
        currStructureID = chainEntries[i].get();

        // Check the prototype object's StructureID had not changed.
        StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
        m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(currStructureID), static_cast<void*>(protoStructureIDAddress));
        bucketsOfFail.append(m_jit.emitUnlinkedJne());
    ASSERT(protoObject);

    // Load the cached slot out of the final prototype's property storage.
    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    bucketsOfFail.append(m_jit.emitUnlinkedJmp());

    void* code = m_jit.copy();

    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
        X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));

    // Track the stub for later deletion, then point the call site at it.
    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
// Compile a put_by_id stub for overwriting an existing property (no
// structure transition): guard that eax holds a cell with the expected
// StructureID, then store edx (the value to write) into the cached
// property-storage slot.  Guard failures go to cti_op_put_by_id_fail.
void CTI::privateCompilePutByIdReplace(StructureID* structureID, size_t cachedOffset, void* returnAddress)
    // check eax is an object of the right StructureID.
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();

    // checks out okay! - putDirectOffset
    m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);

    void* code = m_jit.copy();

    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));

    // Track the stub for later deletion, then point the call site at it.
    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
2878 static JSValue* transitionObject(StructureID* newStructureID, size_t cachedOffset, JSObject* baseObject, JSValue* value)
2880 baseObject->transitionTo(newStructureID);
2881 baseObject->putDirectOffset(cachedOffset, value);
2887 static inline bool transitionWillNeedStorageRealloc(StructureID* oldStructureID, StructureID* newStructureID)
2889 return oldStructureID->propertyStorageCapacity() != newStructureID->propertyStorageCapacity();
// Compile a put_by_id stub that transitions the base object from
// oldStructureID to newStructureID (adding a property at cachedOffset).
// Guards: eax is a cell with oldStructureID, and every StructureID on the
// prototype chain (sIDC) is unchanged and still of ObjectType.  When the
// transition doesn't change storage capacity the stub adjusts the two
// StructureIDs' refcounts and writes the value inline; otherwise it calls
// out to transitionObject() to do the heavy lifting.
void CTI::privateCompilePutByIdTransition(StructureID* oldStructureID, StructureID* newStructureID, size_t cachedOffset, StructureIDChain* sIDC, void* returnAddress)
    Vector<X86Assembler::JmpSrc, 16> failureCases;
    // check eax is an object of the right StructureID.
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    failureCases.append(m_jit.emitUnlinkedJne());
    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(oldStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
    failureCases.append(m_jit.emitUnlinkedJne());
    Vector<X86Assembler::JmpSrc> successCases;

    // ecx = baseObject->m_structureID; guard the base is still of ObjectType,
    // then load proto(ecx) = baseObject->structureID()->prototype().
    m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
    m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
    failureCases.append(m_jit.emitUnlinkedJne());
    m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);

    // Walk the prototype chain; reaching a null prototype means every link
    // checked out, so jump to the success label.
    for (RefPtr<StructureID>* it = sIDC->head(); *it; ++it) {
        // null check the prototype
        m_jit.cmpl_i32r(reinterpret_cast<intptr_t> (jsNull()), X86::ecx);
        successCases.append(m_jit.emitUnlinkedJe());

        // Check the structure id
        m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(it->get()), OBJECT_OFFSET(JSCell, m_structureID), X86::ecx);
        failureCases.append(m_jit.emitUnlinkedJne());

        // Guard this prototype is still of ObjectType, then advance
        // ecx to its own prototype for the next iteration.
        m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::ecx, X86::ecx);
        m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
        failureCases.append(m_jit.emitUnlinkedJne());
        m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);

    // NOTE(review): this Jne is appended right after the prototype load above;
    // the compare it belongs to appears to fall outside this excerpt - confirm.
    failureCases.append(m_jit.emitUnlinkedJne());
    // All chain checks passed - bind every success jump to this point.
    for (unsigned i = 0; i < successCases.size(); ++i)
        m_jit.link(successCases[i], m_jit.label());

    X86Assembler::JmpSrc callTarget;
    // Fast case, don't need to do any heavy lifting, so don't bother making a call.
    if (!transitionWillNeedStorageRealloc(oldStructureID, newStructureID)) {
        // Assumes m_refCount can be decremented easily, refcount decrement is safe as
        // codeblock should ensure oldStructureID->m_refCount > 0
        m_jit.subl_i8m(1, reinterpret_cast<void*>(oldStructureID));
        m_jit.addl_i8m(1, reinterpret_cast<void*>(newStructureID));
        m_jit.movl_i32m(reinterpret_cast<uint32_t>(newStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);

        // write the value into the property storage at the cached slot
        m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
        m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
        // Slow case transition -- we're going to need to do quite a bit of work,
        // so just make a call: push value, baseObject, cachedOffset and
        // newStructureID as arguments for transitionObject(), then clean up.
        m_jit.pushl_r(X86::edx);
        m_jit.pushl_r(X86::eax);
        m_jit.movl_i32r(cachedOffset, X86::eax);
        m_jit.pushl_r(X86::eax);
        m_jit.movl_i32r(reinterpret_cast<uint32_t>(newStructureID), X86::eax);
        m_jit.pushl_r(X86::eax);
        callTarget = m_jit.emitCall();
        m_jit.addl_i32r(4 * sizeof(void*), X86::esp);

    // If any guard failed, restore the argument reference and jump to the slow case.
    X86Assembler::JmpSrc failureJump;
    if (failureCases.size()) {
        for (unsigned i = 0; i < failureCases.size(); ++i)
            m_jit.link(failureCases[i], m_jit.label());
        m_jit.emitRestoreArgumentReferenceForTrampoline();
        failureJump = m_jit.emitUnlinkedJmp();

    void* code = m_jit.copy();

    if (failureCases.size())
        X86Assembler::link(code, failureJump, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));

    // Only the realloc path emitted a call; link it to transitionObject().
    if (transitionWillNeedStorageRealloc(oldStructureID, newStructureID))
        X86Assembler::link(code, callTarget, reinterpret_cast<void*>(transitionObject));

    // Track the stub for later deletion, then point the call site at it.
    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
// Compile the shared trampoline for reading 'length' of a JSArray: guard eax
// is a cell whose vtable pointer matches JSArray's, load the length from the
// array storage, and re-encode it as a JSImmediate integer.  Any failure
// (including a length too large to encode) goes to cti_op_get_by_id_fail.
void* CTI::privateCompileArrayLengthTrampoline()
    // Check eax is an array (compare the cell's vtable pointer against JSArray's)
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
    m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();

    // Checks out okay! - get the length from the storage
    m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::eax);
    m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::eax, X86::eax);

    // Immediate-encode the length: eax = length * 2 + 1.  Signed overflow in
    // the doubling means it can't be represented as an immediate int - bail.
    m_jit.addl_rr(X86::eax, X86::eax);
    X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
    m_jit.addl_i8r(1, X86::eax);

    void* code = m_jit.copy();

    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    // NOTE(review): no 'return code;' is visible for this void* function -
    // confirm the return statement was not lost from this excerpt.
// Compile the shared trampoline for reading 'length' of a JSString: guard eax
// is a cell whose vtable pointer matches JSString's, load the length from the
// UString rep, and re-encode it as a JSImmediate integer.  Any failure
// (including a length too large to encode) goes to cti_op_get_by_id_fail.
void* CTI::privateCompileStringLengthTrampoline()
    // Check eax is a string (compare the cell's vtable pointer against JSString's)
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
    m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsStringVptr), X86::eax);
    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();

    // Checks out okay! - get the length from the Ustring.
    m_jit.movl_mr(OBJECT_OFFSET(JSString, m_value) + OBJECT_OFFSET(UString, m_rep), X86::eax, X86::eax);
    m_jit.movl_mr(OBJECT_OFFSET(UString::Rep, len), X86::eax, X86::eax);

    // Immediate-encode the length: eax = length * 2 + 1.  Signed overflow in
    // the doubling means it can't be represented as an immediate int - bail.
    m_jit.addl_rr(X86::eax, X86::eax);
    X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
    m_jit.addl_i8r(1, X86::eax);

    void* code = m_jit.copy();

    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    // NOTE(review): no 'return code;' is visible for this void* function -
    // confirm the return statement was not lost from this excerpt.
// Repatch an existing inline-cached get_by_id in place for the 'self' case:
// no new stub is generated - the slow-case call is pointed at the generic
// routine, and the StructureID immediate and property-map displacement that
// were compiled into the hot path are rewritten to the new cached values.
void CTI::patchGetByIdSelf(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
    StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);

    // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
    // Should probably go to Machine::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_generic));

    // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
    X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
    X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdStructureID, reinterpret_cast<uint32_t>(structureID));
// Repatch an existing inline-cached put_by_id in place for the 'replace'
// case: point the slow-case call at the generic routine, then rewrite the
// StructureID immediate and property-map displacement in the hot path.
void CTI::patchPutByIdReplace(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
    StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);

    // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to Machine::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_put_by_id_generic));

    // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
    X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
    X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdStructureID, reinterpret_cast<uint32_t>(structureID));
// Repatch an inline-cached get_by_id that turned out to be 'array.length':
// build a PIC stub that checks eax is a JSArray, loads the length from the
// array storage into ecx and immediate-encodes it, then wire the stub into
// the existing hot path (success jumps back in; failures jump to the
// original slow case).
void CTI::privateCompilePatchGetArrayLength(void* returnAddress)
    StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);

    // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));

    // Check eax is an array (compare the cell's vtable pointer against JSArray's)
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
    m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();

    // Checks out okay! - get the length from the storage
    m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
    m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::ecx, X86::ecx);

    // Immediate-encode the length: ecx = length * 2 + 1; bail on signed
    // overflow since the value can't be represented as an immediate int.
    m_jit.addl_rr(X86::ecx, X86::ecx);
    X86Assembler::JmpSrc failureClobberedECX = m_jit.emitUnlinkedJo();
    m_jit.addl_i8r(1, X86::ecx);

    X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();

    // The overflow path has clobbered ecx - restore the argument reference
    // before heading to the slow case.
    m_jit.link(failureClobberedECX, m_jit.label());
    m_jit.emitRestoreArgumentReference();
    X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJmp();

    void* code = m_jit.copy();

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
    X86Assembler::link(code, failureCases1, slowCaseBegin);
    X86Assembler::link(code, failureCases2, slowCaseBegin);
    X86Assembler::link(code, failureCases3, slowCaseBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;

    // Finally repatch the jump to slow case back in the hot path to jump here instead.
    // FIXME: should revert this repatching, on failure.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
// Emit code to load variableObject->d->registers[index] into 'dst':
// three chained loads through the JSVariableObject's data pointer and its
// register array.  'dst' is used as the scratch for the intermediate
// pointers, so it may alias 'variableObject' only if that register is dead.
void CTI::emitGetVariableObjectRegister(X86Assembler::RegisterID variableObject, int index, X86Assembler::RegisterID dst)
    m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, dst);
    m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), dst, dst);
    m_jit.movl_mr(index * sizeof(Register), dst, dst);
// Emit code to store 'src' into variableObject->d->registers[index].
// Note: the 'variableObject' register is clobbered - it is reused to hold
// the intermediate data and register-array pointers.
void CTI::emitPutVariableObjectRegister(X86Assembler::RegisterID src, X86Assembler::RegisterID variableObject, int index)
    m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, variableObject);
    m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), variableObject, variableObject);
    m_jit.movl_rm(src, index * sizeof(Register), variableObject);
3124 void* CTI::compileRegExp(Machine* machine, const UString& pattern, unsigned* numSubpatterns_ptr, const char** error_ptr, bool ignoreCase, bool multiline)
3126 // TODO: better error messages
3127 if (pattern.size() > MaxPatternSize) {
3128 *error_ptr = "regular expression too large";
3132 X86Assembler jit(machine->jitCodeBuffer());
3133 WRECParser parser(pattern, ignoreCase, multiline, jit);
3135 jit.emitConvertToFastCall();
3137 // Preserve regs & initialize outputRegister.
3138 jit.pushl_r(WRECGenerator::outputRegister);
3139 jit.pushl_r(WRECGenerator::currentValueRegister);
3140 // push pos onto the stack, both to preserve and as a parameter available to parseDisjunction
3141 jit.pushl_r(WRECGenerator::currentPositionRegister);
3142 // load output pointer
3147 , X86::esp, WRECGenerator::outputRegister);
3149 // restart point on match fail.
3150 WRECGenerator::JmpDst nextLabel = jit.label();
3152 // (1) Parse Disjunction:
3154 // Parsing the disjunction should fully consume the pattern.
3155 JmpSrcVector failures;
3156 parser.parseDisjunction(failures);
3157 if (parser.isEndOfPattern()) {
3158 parser.m_err = WRECParser::Error_malformedPattern;
3161 // TODO: better error messages
3162 *error_ptr = "TODO: better error messages";
3167 // Set return value & pop registers from the stack.
3169 jit.testl_rr(WRECGenerator::outputRegister, WRECGenerator::outputRegister);
3170 WRECGenerator::JmpSrc noOutput = jit.emitUnlinkedJe();
3172 jit.movl_rm(WRECGenerator::currentPositionRegister, 4, WRECGenerator::outputRegister);
3173 jit.popl_r(X86::eax);
3174 jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
3175 jit.popl_r(WRECGenerator::currentValueRegister);
3176 jit.popl_r(WRECGenerator::outputRegister);
3179 jit.link(noOutput, jit.label());
3181 jit.popl_r(X86::eax);
3182 jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
3183 jit.popl_r(WRECGenerator::currentValueRegister);
3184 jit.popl_r(WRECGenerator::outputRegister);
3188 // All fails link to here. Progress the start point & if it is within scope, loop.
3189 // Otherwise, return fail value.
3190 WRECGenerator::JmpDst here = jit.label();
3191 for (unsigned i = 0; i < failures.size(); ++i)
3192 jit.link(failures[i], here);
3195 jit.movl_mr(X86::esp, WRECGenerator::currentPositionRegister);
3196 jit.addl_i8r(1, WRECGenerator::currentPositionRegister);
3197 jit.movl_rm(WRECGenerator::currentPositionRegister, X86::esp);
3198 jit.cmpl_rr(WRECGenerator::lengthRegister, WRECGenerator::currentPositionRegister);
3199 jit.link(jit.emitUnlinkedJle(), nextLabel);
3201 jit.addl_i8r(4, X86::esp);
3203 jit.movl_i32r(-1, X86::eax);
3204 jit.popl_r(WRECGenerator::currentValueRegister);
3205 jit.popl_r(WRECGenerator::outputRegister);
3208 *numSubpatterns_ptr = parser.m_numSubpatterns;
3210 void* code = jit.copy();
3215 #endif // ENABLE(WREC)
3219 #endif // ENABLE(CTI)