2 * Copyright (C) 2008 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "CodeBlock.h"
34 #include "wrec/WREC.h"
40 #if COMPILER(GCC) && PLATFORM(X86)
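// ctiTrampoline reserves 0x24 bytes of outgoing-argument space, seeds %esi with the initial
// slow-script tick count (see emitSlowScriptCheck), and calls the generated code whose address
// is passed in the CTI argument area (0x30(%esp), i.e. CTI_ARGS_code).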
42 ".globl _ctiTrampoline" "\n"
43 "_ctiTrampoline:" "\n"
46 "subl $0x24, %esp" "\n"
47 "movl $512, %esi" "\n"
48 "call *0x30(%esp)" "\n" //Ox30 = 0x0C * 4, 0x0C = CTI_ARGS_code
49 "addl $0x24, %esp" "\n"
56 ".globl _ctiVMThrowTrampoline" "\n"
57 "_ctiVMThrowTrampoline:" "\n"
59 "movl 0x34(%esp), %ecx" "\n" //Ox34 = 0x0D * 4, 0x0D = CTI_ARGS_exec
60 "cmpl $0, 8(%ecx)" "\n"
65 "call __ZN3JSC7Machine12cti_vm_throwEPv" "\n"
66 "addl $0x24, %esp" "\n"
76 __declspec(naked) JSValue* ctiTrampoline(void* code, ExecState* exec, RegisterFile* registerFile, Register* r, ScopeChainNode* scopeChain, CodeBlock* codeBlock, JSValue** exception, Profiler**)
92 __declspec(naked) void ctiVMThrowTrampoline()
96 call JSC::Machine::cti_vm_throw;
109 ALWAYS_INLINE bool CTI::isConstant(int src)
111 return src >= m_codeBlock->numVars && src < m_codeBlock->numVars + m_codeBlock->numConstants;
114 ALWAYS_INLINE JSValue* CTI::getConstant(ExecState* exec, int src)
116 return m_codeBlock->constantRegisters[src - m_codeBlock->numVars].jsValue(exec);
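// Throughout the generated code %edi holds the current Register* (r), so register-file slots are
// addressed as operand * sizeof(Register) offsets from %edi (see emitGetCTIParam(CTI_ARGS_r, ...)).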
119 // emitGetArg loads an argument from the SF (stack frame) register array into a hardware register
120 ALWAYS_INLINE void CTI::emitGetArg(unsigned src, X86Assembler::RegisterID dst)
122 // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
123 if (isConstant(src)) {
124 JSValue* js = getConstant(m_exec, src);
125 m_jit.movl_i32r(reinterpret_cast<unsigned>(js), dst);
127 m_jit.movl_mr(src * sizeof(Register), X86::edi, dst);
130 // emitGetPutArg copies an argument from the SF register array onto the stack, as an argument to a context-threaded function.
131 ALWAYS_INLINE void CTI::emitGetPutArg(unsigned src, unsigned offset, X86Assembler::RegisterID scratch)
133 if (isConstant(src)) {
134 JSValue* js = getConstant(m_exec, src);
135 m_jit.movl_i32m(reinterpret_cast<unsigned>(js), offset + sizeof(void*), X86::esp);
137 m_jit.movl_mr(src * sizeof(Register), X86::edi, scratch);
138 m_jit.movl_rm(scratch, offset + sizeof(void*), X86::esp);
142 // emitPutArg puts an argument onto the stack, as an argument to a context-threaded function.
143 ALWAYS_INLINE void CTI::emitPutArg(X86Assembler::RegisterID src, unsigned offset)
145 m_jit.movl_rm(src, offset + sizeof(void*), X86::esp);
148 ALWAYS_INLINE void CTI::emitPutArgConstant(unsigned value, unsigned offset)
150 m_jit.movl_i32m(value, offset + sizeof(void*), X86::esp);
153 ALWAYS_INLINE JSValue* CTI::getConstantImmediateNumericArg(unsigned src)
155 if (isConstant(src)) {
156 JSValue* js = getConstant(m_exec, src);
157 return JSImmediate::isNumber(js) ? js : 0;
162 ALWAYS_INLINE void CTI::emitPutCTIParam(void* value, unsigned name)
164 m_jit.movl_i32m(reinterpret_cast<intptr_t>(value), name * sizeof(void*), X86::esp);
167 ALWAYS_INLINE void CTI::emitPutCTIParam(X86Assembler::RegisterID from, unsigned name)
169 m_jit.movl_rm(from, name * sizeof(void*), X86::esp);
172 ALWAYS_INLINE void CTI::emitGetCTIParam(unsigned name, X86Assembler::RegisterID to)
174 m_jit.movl_mr(name * sizeof(void*), X86::esp, to);
177 ALWAYS_INLINE void CTI::emitPutToCallFrameHeader(X86Assembler::RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
179 m_jit.movl_rm(from, entry * sizeof(Register), X86::edi);
182 ALWAYS_INLINE void CTI::emitGetFromCallFrameHeader(RegisterFile::CallFrameHeaderEntry entry, X86Assembler::RegisterID to)
184 m_jit.movl_mr(entry * sizeof(Register), X86::edi, to);
187 ALWAYS_INLINE void CTI::emitPutResult(unsigned dst, X86Assembler::RegisterID from)
189 m_jit.movl_rm(from, dst * sizeof(Register), X86::edi);
190 // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
193 ALWAYS_INLINE void CTI::emitInitRegister(unsigned dst)
195 m_jit.movl_i32m(reinterpret_cast<unsigned>(jsUndefined()), dst * sizeof(Register), X86::edi);
196 // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
199 #if ENABLE(SAMPLING_TOOL)
200 unsigned inCalledCode = 0;
203 void ctiSetReturnAddress(void** where, void* what)
208 void ctiRepatchCallByReturnAddress(void* where, void* what)
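// 'where' is the return address of a near call, so the call's rel32 displacement occupies the four
// bytes immediately before it; writing (what - where) there redirects the call to 'what'.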
210 (static_cast<void**>(where))[-1] = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(what) - reinterpret_cast<uintptr_t>(where));
215 ALWAYS_INLINE void CTI::emitDebugExceptionCheck()
221 ALWAYS_INLINE void CTI::emitDebugExceptionCheck()
223 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
224 m_jit.cmpl_i32m(0, OBJECT_OFFSET(ExecState, m_exception), X86::ecx);
225 X86Assembler::JmpSrc noException = m_jit.emitUnlinkedJe();
227 m_jit.link(noException, m_jit.label());
230 void CTI::printOpcodeOperandTypes(unsigned src1, unsigned src2)
233 if (isConstant(src1)) {
234 JSValue* js = getConstant(m_exec, src1);
236 JSImmediate::isImmediate(js) ?
237 (JSImmediate::isNumber(js) ? 'i' :
238 JSImmediate::isBoolean(js) ? 'b' :
239 js->isUndefined() ? 'u' :
240 js->isNull() ? 'n' : '?')
242 (js->isString() ? 's' :
243 js->isObject() ? 'o' :
247 if (isConstant(src2)) {
248 JSValue* js = getConstant(m_exec, src2);
250 JSImmediate::isImmediate(js) ?
251 (JSImmediate::isNumber(js) ? 'i' :
252 JSImmediate::isBoolean(js) ? 'b' :
253 js->isUndefined() ? 'u' :
254 js->isNull() ? 'n' : '?')
256 (js->isString() ? 's' :
257 js->isObject() ? 'o' :
260 if ((which1 != '*') | (which2 != '*'))
261 fprintf(stderr, "Types %c %c\n", which1, which2);
266 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, X86::RegisterID r)
268 X86Assembler::JmpSrc call = m_jit.emitCall(r);
269 m_calls.append(CallRecord(call, opcodeIndex));
270 emitDebugExceptionCheck();
275 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_j helper)
277 #if ENABLE(SAMPLING_TOOL)
278 m_jit.movl_i32m(1, &inCalledCode);
280 X86Assembler::JmpSrc call = m_jit.emitCall();
281 m_calls.append(CallRecord(call, helper, opcodeIndex));
282 emitDebugExceptionCheck();
283 #if ENABLE(SAMPLING_TOOL)
284 m_jit.movl_i32m(0, &inCalledCode);
290 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_p helper)
292 #if ENABLE(SAMPLING_TOOL)
293 m_jit.movl_i32m(1, &inCalledCode);
295 X86Assembler::JmpSrc call = m_jit.emitCall();
296 m_calls.append(CallRecord(call, helper, opcodeIndex));
297 emitDebugExceptionCheck();
298 #if ENABLE(SAMPLING_TOOL)
299 m_jit.movl_i32m(0, &inCalledCode);
305 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_b helper)
307 #if ENABLE(SAMPLING_TOOL)
308 m_jit.movl_i32m(1, &inCalledCode);
310 X86Assembler::JmpSrc call = m_jit.emitCall();
311 m_calls.append(CallRecord(call, helper, opcodeIndex));
312 emitDebugExceptionCheck();
313 #if ENABLE(SAMPLING_TOOL)
314 m_jit.movl_i32m(0, &inCalledCode);
320 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_v helper)
322 #if ENABLE(SAMPLING_TOOL)
323 m_jit.movl_i32m(1, &inCalledCode);
325 X86Assembler::JmpSrc call = m_jit.emitCall();
326 m_calls.append(CallRecord(call, helper, opcodeIndex));
327 emitDebugExceptionCheck();
328 #if ENABLE(SAMPLING_TOOL)
329 m_jit.movl_i32m(0, &inCalledCode);
335 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_s helper)
337 #if ENABLE(SAMPLING_TOOL)
338 m_jit.movl_i32m(1, &inCalledCode);
340 X86Assembler::JmpSrc call = m_jit.emitCall();
341 m_calls.append(CallRecord(call, helper, opcodeIndex));
342 emitDebugExceptionCheck();
343 #if ENABLE(SAMPLING_TOOL)
344 m_jit.movl_i32m(0, &inCalledCode);
350 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotJSCell(X86Assembler::RegisterID reg, unsigned opcodeIndex)
352 m_jit.testl_i32r(JSImmediate::TagMask, reg);
353 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), opcodeIndex));
356 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImmNum(X86Assembler::RegisterID reg, unsigned opcodeIndex)
358 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, reg);
359 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), opcodeIndex));
362 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImmNums(X86Assembler::RegisterID reg1, X86Assembler::RegisterID reg2, unsigned opcodeIndex)
364 m_jit.movl_rr(reg1, X86::ecx);
365 m_jit.andl_rr(reg2, X86::ecx);
366 emitJumpSlowCaseIfNotImmNum(X86::ecx, opcodeIndex);
369 ALWAYS_INLINE unsigned CTI::getDeTaggedConstantImmediate(JSValue* imm)
371 ASSERT(JSImmediate::isNumber(imm));
372 return reinterpret_cast<unsigned>(imm) & ~JSImmediate::TagBitTypeInteger;
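// Fast-arith helpers: an immediate integer stores its value shifted left by one bit with
// TagBitTypeInteger set in the low bit, so de-tagging subtracts/clears that bit, re-tagging adds or
// ORs it back in, and the Imm <-> Int conversions are single-bit shifts.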
375 ALWAYS_INLINE void CTI::emitFastArithDeTagImmediate(X86Assembler::RegisterID reg)
377 // op_mod relies on this being a sub - setting zf if result is 0.
378 m_jit.subl_i8r(JSImmediate::TagBitTypeInteger, reg);
381 ALWAYS_INLINE void CTI::emitFastArithReTagImmediate(X86Assembler::RegisterID reg)
383 m_jit.addl_i8r(JSImmediate::TagBitTypeInteger, reg);
386 ALWAYS_INLINE void CTI::emitFastArithPotentiallyReTagImmediate(X86Assembler::RegisterID reg)
388 m_jit.orl_i32r(JSImmediate::TagBitTypeInteger, reg);
391 ALWAYS_INLINE void CTI::emitFastArithImmToInt(X86Assembler::RegisterID reg)
393 m_jit.sarl_i8r(1, reg);
396 ALWAYS_INLINE void CTI::emitFastArithIntToImmOrSlowCase(X86Assembler::RegisterID reg, unsigned opcodeIndex)
398 m_jit.addl_rr(reg, reg);
399 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), opcodeIndex));
400 emitFastArithReTagImmediate(reg);
403 ALWAYS_INLINE void CTI::emitFastArithIntToImmNoCheck(X86Assembler::RegisterID reg)
405 m_jit.addl_rr(reg, reg);
406 emitFastArithReTagImmediate(reg);
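// Convert the 0/1 result of a setcc into a boolean immediate: shift it into the extended payload
// and OR in the full boolean tag.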
409 ALWAYS_INLINE void CTI::emitTagAsBoolImmediate(X86Assembler::RegisterID reg)
411 m_jit.shl_i8r(JSImmediate::ExtendedPayloadShift, reg);
412 m_jit.orl_i32r(JSImmediate::FullTagTypeBool, reg);
415 CTI::CTI(Machine* machine, ExecState* exec, CodeBlock* codeBlock)
416 : m_jit(machine->jitCodeBuffer())
419 , m_codeBlock(codeBlock)
420 , m_labels(codeBlock ? codeBlock->instructions.size() : 0)
421 , m_structureStubCompilationInfo(codeBlock ? codeBlock->structureIDInstructions.size() : 0)
425 #define CTI_COMPILE_BINARY_OP(name) \
427 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
428 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
429 emitCall(i, Machine::cti_##name); \
430 emitPutResult(instruction[i + 1].u.operand); \
435 #define CTI_COMPILE_UNARY_OP(name) \
437 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
438 emitCall(i, Machine::cti_##name); \
439 emitPutResult(instruction[i + 1].u.operand); \
444 #if ENABLE(SAMPLING_TOOL)
445 OpcodeID currentOpcodeID = static_cast<OpcodeID>(-1);
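// compileOpCall emits the common call sequence: marshal the helper arguments, check the callee
// against the JSFunction vptr on the fast path, and fall back to the NotJSFunction / NotJSConstruct
// helpers for host callees.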
448 void CTI::compileOpCall(Instruction* instruction, unsigned i, CompileOpCallType type)
450 int dst = instruction[i + 1].u.operand;
451 int firstArg = instruction[i + 4].u.operand;
452 int argCount = instruction[i + 5].u.operand;
453 int registerOffset = instruction[i + 6].u.operand;
455 if (type == OpCallEval)
456 emitGetPutArg(instruction[i + 3].u.operand, 16, X86::ecx);
458 if (type == OpConstruct) {
459 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 20);
460 emitPutArgConstant(argCount, 16);
461 emitPutArgConstant(registerOffset, 12);
462 emitPutArgConstant(firstArg, 8);
463 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
465 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 12);
466 emitPutArgConstant(argCount, 8);
467 emitPutArgConstant(registerOffset, 4);
469 int thisVal = instruction[i + 3].u.operand;
470 if (thisVal == missingThisObjectMarker()) {
471 // FIXME: should this be loaded dynamically off m_exec?
472 m_jit.movl_i32m(reinterpret_cast<unsigned>(m_exec->globalThisValue()), firstArg * sizeof(Register), X86::edi);
474 emitGetArg(thisVal, X86::ecx);
475 emitPutResult(firstArg, X86::ecx);
479 X86Assembler::JmpSrc wasEval;
480 if (type == OpCallEval) {
481 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
482 emitCall(i, Machine::cti_op_call_eval);
483 m_jit.emitRestoreArgumentReference();
485 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
487 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(JSImmediate::impossibleValue()), X86::eax);
488 wasEval = m_jit.emitUnlinkedJne();
490 // this sets up the first arg to op_cti_call (func), and explicitly leaves the value in ecx (checked just below).
491 emitGetArg(instruction[i + 2].u.operand, X86::ecx);
493 // this sets up the first arg to op_cti_call (func), and explicitly leaves the value in ecx (checked just below).
494 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
497 // Fast check for JS function.
498 m_jit.testl_i32r(JSImmediate::TagMask, X86::ecx);
499 X86Assembler::JmpSrc isNotObject = m_jit.emitUnlinkedJne();
500 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsFunctionVptr), X86::ecx);
501 X86Assembler::JmpSrc isJSFunction = m_jit.emitUnlinkedJe();
502 m_jit.link(isNotObject, m_jit.label());
504 // This handles host functions
505 emitCall(i, ((type == OpConstruct) ? Machine::cti_op_construct_NotJSConstruct : Machine::cti_op_call_NotJSFunction));
507 X86Assembler::JmpSrc wasNotJSFunction = m_jit.emitUnlinkedJmp();
508 m_jit.link(isJSFunction, m_jit.label());
510 // This handles JSFunctions
511 emitCall(i, ((type == OpConstruct) ? Machine::cti_op_construct_JSConstruct : Machine::cti_op_call_JSFunction));
513 // Initialize the parts of the call frame that have not already been initialized.
514 emitGetCTIParam(CTI_ARGS_r, X86::edi);
515 m_jit.movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), RegisterFile::CallerCodeBlock * static_cast<int>(sizeof(Register)), X86::edi);
516 m_jit.movl_i32m(dst, RegisterFile::ReturnValueRegister * static_cast<int>(sizeof(Register)), X86::edi);
518 // Check the ctiCode has been generated - if not, this is handled in a slow case.
519 m_jit.testl_rr(X86::eax, X86::eax);
520 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
521 emitCall(i, X86::eax);
523 // In the interpreter the following actions are performed by op_ret:
525 // Restore ExecState::m_scopeChain and CTI_ARGS_scopeChain. NOTE: After
526 // op_ret, %edx holds the caller's scope chain.
527 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
528 emitPutCTIParam(X86::edx, CTI_ARGS_scopeChain);
529 m_jit.movl_rm(X86::edx, OBJECT_OFFSET(ExecState, m_scopeChain), X86::ecx);
530 // Restore ExecState::m_callFrame.
531 m_jit.movl_rm(X86::edi, OBJECT_OFFSET(ExecState, m_callFrame), X86::ecx);
532 // Restore CTI_ARGS_codeBlock.
533 emitPutCTIParam(m_codeBlock, CTI_ARGS_codeBlock);
535 X86Assembler::JmpDst end = m_jit.label();
536 m_jit.link(wasNotJSFunction, end);
537 if (type == OpCallEval)
538 m_jit.link(wasEval, end);
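// compileOpStrictEq fast path: when both operands are immediates a single compare decides the
// result; a cell compared against a non-zero immediate yields the negated answer directly, while
// cell/cell pairs and anything involving the zero immediate take the slow case.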
543 void CTI::compileOpStrictEq(Instruction* instruction, unsigned i, CompileOpStrictEqType type)
545 bool negated = (type == OpNStrictEq);
547 unsigned dst = instruction[i + 1].u.operand;
548 unsigned src1 = instruction[i + 2].u.operand;
549 unsigned src2 = instruction[i + 3].u.operand;
551 emitGetArg(src1, X86::eax);
552 emitGetArg(src2, X86::edx);
554 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
555 X86Assembler::JmpSrc firstNotImmediate = m_jit.emitUnlinkedJe();
556 m_jit.testl_i32r(JSImmediate::TagMask, X86::edx);
557 X86Assembler::JmpSrc secondNotImmediate = m_jit.emitUnlinkedJe();
559 m_jit.cmpl_rr(X86::edx, X86::eax);
561 m_jit.setne_r(X86::eax);
563 m_jit.sete_r(X86::eax);
564 m_jit.movzbl_rr(X86::eax, X86::eax);
565 emitTagAsBoolImmediate(X86::eax);
567 X86Assembler::JmpSrc bothWereImmediates = m_jit.emitUnlinkedJmp();
569 m_jit.link(firstNotImmediate, m_jit.label());
571 // check that edx is immediate but not the zero immediate
572 m_jit.testl_i32r(JSImmediate::TagMask, X86::edx);
573 m_jit.setz_r(X86::ecx);
574 m_jit.movzbl_rr(X86::ecx, X86::ecx); // ecx is now 1 if edx was nonimmediate
575 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::edx);
576 m_jit.sete_r(X86::edx);
577 m_jit.movzbl_rr(X86::edx, X86::edx); // edx is now 1 if edx was the 0 immediate
578 m_jit.orl_rr(X86::ecx, X86::edx);
580 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJnz(), i));
582 m_jit.movl_i32r(reinterpret_cast<uint32_t>(jsBoolean(negated)), X86::eax);
584 X86Assembler::JmpSrc firstWasNotImmediate = m_jit.emitUnlinkedJmp();
586 m_jit.link(secondNotImmediate, m_jit.label());
587 // check that eax is not the zero immediate (we know it must be immediate)
588 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
589 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
591 m_jit.movl_i32r(reinterpret_cast<uint32_t>(jsBoolean(negated)), X86::eax);
593 m_jit.link(bothWereImmediates, m_jit.label());
594 m_jit.link(firstWasNotImmediate, m_jit.label());
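// emitSlowScriptCheck decrements the tick counter kept in %esi; when it reaches zero we call
// cti_timeout_check and reload the counter from Machine::m_ticksUntilNextTimeoutCheck.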
599 void CTI::emitSlowScriptCheck(unsigned opcodeIndex)
601 m_jit.subl_i8r(1, X86::esi);
602 X86Assembler::JmpSrc skipTimeout = m_jit.emitUnlinkedJne();
603 emitCall(opcodeIndex, Machine::cti_timeout_check);
605 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
606 m_jit.movl_mr(OBJECT_OFFSET(ExecState, m_globalData), X86::ecx, X86::ecx);
607 m_jit.movl_mr(OBJECT_OFFSET(JSGlobalData, machine), X86::ecx, X86::ecx);
608 m_jit.movl_mr(OBJECT_OFFSET(Machine, m_ticksUntilNextTimeoutCheck), X86::ecx, X86::esi);
609 m_jit.link(skipTimeout, m_jit.label());
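// privateCompileMainPass makes a single forward pass over the bytecode, recording a label for each
// instruction, emitting fast-path code per opcode, and appending slow cases and jump-table entries
// to be resolved by the later passes.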
612 void CTI::privateCompileMainPass()
614 Instruction* instruction = m_codeBlock->instructions.begin();
615 unsigned instructionCount = m_codeBlock->instructions.size();
617 unsigned structureIDInstructionIndex = 0;
619 for (unsigned i = 0; i < instructionCount; ) {
620 m_labels[i] = m_jit.label();
622 #if ENABLE(SAMPLING_TOOL)
623 m_jit.movl_i32m(m_machine->getOpcodeID(instruction[i].u.opcode), &currentOpcodeID);
626 ASSERT_WITH_MESSAGE(m_machine->isOpcode(instruction[i].u.opcode), "privateCompileMainPass gone bad @ %d", i);
627 m_jit.emitRestoreArgumentReference();
628 switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
630 unsigned src = instruction[i + 2].u.operand;
632 m_jit.movl_i32r(reinterpret_cast<unsigned>(getConstant(m_exec, src)), X86::edx);
634 emitGetArg(src, X86::edx);
635 emitPutResult(instruction[i + 1].u.operand, X86::edx);
640 unsigned dst = instruction[i + 1].u.operand;
641 unsigned src1 = instruction[i + 2].u.operand;
642 unsigned src2 = instruction[i + 3].u.operand;
643 if (isConstant(src2)) {
644 JSValue* value = getConstant(m_exec, src2);
645 if (JSImmediate::isNumber(value)) {
646 emitGetArg(src1, X86::eax);
647 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
648 m_jit.addl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
649 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
654 } else if (!isConstant(src1)) {
655 emitGetArg(src1, X86::eax);
656 emitGetArg(src2, X86::edx);
657 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
658 emitFastArithDeTagImmediate(X86::eax);
659 m_jit.addl_rr(X86::edx, X86::eax);
660 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
665 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
666 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
667 emitCall(i, Machine::cti_op_add);
668 emitPutResult(instruction[i + 1].u.operand);
673 if (m_codeBlock->needsFullScopeChain)
674 emitCall(i, Machine::cti_op_end);
675 emitGetArg(instruction[i + 1].u.operand, X86::eax);
676 #if ENABLE(SAMPLING_TOOL)
677 m_jit.movl_i32m(-1, &currentOpcodeID);
679 m_jit.pushl_m(RegisterFile::ReturnPC * static_cast<int>(sizeof(Register)), X86::edi);
685 unsigned target = instruction[i + 1].u.operand;
686 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
691 int srcDst = instruction[i + 1].u.operand;
692 emitGetArg(srcDst, X86::eax);
693 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
694 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
695 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
696 emitPutResult(srcDst, X86::eax);
701 emitSlowScriptCheck(i);
703 unsigned target = instruction[i + 1].u.operand;
704 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
708 case op_loop_if_less: {
709 emitSlowScriptCheck(i);
711 unsigned target = instruction[i + 3].u.operand;
712 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
714 emitGetArg(instruction[i + 1].u.operand, X86::edx);
715 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
716 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
717 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
719 emitGetArg(instruction[i + 1].u.operand, X86::eax);
720 emitGetArg(instruction[i + 2].u.operand, X86::edx);
721 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
722 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
723 m_jit.cmpl_rr(X86::edx, X86::eax);
724 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
729 case op_loop_if_lesseq: {
730 emitSlowScriptCheck(i);
732 unsigned target = instruction[i + 3].u.operand;
733 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
735 emitGetArg(instruction[i + 1].u.operand, X86::edx);
736 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
737 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
738 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
740 emitGetArg(instruction[i + 1].u.operand, X86::eax);
741 emitGetArg(instruction[i + 2].u.operand, X86::edx);
742 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
743 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
744 m_jit.cmpl_rr(X86::edx, X86::eax);
745 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
750 case op_new_object: {
751 emitCall(i, Machine::cti_op_new_object);
752 emitPutResult(instruction[i + 1].u.operand);
757 // In order to be able to repatch both the StructureID and the object offset, we store one pointer,
758 // 'hotPathBegin', to just after the point where the arguments have been loaded into registers, and we
759 // generate code such that the StructureID & offset are always at the same distance from it.
761 emitGetArg(instruction[i + 1].u.operand, X86::eax);
762 emitGetArg(instruction[i + 3].u.operand, X86::edx);
764 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
765 X86Assembler::JmpDst hotPathBegin = m_jit.label();
766 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
767 ++structureIDInstructionIndex;
769 // Jump to a slow case if either the base object is an immediate, or if the StructureID does not match.
770 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
771 // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
772 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
773 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdStructureID);
774 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
776 // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
777 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
778 m_jit.movl_rm(X86::edx, repatchGetByIdDefaultOffset, X86::eax);
779 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdPropertyMapOffset);
785 // As for put_by_id, get_by_id requires the offset of the StructureID and the offset of the access to be repatched.
786 // Additionally, for get_by_id we need to repatch the offset of the branch to the slow case (we repatch this to jump
787 // to the array-length / prototype access trampolines), and finally we also use the property-map access offset as a label
788 // to jump back to if one of these trampolines finds a match.
790 emitGetArg(instruction[i + 2].u.operand, X86::eax);
792 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
794 X86Assembler::JmpDst hotPathBegin = m_jit.label();
795 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
796 ++structureIDInstructionIndex;
798 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
799 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
800 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdStructureID);
801 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
802 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdBranchToSlowCase);
804 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
805 m_jit.movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::ecx);
806 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdPropertyMapOffset);
807 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
812 case op_instanceof: {
813 emitGetArg(instruction[i + 2].u.operand, X86::eax); // value
814 emitGetArg(instruction[i + 3].u.operand, X86::ecx); // baseVal
815 emitGetArg(instruction[i + 4].u.operand, X86::edx); // proto
817 // check if any are immediates
818 m_jit.orl_rr(X86::eax, X86::ecx);
819 m_jit.orl_rr(X86::edx, X86::ecx);
820 m_jit.testl_i32r(JSImmediate::TagMask, X86::ecx);
822 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJnz(), i));
824 // check that all are object type - this is a bit of a bithack to avoid excess branching;
825 // we check that the sum of the three type codes from StructureIDs is exactly 3 * ObjectType;
826 // this works because NumberType and StringType are smaller
827 m_jit.movl_i32r(3 * ObjectType, X86::ecx);
828 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::eax);
829 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::edx, X86::edx);
830 m_jit.subl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::eax, X86::ecx);
831 m_jit.subl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::edx, X86::ecx);
832 emitGetArg(instruction[i + 3].u.operand, X86::edx); // reload baseVal
833 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::edx, X86::edx);
834 m_jit.cmpl_rm(X86::ecx, OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::edx);
836 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
838 // check that baseVal's flags include ImplementsHasInstance but not OverridesHasInstance
839 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::edx, X86::ecx);
840 m_jit.andl_i32r(ImplementsHasInstance | OverridesHasInstance, X86::ecx);
841 m_jit.cmpl_i32r(ImplementsHasInstance, X86::ecx);
843 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
845 emitGetArg(instruction[i + 2].u.operand, X86::ecx); // reload value
846 emitGetArg(instruction[i + 4].u.operand, X86::edx); // reload proto
848 // optimistically load true result
849 m_jit.movl_i32r(reinterpret_cast<int32_t>(jsBoolean(true)), X86::eax);
851 X86Assembler::JmpDst loop = m_jit.label();
853 // load value's prototype
854 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::ecx, X86::ecx);
855 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
857 m_jit.cmpl_rr(X86::ecx, X86::edx);
858 X86Assembler::JmpSrc exit = m_jit.emitUnlinkedJe();
860 m_jit.cmpl_i32r(reinterpret_cast<int32_t>(jsNull()), X86::ecx);
861 X86Assembler::JmpSrc goToLoop = m_jit.emitUnlinkedJne();
862 m_jit.link(goToLoop, loop);
864 m_jit.movl_i32r(reinterpret_cast<int32_t>(jsBoolean(false)), X86::eax);
866 m_jit.link(exit, m_jit.label());
868 emitPutResult(instruction[i + 1].u.operand);
874 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
875 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
876 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
877 emitCall(i, Machine::cti_op_del_by_id);
878 emitPutResult(instruction[i + 1].u.operand);
883 unsigned dst = instruction[i + 1].u.operand;
884 unsigned src1 = instruction[i + 2].u.operand;
885 unsigned src2 = instruction[i + 3].u.operand;
886 if (isConstant(src1) || isConstant(src2)) {
887 unsigned constant = src1;
888 unsigned nonconstant = src2;
889 if (!isConstant(src1)) {
893 JSValue* value = getConstant(m_exec, constant);
894 if (JSImmediate::isNumber(value)) {
895 emitGetArg(nonconstant, X86::eax);
896 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
897 emitFastArithImmToInt(X86::eax);
898 m_jit.imull_i32r( X86::eax, getDeTaggedConstantImmediate(value), X86::eax);
899 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
900 emitFastArithPotentiallyReTagImmediate(X86::eax);
907 emitGetArg(src1, X86::eax);
908 emitGetArg(src2, X86::edx);
909 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
910 emitFastArithDeTagImmediate(X86::eax);
911 emitFastArithImmToInt(X86::edx);
912 m_jit.imull_rr(X86::edx, X86::eax);
913 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
914 emitFastArithPotentiallyReTagImmediate(X86::eax);
920 FuncDeclNode* func = (m_codeBlock->functions[instruction[i + 2].u.operand]).get();
921 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
922 emitCall(i, Machine::cti_op_new_func);
923 emitPutResult(instruction[i + 1].u.operand);
928 compileOpCall(instruction, i);
932 case op_get_global_var: {
933 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 2].u.jsCell);
934 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
935 emitGetVariableObjectRegister(X86::eax, instruction[i + 3].u.operand, X86::eax);
936 emitPutResult(instruction[i + 1].u.operand, X86::eax);
940 case op_put_global_var: {
941 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 1].u.jsCell);
942 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
943 emitGetArg(instruction[i + 3].u.operand, X86::edx);
944 emitPutVariableObjectRegister(X86::edx, X86::eax, instruction[i + 2].u.operand);
948 case op_get_scoped_var: {
949 int skip = instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain;
951 emitGetCTIParam(CTI_ARGS_scopeChain, X86::eax);
953 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::eax, X86::eax);
955 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::eax, X86::eax);
956 emitGetVariableObjectRegister(X86::eax, instruction[i + 2].u.operand, X86::eax);
957 emitPutResult(instruction[i + 1].u.operand);
961 case op_put_scoped_var: {
962 int skip = instruction[i + 2].u.operand + m_codeBlock->needsFullScopeChain;
964 emitGetCTIParam(CTI_ARGS_scopeChain, X86::edx);
965 emitGetArg(instruction[i + 3].u.operand, X86::eax);
967 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::edx, X86::edx);
969 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::edx, X86::edx);
970 emitPutVariableObjectRegister(X86::eax, X86::edx, instruction[i + 1].u.operand);
975 // Check for an activation - if there is one, jump to the hook below.
976 m_jit.cmpl_i32m(0, RegisterFile::OptionalCalleeActivation * static_cast<int>(sizeof(Register)), X86::edi);
977 X86Assembler::JmpSrc activation = m_jit.emitUnlinkedJne();
978 X86Assembler::JmpDst activated = m_jit.label();
980 // Check for a profiler - if there is one, jump to the hook below.
981 emitGetCTIParam(CTI_ARGS_profilerReference, X86::eax);
982 m_jit.cmpl_i32m(0, X86::eax);
983 X86Assembler::JmpSrc profile = m_jit.emitUnlinkedJne();
984 X86Assembler::JmpDst profiled = m_jit.label();
986 // We could JIT generate the deref, only calling out to C when the refcount hits zero.
987 if (m_codeBlock->needsFullScopeChain)
988 emitCall(i, Machine::cti_op_ret_scopeChain);
990 // Return the result in %eax, and the caller scope chain in %edx (this is read from the callee call frame,
991 // but is only assigned to ExecState::m_scopeChain if returning to a JSFunction).
992 emitGetArg(instruction[i + 1].u.operand, X86::eax);
993 m_jit.movl_mr(RegisterFile::CallerScopeChain * static_cast<int>(sizeof(Register)), X86::edi, X86::edx);
994 // Restore the machine return address from the callframe, roll the callframe back to the caller callframe,
995 // and preserve a copy of r on the stack at CTI_ARGS_r.
996 m_jit.movl_mr(RegisterFile::ReturnPC * static_cast<int>(sizeof(Register)), X86::edi, X86::ecx);
997 m_jit.movl_mr(RegisterFile::CallerRegisters * static_cast<int>(sizeof(Register)), X86::edi, X86::edi);
998 emitPutCTIParam(X86::edi, CTI_ARGS_r);
1000 m_jit.pushl_r(X86::ecx);
1004 m_jit.link(activation, m_jit.label());
1005 emitCall(i, Machine::cti_op_ret_activation);
1006 m_jit.link(m_jit.emitUnlinkedJmp(), activated);
1009 m_jit.link(profile, m_jit.label());
1010 emitCall(i, Machine::cti_op_ret_profiler);
1011 m_jit.link(m_jit.emitUnlinkedJmp(), profiled);
1016 case op_new_array: {
1017 m_jit.leal_mr(sizeof(Register) * instruction[i + 2].u.operand, X86::edi, X86::edx);
1018 emitPutArg(X86::edx, 0);
1019 emitPutArgConstant(instruction[i + 3].u.operand, 4);
1020 emitCall(i, Machine::cti_op_new_array);
1021 emitPutResult(instruction[i + 1].u.operand);
1026 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1027 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1028 emitCall(i, Machine::cti_op_resolve);
1029 emitPutResult(instruction[i + 1].u.operand);
1033 case op_construct: {
1034 compileOpCall(instruction, i, OpConstruct);
1038 case op_construct_verify: {
1039 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1041 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1042 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJne();
1043 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1044 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
1045 X86Assembler::JmpSrc isObject = m_jit.emitUnlinkedJe();
1047 m_jit.link(isImmediate, m_jit.label());
1048 emitGetArg(instruction[i + 2].u.operand, X86::ecx);
1049 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
1050 m_jit.link(isObject, m_jit.label());
1055 case op_get_by_val: {
1056 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1057 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1058 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1059 emitFastArithImmToInt(X86::edx);
1060 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1061 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1062 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
1063 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1065 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
1066 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
1067 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
1068 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
1070 // Get the value from the vector
1071 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::eax);
1072 emitPutResult(instruction[i + 1].u.operand);
1076 case op_resolve_func: {
1077 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1078 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1079 emitCall(i, Machine::cti_op_resolve_func);
1080 emitPutResult(instruction[i + 1].u.operand);
1081 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
1082 emitPutResult(instruction[i + 2].u.operand);
1087 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1088 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1089 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1090 m_jit.subl_rr(X86::edx, X86::eax);
1091 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1092 emitFastArithReTagImmediate(X86::eax);
1093 emitPutResult(instruction[i + 1].u.operand);
1097 case op_put_by_val: {
1098 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1099 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1100 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1101 emitFastArithImmToInt(X86::edx);
1102 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1103 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1104 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
1105 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1107 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
1108 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
1109 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
1110 X86Assembler::JmpSrc inFastVector = m_jit.emitUnlinkedJa();
1111 // No; oh well, check if the access is within the vector - if so, we may still be okay.
1112 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
1113 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
1115 // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
1116 // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff.
1117 m_jit.cmpl_i8m(0, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
1118 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
1120 // All good - put the value into the array.
1121 m_jit.link(inFastVector, m_jit.label());
1122 emitGetArg(instruction[i + 3].u.operand, X86::eax);
1123 m_jit.movl_rm(X86::eax, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
1127 CTI_COMPILE_BINARY_OP(op_lesseq)
1128 case op_loop_if_true: {
1129 emitSlowScriptCheck(i);
1131 unsigned target = instruction[i + 2].u.operand;
1132 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1134 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1135 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
1136 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1137 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
1139 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1140 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1141 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1142 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1144 m_jit.link(isZero, m_jit.label());
1148 case op_resolve_base: {
1149 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1150 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1151 emitCall(i, Machine::cti_op_resolve_base);
1152 emitPutResult(instruction[i + 1].u.operand);
1157 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1158 emitCall(i, Machine::cti_op_negate);
1159 emitPutResult(instruction[i + 1].u.operand);
1163 case op_resolve_skip: {
1164 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1165 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1166 emitPutArgConstant(instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain, 4);
1167 emitCall(i, Machine::cti_op_resolve_skip);
1168 emitPutResult(instruction[i + 1].u.operand);
1172 case op_resolve_global: {
1174 unsigned globalObject = reinterpret_cast<unsigned>(instruction[i + 2].u.jsCell);
1175 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1176 void* structureIDAddr = reinterpret_cast<void*>(instruction + i + 4);
1177 void* offsetAddr = reinterpret_cast<void*>(instruction + i + 5);
1179 // Check StructureID of global object
1180 m_jit.movl_i32r(globalObject, X86::eax);
1181 m_jit.movl_mr(structureIDAddr, X86::edx);
1182 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1183 X86Assembler::JmpSrc slowCase = m_jit.emitUnlinkedJne(); // StructureIDs don't match
1184 m_slowCases.append(SlowCaseEntry(slowCase, i));
1186 // Load cached property
1187 m_jit.movl_mr(OBJECT_OFFSET(JSGlobalObject, m_propertyStorage), X86::eax, X86::eax);
1188 m_jit.movl_mr(offsetAddr, X86::edx);
1189 m_jit.movl_mr(0, X86::eax, X86::edx, sizeof(JSValue*), X86::eax);
1190 emitPutResult(instruction[i + 1].u.operand);
1191 X86Assembler::JmpSrc end = m_jit.emitUnlinkedJmp();
1194 m_jit.link(slowCase, m_jit.label());
1195 emitPutArgConstant(globalObject, 0);
1196 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1197 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 8);
1198 emitCall(i, Machine::cti_op_resolve_global);
1199 emitPutResult(instruction[i + 1].u.operand);
1200 m_jit.link(end, m_jit.label());
1202 ++structureIDInstructionIndex;
1205 CTI_COMPILE_BINARY_OP(op_div)
1207 int srcDst = instruction[i + 1].u.operand;
1208 emitGetArg(srcDst, X86::eax);
1209 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1210 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
1211 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1212 emitPutResult(srcDst, X86::eax);
1217 unsigned target = instruction[i + 3].u.operand;
1218 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1220 emitGetArg(instruction[i + 1].u.operand, X86::edx);
1221 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1222 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
1223 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
1225 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1226 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1227 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1228 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1229 m_jit.cmpl_rr(X86::edx, X86::eax);
1230 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
1236 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1237 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
1238 m_jit.testl_i32r(JSImmediate::FullTagTypeMask, X86::eax); // i8?
1239 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1240 m_jit.xorl_i8r((JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue), X86::eax);
1241 emitPutResult(instruction[i + 1].u.operand);
1246 unsigned target = instruction[i + 2].u.operand;
1247 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1249 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1250 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1251 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1252 X86Assembler::JmpSrc isNonZero = m_jit.emitUnlinkedJne();
1254 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1255 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1256 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1257 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1259 m_jit.link(isNonZero, m_jit.label());
1264 int srcDst = instruction[i + 2].u.operand;
1265 emitGetArg(srcDst, X86::eax);
1266 m_jit.movl_rr(X86::eax, X86::edx);
1267 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1268 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1269 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1270 emitPutResult(srcDst, X86::edx);
1271 emitPutResult(instruction[i + 1].u.operand);
1275 case op_unexpected_load: {
1276 JSValue* v = m_codeBlock->unexpectedConstants[instruction[i + 2].u.operand];
1277 m_jit.movl_i32r(reinterpret_cast<unsigned>(v), X86::eax);
1278 emitPutResult(instruction[i + 1].u.operand);
1283 int retAddrDst = instruction[i + 1].u.operand;
1284 int target = instruction[i + 2].u.operand;
1285 m_jit.movl_i32m(0, sizeof(Register) * retAddrDst, X86::edi);
1286 X86Assembler::JmpDst addrPosition = m_jit.label();
1287 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1288 X86Assembler::JmpDst sretTarget = m_jit.label();
1289 m_jsrSites.append(JSRInfo(addrPosition, sretTarget));
1294 m_jit.jmp_m(sizeof(Register) * instruction[i + 1].u.operand, X86::edi);
1299 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1300 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1301 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1302 m_jit.cmpl_rr(X86::edx, X86::eax);
1303 m_jit.sete_r(X86::eax);
1304 m_jit.movzbl_rr(X86::eax, X86::eax);
1305 emitTagAsBoolImmediate(X86::eax);
1306 emitPutResult(instruction[i + 1].u.operand);
1311 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1312 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1313 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1314 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1315 emitFastArithImmToInt(X86::eax);
1316 emitFastArithImmToInt(X86::ecx);
1317 m_jit.shll_CLr(X86::eax);
1318 emitFastArithIntToImmOrSlowCase(X86::eax, i);
1319 emitPutResult(instruction[i + 1].u.operand);
1324 unsigned src1 = instruction[i + 2].u.operand;
1325 unsigned src2 = instruction[i + 3].u.operand;
1326 unsigned dst = instruction[i + 1].u.operand;
1327 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
1328 emitGetArg(src2, X86::eax);
1329 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1330 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax); // FIXME: make it more obvious this is relying on the format of JSImmediate
1332 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
1333 emitGetArg(src1, X86::eax);
1334 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1335 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax);
1338 emitGetArg(src1, X86::eax);
1339 emitGetArg(src2, X86::edx);
1340 m_jit.andl_rr(X86::edx, X86::eax);
1341 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1348 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1349 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1350 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1351 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1352 emitFastArithImmToInt(X86::ecx);
1353 m_jit.sarl_CLr(X86::eax);
1354 emitFastArithPotentiallyReTagImmediate(X86::eax);
1355 emitPutResult(instruction[i + 1].u.operand);
1360 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1361 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1362 m_jit.xorl_i8r(~JSImmediate::TagBitTypeInteger, X86::eax);
1363 emitPutResult(instruction[i + 1].u.operand);
1367 case op_resolve_with_base: {
1368 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1369 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1370 emitCall(i, Machine::cti_op_resolve_with_base);
1371 emitPutResult(instruction[i + 1].u.operand);
1372 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
1373 emitPutResult(instruction[i + 2].u.operand);
1377 case op_new_func_exp: {
1378 FuncExprNode* func = (m_codeBlock->functionExpressions[instruction[i + 2].u.operand]).get();
1379 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
1380 emitCall(i, Machine::cti_op_new_func_exp);
1381 emitPutResult(instruction[i + 1].u.operand);
1386 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1387 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1388 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1389 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1390 emitFastArithDeTagImmediate(X86::eax);
1391 emitFastArithDeTagImmediate(X86::ecx);
1392 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i)); // This checks whether the last detag (of the divisor in ecx) resulted in 0, i.e. division by zero takes the slow case.
1394 m_jit.idivl_r(X86::ecx);
1395 emitFastArithReTagImmediate(X86::edx);
1396 m_jit.movl_rr(X86::edx, X86::eax);
1397 emitPutResult(instruction[i + 1].u.operand);
1402 unsigned target = instruction[i + 2].u.operand;
1403 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1405 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1406 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
1407 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1408 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
1410 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1411 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1412 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1413 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1415 m_jit.link(isZero, m_jit.label());
1419 CTI_COMPILE_BINARY_OP(op_less)
1421 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1422 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1423 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1424 m_jit.cmpl_rr(X86::eax, X86::edx);
1426 m_jit.setne_r(X86::eax);
1427 m_jit.movzbl_rr(X86::eax, X86::eax);
1428 emitTagAsBoolImmediate(X86::eax);
1430 emitPutResult(instruction[i + 1].u.operand);
1436 int srcDst = instruction[i + 2].u.operand;
1437 emitGetArg(srcDst, X86::eax);
1438 m_jit.movl_rr(X86::eax, X86::edx);
1439 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1440 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1441 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1442 emitPutResult(srcDst, X86::edx);
1443 emitPutResult(instruction[i + 1].u.operand);
1447 CTI_COMPILE_BINARY_OP(op_urshift)
1449 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1450 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1451 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1452 m_jit.xorl_rr(X86::edx, X86::eax);
1453 emitFastArithReTagImmediate(X86::eax);
1454 emitPutResult(instruction[i + 1].u.operand);
1458 case op_new_regexp: {
1459 RegExp* regExp = m_codeBlock->regexps[instruction[i + 2].u.operand].get();
1460 emitPutArgConstant(reinterpret_cast<unsigned>(regExp), 0);
1461 emitCall(i, Machine::cti_op_new_regexp);
1462 emitPutResult(instruction[i + 1].u.operand);
1467 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1468 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1469 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1470 m_jit.orl_rr(X86::edx, X86::eax);
1471 emitPutResult(instruction[i + 1].u.operand);
1475 case op_call_eval: {
1476 compileOpCall(instruction, i, OpCallEval);
1481 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1482 emitCall(i, Machine::cti_op_throw);
1483 m_jit.addl_i8r(0x24, X86::esp);
1484 m_jit.popl_r(X86::edi);
1485 m_jit.popl_r(X86::esi);
1490 case op_get_pnames: {
1491 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1492 emitCall(i, Machine::cti_op_get_pnames);
1493 emitPutResult(instruction[i + 1].u.operand);
1497 case op_next_pname: {
1498 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1499 unsigned target = instruction[i + 3].u.operand;
1500 emitCall(i, Machine::cti_op_next_pname);
1501 m_jit.testl_rr(X86::eax, X86::eax);
1502 X86Assembler::JmpSrc endOfIter = m_jit.emitUnlinkedJe();
1503 emitPutResult(instruction[i + 1].u.operand);
1504 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 3 + target));
1505 m_jit.link(endOfIter, m_jit.label());
1509 case op_push_scope: {
1510 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1511 emitCall(i, Machine::cti_op_push_scope);
1515 case op_pop_scope: {
1516 emitCall(i, Machine::cti_op_pop_scope);
1520 CTI_COMPILE_UNARY_OP(op_typeof)
1521 CTI_COMPILE_UNARY_OP(op_is_undefined)
1522 CTI_COMPILE_UNARY_OP(op_is_boolean)
1523 CTI_COMPILE_UNARY_OP(op_is_number)
1524 CTI_COMPILE_UNARY_OP(op_is_string)
1525 CTI_COMPILE_UNARY_OP(op_is_object)
1526 CTI_COMPILE_UNARY_OP(op_is_function)
1528 compileOpStrictEq(instruction, i, OpStrictEq);
1532 case op_nstricteq: {
1533 compileOpStrictEq(instruction, i, OpNStrictEq);
1537 case op_to_jsnumber: {
1538 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1539 emitCall(i, Machine::cti_op_to_jsnumber);
1540 emitPutResult(instruction[i + 1].u.operand);
1545 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1546 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1547 emitCall(i, Machine::cti_op_in);
1548 emitPutResult(instruction[i + 1].u.operand);
1552 case op_push_new_scope: {
1553 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1554 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1555 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1556 emitCall(i, Machine::cti_op_push_new_scope);
1557 emitPutResult(instruction[i + 1].u.operand);
1562 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
1563 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
1564 m_jit.movl_mr(OBJECT_OFFSET(ExecState, m_exception), X86::ecx, X86::eax);
1565 m_jit.movl_i32m(0, OBJECT_OFFSET(ExecState, m_exception), X86::ecx);
1566 emitPutResult(instruction[i + 1].u.operand);
1570 case op_jmp_scopes: {
1571 unsigned count = instruction[i + 1].u.operand;
1572 emitPutArgConstant(count, 0);
1573 emitCall(i, Machine::cti_op_jmp_scopes);
1574 unsigned target = instruction[i + 2].u.operand;
1575 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1579 case op_put_by_index: {
1580 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1581 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1582 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1583 emitCall(i, Machine::cti_op_put_by_index);
1587 case op_switch_imm: {
1588 unsigned tableIndex = instruction[i + 1].u.operand;
1589 unsigned defaultOffset = instruction[i + 2].u.operand;
1590 unsigned scrutinee = instruction[i + 3].u.operand;
1592 // create jump table for switch destinations, track this switch statement.
1593 SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTables[tableIndex];
1594 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Immediate));
1595 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
1597 emitGetPutArg(scrutinee, 0, X86::ecx);
1598 emitPutArgConstant(tableIndex, 4);
1599 emitCall(i, Machine::cti_op_switch_imm);
1600 m_jit.jmp_r(X86::eax);
1604 case op_switch_char: {
1605 unsigned tableIndex = instruction[i + 1].u.operand;
1606 unsigned defaultOffset = instruction[i + 2].u.operand;
1607 unsigned scrutinee = instruction[i + 3].u.operand;
1609 // create jump table for switch destinations, track this switch statement.
1610 SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTables[tableIndex];
1611 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Character));
1612 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
1614 emitGetPutArg(scrutinee, 0, X86::ecx);
1615 emitPutArgConstant(tableIndex, 4);
1616 emitCall(i, Machine::cti_op_switch_char);
1617 m_jit.jmp_r(X86::eax);
1621 case op_switch_string: {
1622 unsigned tableIndex = instruction[i + 1].u.operand;
1623 unsigned defaultOffset = instruction[i + 2].u.operand;
1624 unsigned scrutinee = instruction[i + 3].u.operand;
1626 // create jump table for switch destinations, track this switch statement.
1627 StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTables[tableIndex];
1628 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset));
1630 emitGetPutArg(scrutinee, 0, X86::ecx);
1631 emitPutArgConstant(tableIndex, 4);
1632 emitCall(i, Machine::cti_op_switch_string);
1633 m_jit.jmp_r(X86::eax);
1637 case op_del_by_val: {
1638 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1639 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1640 emitCall(i, Machine::cti_op_del_by_val);
1641 emitPutResult(instruction[i + 1].u.operand);
1645 case op_put_getter: {
1646 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1647 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1648 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1649 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1650 emitCall(i, Machine::cti_op_put_getter);
1654 case op_put_setter: {
1655 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1656 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1657 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1658 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1659 emitCall(i, Machine::cti_op_put_setter);
1663 case op_new_error: {
1664 JSValue* message = m_codeBlock->unexpectedConstants[instruction[i + 3].u.operand];
1665 emitPutArgConstant(instruction[i + 2].u.operand, 0);
1666 emitPutArgConstant(reinterpret_cast<unsigned>(message), 4);
1667 emitPutArgConstant(m_codeBlock->lineNumberForVPC(&instruction[i]), 8);
1668 emitCall(i, Machine::cti_op_new_error);
1669 emitPutResult(instruction[i + 1].u.operand);
1674 emitPutArgConstant(instruction[i + 1].u.operand, 0);
1675 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1676 emitPutArgConstant(instruction[i + 3].u.operand, 8);
1677 emitCall(i, Machine::cti_op_debug);
1682 unsigned dst = instruction[i + 1].u.operand;
1683 unsigned src1 = instruction[i + 2].u.operand;
1685 emitGetArg(src1, X86::eax);
1686 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1687 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJnz();
1689 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1690 m_jit.testl_i32m(MasqueradesAsUndefined, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::ecx);
1691 m_jit.setnz_r(X86::eax);
1693 X86Assembler::JmpSrc wasNotImmediate = m_jit.emitUnlinkedJmp();
1695 m_jit.link(isImmediate, m_jit.label());
1697 m_jit.movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
1698 m_jit.andl_rr(X86::eax, X86::ecx);
1699 m_jit.cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
1700 m_jit.sete_r(X86::eax);
1702 m_jit.link(wasNotImmediate, m_jit.label());
1704 m_jit.movzbl_rr(X86::eax, X86::eax);
1705 emitTagAsBoolImmediate(X86::eax);
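// Summary of the sequence above: for cells, test the MasqueradesAsUndefined flag in the
// StructureID's type info; for immediates, mask off the extended undefined tag bit and compare
// against the null tag, so that null and undefined compare alike. The byte result is then
// zero-extended and tagged as a boolean immediate.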
1712 unsigned dst = instruction[i + 1].u.operand;
1713 unsigned src1 = instruction[i + 2].u.operand;
1715 emitGetArg(src1, X86::eax);
1716 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1717 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJnz();
1719 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1720 m_jit.testl_i32m(MasqueradesAsUndefined, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::ecx);
1721 m_jit.setz_r(X86::eax);
1723 X86Assembler::JmpSrc wasNotImmediate = m_jit.emitUnlinkedJmp();
1725 m_jit.link(isImmediate, m_jit.label());
1727 m_jit.movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
1728 m_jit.andl_rr(X86::eax, X86::ecx);
1729 m_jit.cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
1730 m_jit.setne_r(X86::eax);
1732 m_jit.link(wasNotImmediate, m_jit.label());
1734 m_jit.movzbl_rr(X86::eax, X86::eax);
1735 emitTagAsBoolImmediate(X86::eax);
1742 // Even though CTI doesn't use them, we initialize our constant
1743 // registers to zap stale pointers, to avoid unnecessarily prolonging
1744 // object lifetime and increasing GC pressure.
1745 size_t count = m_codeBlock->numVars + m_codeBlock->constantRegisters.size();
1746 for (size_t j = 0; j < count; ++j)
1747 emitInitRegister(j);
1752 case op_get_array_length:
1753 case op_get_by_id_chain:
1754 case op_get_by_id_generic:
1755 case op_get_by_id_proto:
1756 case op_get_by_id_self:
1757 case op_get_string_length:
1758 case op_put_by_id_generic:
1759 case op_put_by_id_replace:
1760 case op_put_by_id_transition:
1761 ASSERT_NOT_REACHED();
1765 ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
1769 void CTI::privateCompileLinkPass()
1771 unsigned jmpTableCount = m_jmpTable.size();
1772 for (unsigned i = 0; i < jmpTableCount; ++i)
1773 m_jit.link(m_jmpTable[i].from, m_labels[m_jmpTable[i].to]);
1777 #define CTI_COMPILE_BINARY_OP_SLOW_CASE(name) \
1779 m_jit.link(iter->from, m_jit.label()); \
1780 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
1781 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
1782 emitCall(i, Machine::cti_##name); \
1783 emitPutResult(instruction[i + 1].u.operand); \
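// The macro above links the pending slow-case jump, spills both source operands into the CTI
// argument area (stack offsets 0 and 4), calls the corresponding Machine::cti_* helper, and
// stores the returned JSValue* into the destination register.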
1788 void CTI::privateCompileSlowCases()
1790 unsigned structureIDInstructionIndex = 0;
1792 Instruction* instruction = m_codeBlock->instructions.begin();
1793 for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end(); ++iter) {
1794 unsigned i = iter->to;
1795 m_jit.emitRestoreArgumentReference();
1796 switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
1798 unsigned dst = instruction[i + 1].u.operand;
1799 unsigned src2 = instruction[i + 3].u.operand;
1800 if (isConstant(src2)) {
1801 JSValue* value = getConstant(m_exec, src2);
1802 if (JSImmediate::isNumber(value)) {
1803 X86Assembler::JmpSrc notImm = iter->from;
1804 m_jit.link((++iter)->from, m_jit.label());
1805 m_jit.subl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
1806 m_jit.link(notImm, m_jit.label());
1807 emitPutArg(X86::eax, 0);
1808 emitGetPutArg(src2, 4, X86::ecx);
1809 emitCall(i, Machine::cti_op_add);
1816 ASSERT(!isConstant(instruction[i + 2].u.operand));
1818 X86Assembler::JmpSrc notImm = iter->from;
1819 m_jit.link((++iter)->from, m_jit.label());
1820 m_jit.subl_rr(X86::edx, X86::eax);
1821 emitFastArithReTagImmediate(X86::eax);
1822 m_jit.link(notImm, m_jit.label());
1823 emitPutArg(X86::eax, 0);
1824 emitPutArg(X86::edx, 4);
1825 emitCall(i, Machine::cti_op_add);
1830 case op_get_by_val: {
1831 // The slow case that handles accesses to arrays (below) may jump back up to here.
1832 X86Assembler::JmpDst beginGetByValSlow = m_jit.label();
1834 X86Assembler::JmpSrc notImm = iter->from;
1835 m_jit.link((++iter)->from, m_jit.label());
1836 m_jit.link((++iter)->from, m_jit.label());
1837 emitFastArithIntToImmNoCheck(X86::edx);
1838 m_jit.link(notImm, m_jit.label());
1839 emitPutArg(X86::eax, 0);
1840 emitPutArg(X86::edx, 4);
1841 emitCall(i, Machine::cti_op_get_by_val);
1842 emitPutResult(instruction[i + 1].u.operand);
1843 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
1845 // This is the slow case that handles accesses to arrays above the fast cut-off.
1846 // First, check if this is an access to the vector
1847 m_jit.link((++iter)->from, m_jit.label());
1848 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
1849 m_jit.link(m_jit.emitUnlinkedJbe(), beginGetByValSlow);
1851 // Okay, we missed the fast region, but the index is still within the vector. Get the value.
1852 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::ecx);
1853 // Check whether the value loaded is zero; if so we need to return undefined.
1854 m_jit.testl_rr(X86::ecx, X86::ecx);
1855 m_jit.link(m_jit.emitUnlinkedJe(), beginGetByValSlow);
1856 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
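// A zero entry in the vector appears to denote an unset slot (undefined), so the je above sends
// it back to beginGetByValSlow and the generic cti_op_get_by_val path, along with any index at
// or beyond m_vectorLength.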
1862 X86Assembler::JmpSrc notImm = iter->from;
1863 m_jit.link((++iter)->from, m_jit.label());
1864 m_jit.addl_rr(X86::edx, X86::eax);
1865 m_jit.link(notImm, m_jit.label());
1866 emitPutArg(X86::eax, 0);
1867 emitPutArg(X86::edx, 4);
1868 emitCall(i, Machine::cti_op_sub);
1869 emitPutResult(instruction[i + 1].u.operand);
1874 m_jit.link(iter->from, m_jit.label());
1875 m_jit.link((++iter)->from, m_jit.label());
1876 emitPutArg(X86::eax, 0);
1877 emitPutArg(X86::ecx, 4);
1878 emitCall(i, Machine::cti_op_rshift);
1879 emitPutResult(instruction[i + 1].u.operand);
1884 X86Assembler::JmpSrc notImm1 = iter->from;
1885 X86Assembler::JmpSrc notImm2 = (++iter)->from;
1886 m_jit.link((++iter)->from, m_jit.label());
1887 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1888 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1889 m_jit.link(notImm1, m_jit.label());
1890 m_jit.link(notImm2, m_jit.label());
1891 emitPutArg(X86::eax, 0);
1892 emitPutArg(X86::ecx, 4);
1893 emitCall(i, Machine::cti_op_lshift);
1894 emitPutResult(instruction[i + 1].u.operand);
1898 case op_loop_if_less: {
1899 emitSlowScriptCheck(i);
1901 unsigned target = instruction[i + 3].u.operand;
1902 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1904 m_jit.link(iter->from, m_jit.label());
1905 emitPutArg(X86::edx, 0);
1906 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
1907 emitCall(i, Machine::cti_op_loop_if_less);
1908 m_jit.testl_rr(X86::eax, X86::eax);
1909 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
1911 m_jit.link(iter->from, m_jit.label());
1912 m_jit.link((++iter)->from, m_jit.label());
1913 emitPutArg(X86::eax, 0);
1914 emitPutArg(X86::edx, 4);
1915 emitCall(i, Machine::cti_op_loop_if_less);
1916 m_jit.testl_rr(X86::eax, X86::eax);
1917 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
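// When the second operand is a constant immediate number, the hot path presumably emitted only a
// single slow-case check (on the non-constant operand), so one link suffices; otherwise both
// operands were checked and both entries are linked before calling the cti helper.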
1922 case op_put_by_id: {
1923 m_jit.link(iter->from, m_jit.label());
1924 m_jit.link((++iter)->from, m_jit.label());
1926 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1927 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1928 emitPutArg(X86::eax, 0);
1929 emitPutArg(X86::edx, 8);
1930 X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_put_by_id);
1932 // Track the location of the call; this will be used to recover repatch information.
1933 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
1934 m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
1935 ++structureIDInstructionIndex;
1940 case op_get_by_id: {
1941 // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
1942 // so that we need only track one pointer into the slow case code - we track a pointer to the location
1943 // of the call (which we can use to look up the repatch information), but should an array-length or
1944 // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
1945 // the distance from the call to the head of the slow case.
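// In other words (sketch): slowCaseBegin = callReturnLocation - repatchOffsetGetByIdSlowCaseCall,
// which is how privateCompileGetByIdProto() and privateCompilePatchGetArrayLength() below recover
// the address to link their failure cases back to.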
1947 m_jit.link(iter->from, m_jit.label());
1948 m_jit.link((++iter)->from, m_jit.label());
1951 X86Assembler::JmpDst coldPathBegin = m_jit.label();
1953 emitPutArg(X86::eax, 0);
1954 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1955 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1956 X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_get_by_id);
1957 ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
1958 emitPutResult(instruction[i + 1].u.operand);
1960 // Track the location of the call; this will be used to recover repatch information.
1961 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
1962 m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
1963 ++structureIDInstructionIndex;
1968 case op_resolve_global: {
1969 ++structureIDInstructionIndex;
1973 case op_loop_if_lesseq: {
1974 emitSlowScriptCheck(i);
1976 unsigned target = instruction[i + 3].u.operand;
1977 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1979 m_jit.link(iter->from, m_jit.label());
1980 emitPutArg(X86::edx, 0);
1981 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
1982 emitCall(i, Machine::cti_op_loop_if_lesseq);
1983 m_jit.testl_rr(X86::eax, X86::eax);
1984 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
1986 m_jit.link(iter->from, m_jit.label());
1987 m_jit.link((++iter)->from, m_jit.label());
1988 emitPutArg(X86::eax, 0);
1989 emitPutArg(X86::edx, 4);
1990 emitCall(i, Machine::cti_op_loop_if_lesseq);
1991 m_jit.testl_rr(X86::eax, X86::eax);
1992 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
1998 unsigned srcDst = instruction[i + 1].u.operand;
1999 X86Assembler::JmpSrc notImm = iter->from;
2000 m_jit.link((++iter)->from, m_jit.label());
2001 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
2002 m_jit.link(notImm, m_jit.label());
2003 emitPutArg(X86::eax, 0);
2004 emitCall(i, Machine::cti_op_pre_inc);
2005 emitPutResult(srcDst);
2009 case op_put_by_val: {
2010 // Normal slow cases - either the index is not an immediate int, or the base is an array.
2011 X86Assembler::JmpSrc notImm = iter->from;
2012 m_jit.link((++iter)->from, m_jit.label());
2013 m_jit.link((++iter)->from, m_jit.label());
2014 emitFastArithIntToImmNoCheck(X86::edx);
2015 m_jit.link(notImm, m_jit.label());
2016 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
2017 emitPutArg(X86::eax, 0);
2018 emitPutArg(X86::edx, 4);
2019 emitPutArg(X86::ecx, 8);
2020 emitCall(i, Machine::cti_op_put_by_val);
2021 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
2023 // slow cases for immediate int accesses to arrays
2024 m_jit.link((++iter)->from, m_jit.label());
2025 m_jit.link((++iter)->from, m_jit.label());
2026 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
2027 emitPutArg(X86::eax, 0);
2028 emitPutArg(X86::edx, 4);
2029 emitPutArg(X86::ecx, 8);
2030 emitCall(i, Machine::cti_op_put_by_val_array);
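// This second group of links comes from the array fast path's bounds check: the base is already
// known to be an array here, so cti_op_put_by_val_array presumably skips the generic dispatch and
// handles stores beyond the fast cut-off.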
2035 case op_loop_if_true: {
2036 emitSlowScriptCheck(i);
2038 m_jit.link(iter->from, m_jit.label());
2039 emitPutArg(X86::eax, 0);
2040 emitCall(i, Machine::cti_op_jtrue);
2041 m_jit.testl_rr(X86::eax, X86::eax);
2042 unsigned target = instruction[i + 2].u.operand;
2043 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
2048 unsigned srcDst = instruction[i + 1].u.operand;
2049 X86Assembler::JmpSrc notImm = iter->from;
2050 m_jit.link((++iter)->from, m_jit.label());
2051 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
2052 m_jit.link(notImm, m_jit.label());
2053 emitPutArg(X86::eax, 0);
2054 emitCall(i, Machine::cti_op_pre_dec);
2055 emitPutResult(srcDst);
2060 unsigned target = instruction[i + 3].u.operand;
2061 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
2063 m_jit.link(iter->from, m_jit.label());
2064 emitPutArg(X86::edx, 0);
2065 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
2066 emitCall(i, Machine::cti_op_jless);
2067 m_jit.testl_rr(X86::eax, X86::eax);
2068 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
2070 m_jit.link(iter->from, m_jit.label());
2071 m_jit.link((++iter)->from, m_jit.label());
2072 emitPutArg(X86::eax, 0);
2073 emitPutArg(X86::edx, 4);
2074 emitCall(i, Machine::cti_op_jless);
2075 m_jit.testl_rr(X86::eax, X86::eax);
2076 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
2082 m_jit.link(iter->from, m_jit.label());
2083 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
2084 emitPutArg(X86::eax, 0);
2085 emitCall(i, Machine::cti_op_not);
2086 emitPutResult(instruction[i + 1].u.operand);
2091 m_jit.link(iter->from, m_jit.label());
2092 emitPutArg(X86::eax, 0);
2093 emitCall(i, Machine::cti_op_jtrue);
2094 m_jit.testl_rr(X86::eax, X86::eax);
2095 unsigned target = instruction[i + 2].u.operand;
2096 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 2 + target]); // inverted!
2101 unsigned srcDst = instruction[i + 2].u.operand;
2102 m_jit.link(iter->from, m_jit.label());
2103 m_jit.link((++iter)->from, m_jit.label());
2104 emitPutArg(X86::eax, 0);
2105 emitCall(i, Machine::cti_op_post_inc);
2106 emitPutResult(instruction[i + 1].u.operand);
2107 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
2108 emitPutResult(srcDst);
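// cti_op_post_inc hands back two values: its return value (stored to dst above, the value before
// the increment) and the updated value via the CTI_ARGS_2ndResult parameter slot, written back to
// srcDst here.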
2113 m_jit.link(iter->from, m_jit.label());
2114 emitPutArg(X86::eax, 0);
2115 emitCall(i, Machine::cti_op_bitnot);
2116 emitPutResult(instruction[i + 1].u.operand);
2121 unsigned src1 = instruction[i + 2].u.operand;
2122 unsigned src2 = instruction[i + 3].u.operand;
2123 unsigned dst = instruction[i + 1].u.operand;
2124 if (getConstantImmediateNumericArg(src1)) {
2125 m_jit.link(iter->from, m_jit.label());
2126 emitGetPutArg(src1, 0, X86::ecx);
2127 emitPutArg(X86::eax, 4);
2128 emitCall(i, Machine::cti_op_bitand);
2130 } else if (getConstantImmediateNumericArg(src2)) {
2131 m_jit.link(iter->from, m_jit.label());
2132 emitPutArg(X86::eax, 0);
2133 emitGetPutArg(src2, 4, X86::ecx);
2134 emitCall(i, Machine::cti_op_bitand);
2137 m_jit.link(iter->from, m_jit.label());
2138 emitGetPutArg(src1, 0, X86::ecx);
2139 emitPutArg(X86::edx, 4);
2140 emitCall(i, Machine::cti_op_bitand);
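// Three operand layouts are handled above: src1 constant (src2's value is in eax), src2 constant
// (src1's value is in eax), and neither constant (src2's value is in edx); this presumably mirrors
// the registers the hot path left the operands in.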
2147 m_jit.link(iter->from, m_jit.label());
2148 emitPutArg(X86::eax, 0);
2149 emitCall(i, Machine::cti_op_jtrue);
2150 m_jit.testl_rr(X86::eax, X86::eax);
2151 unsigned target = instruction[i + 2].u.operand;
2152 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
2157 unsigned srcDst = instruction[i + 2].u.operand;
2158 m_jit.link(iter->from, m_jit.label());
2159 m_jit.link((++iter)->from, m_jit.label());
2160 emitPutArg(X86::eax, 0);
2161 emitCall(i, Machine::cti_op_post_dec);
2162 emitPutResult(instruction[i + 1].u.operand);
2163 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
2164 emitPutResult(srcDst);
2169 m_jit.link(iter->from, m_jit.label());
2170 emitPutArg(X86::eax, 0);
2171 emitPutArg(X86::edx, 4);
2172 emitCall(i, Machine::cti_op_bitxor);
2173 emitPutResult(instruction[i + 1].u.operand);
2178 m_jit.link(iter->from, m_jit.label());
2179 emitPutArg(X86::eax, 0);
2180 emitPutArg(X86::edx, 4);
2181 emitCall(i, Machine::cti_op_bitor);
2182 emitPutResult(instruction[i + 1].u.operand);
2187 m_jit.link(iter->from, m_jit.label());
2188 emitPutArg(X86::eax, 0);
2189 emitPutArg(X86::edx, 4);
2190 emitCall(i, Machine::cti_op_eq);
2191 emitPutResult(instruction[i + 1].u.operand);
2196 m_jit.link(iter->from, m_jit.label());
2197 emitPutArg(X86::eax, 0);
2198 emitPutArg(X86::edx, 4);
2199 emitCall(i, Machine::cti_op_neq);
2200 emitPutResult(instruction[i + 1].u.operand);
2204 CTI_COMPILE_BINARY_OP_SLOW_CASE(op_stricteq);
2205 CTI_COMPILE_BINARY_OP_SLOW_CASE(op_nstricteq);
2206 case op_instanceof: {
2207 m_jit.link(iter->from, m_jit.label());
2208 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
2209 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
2210 emitGetPutArg(instruction[i + 4].u.operand, 8, X86::ecx);
2211 emitCall(i, Machine::cti_op_instanceof);
2212 emitPutResult(instruction[i + 1].u.operand);
2217 X86Assembler::JmpSrc notImm1 = iter->from;
2218 X86Assembler::JmpSrc notImm2 = (++iter)->from;
2219 m_jit.link((++iter)->from, m_jit.label());
2220 emitFastArithReTagImmediate(X86::eax);
2221 emitFastArithReTagImmediate(X86::ecx);
2222 m_jit.link(notImm1, m_jit.label());
2223 m_jit.link(notImm2, m_jit.label());
2224 emitPutArg(X86::eax, 0);
2225 emitPutArg(X86::ecx, 4);
2226 emitCall(i, Machine::cti_op_mod);
2227 emitPutResult(instruction[i + 1].u.operand);
2231 CTI_COMPILE_BINARY_OP_SLOW_CASE(op_mul);
2234 case op_construct: {
2235 m_jit.link(iter->from, m_jit.label());
2236 m_jit.emitRestoreArgumentReference();
2238 // We jump to this slow case if the ctiCode for the codeBlock has not yet been generated; compile it now.
2239 emitCall(i, Machine::cti_vm_compile);
2240 emitCall(i, X86::eax);
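// cti_vm_compile generates the callee's ctiCode and returns its entry point in eax; the second
// emitCall then calls directly into that freshly generated code.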
2242 // Instead of checking for 0 we could initialize the CodeBlock::ctiCode to point to a trampoline that would trigger the translation.
2244 // In the interpreter the following actions are performed by op_ret:
2246 // Restore ExecState::m_scopeChain and CTI_ARGS_scopeChain. NOTE: After
2247 // op_ret, %edx holds the caller's scope chain.
2248 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
2249 emitPutCTIParam(X86::edx, CTI_ARGS_scopeChain);
2250 m_jit.movl_rm(X86::edx, OBJECT_OFFSET(ExecState, m_scopeChain), X86::ecx);
2251 // Restore ExecState::m_callFrame.
2252 m_jit.movl_rm(X86::edi, OBJECT_OFFSET(ExecState, m_callFrame), X86::ecx);
2253 // Restore CTI_ARGS_codeBlock.
2254 emitPutCTIParam(m_codeBlock, CTI_ARGS_codeBlock);
2256 emitPutResult(instruction[i + 1].u.operand);
2262 ASSERT_NOT_REACHED();
2266 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i]);
2269 ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
2272 void CTI::privateCompile()
2274 // Could use a popl_m, but would need to offset the following instruction if so.
2275 m_jit.popl_r(X86::ecx);
2276 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
2277 emitPutToCallFrameHeader(X86::ecx, RegisterFile::ReturnPC);
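// The call into this code pushed a return address; pop it into ecx and stash it in the call
// frame header's ReturnPC slot so it can be found again when this frame returns or unwinds.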
2279 // Lazy copy of the scopeChain
2280 X86Assembler::JmpSrc callToUpdateScopeChain;
2281 if ((m_codeBlock->codeType == FunctionCode) && m_codeBlock->needsFullScopeChain) {
2282 m_jit.emitRestoreArgumentReference();
2283 callToUpdateScopeChain = m_jit.emitCall();
2286 privateCompileMainPass();
2287 privateCompileLinkPass();
2288 privateCompileSlowCases();
2290 ASSERT(m_jmpTable.isEmpty());
2292 void* code = m_jit.copy();
2295 // Translate vPC offsets into addresses in JIT generated code, for switch tables.
2296 for (unsigned i = 0; i < m_switches.size(); ++i) {
2297 SwitchRecord record = m_switches[i];
2298 unsigned opcodeIndex = record.m_opcodeIndex;
2300 if (record.m_type != SwitchRecord::String) {
2301 ASSERT(record.m_type == SwitchRecord::Immediate || record.m_type == SwitchRecord::Character);
2302 ASSERT(record.m_jumpTable.m_simpleJumpTable->branchOffsets.size() == record.m_jumpTable.m_simpleJumpTable->ctiOffsets.size());
2304 record.m_jumpTable.m_simpleJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
2306 for (unsigned j = 0; j < record.m_jumpTable.m_simpleJumpTable->branchOffsets.size(); ++j) {
2307 unsigned offset = record.m_jumpTable.m_simpleJumpTable->branchOffsets[j];
2308 record.m_jumpTable.m_simpleJumpTable->ctiOffsets[j] = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_simpleJumpTable->ctiDefault;
2311 ASSERT(record.m_type == SwitchRecord::String);
2313 record.m_jumpTable.m_stringJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
2315 StringJumpTable::StringOffsetTable::iterator end = record.m_jumpTable.m_stringJumpTable->offsetTable.end();
2316 for (StringJumpTable::StringOffsetTable::iterator it = record.m_jumpTable.m_stringJumpTable->offsetTable.begin(); it != end; ++it) {
2317 unsigned offset = it->second.branchOffset;
2318 it->second.ctiOffset = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_stringJumpTable->ctiDefault;
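// For both table kinds a branch offset of zero means "no case here", so such entries resolve to
// ctiDefault; non-zero offsets are translated into relocated addresses of the corresponding labels.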
2323 for (Vector<HandlerInfo>::iterator iter = m_codeBlock->exceptionHandlers.begin(); iter != m_codeBlock->exceptionHandlers.end(); ++iter)
2324 iter->nativeCode = m_jit.getRelocatedAddress(code, m_labels[iter->target]);
2326 for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
2328 X86Assembler::link(code, iter->from, iter->to);
2329 m_codeBlock->ctiReturnAddressVPCMap.add(m_jit.getRelocatedAddress(code, iter->from), iter->opcodeIndex);
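// ctiReturnAddressVPCMap maps each call's return address back to the bytecode index it was
// emitted for; the cti_* helpers presumably use it to recover the current vPC (e.g. for
// exceptions and repatching).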
2332 if ((m_codeBlock->codeType == FunctionCode) && m_codeBlock->needsFullScopeChain)
2333 X86Assembler::link(code, callToUpdateScopeChain, (void*)Machine::cti_vm_updateScopeChain);
2335 // Link absolute addresses for jsr
2336 for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
2337 X86Assembler::linkAbsoluteAddress(code, iter->addrPosition, iter->target);
2339 for (unsigned i = 0; i < m_codeBlock->structureIDInstructions.size(); ++i) {
2340 StructureStubInfo& info = m_codeBlock->structureIDInstructions[i];
2341 info.callReturnLocation = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].callReturnLocation);
2342 info.hotPathBegin = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].hotPathBegin);
2345 m_codeBlock->ctiCode = code;
2348 void CTI::privateCompileGetByIdSelf(StructureID* structureID, size_t cachedOffset, void* returnAddress)
2350 // Check eax is an object of the right StructureID.
2351 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2352 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2353 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2354 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2356 // Checks out okay! - getDirectOffset
2357 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2358 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
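// Net effect of the two loads (sketch): eax = baseObject->m_propertyStorage[cachedOffset], a
// direct property read guarded only by the StructureID check above.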
2361 void* code = m_jit.copy();
2364 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2365 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2367 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2369 ctiRepatchCallByReturnAddress(returnAddress, code);
2372 void CTI::privateCompileGetByIdProto(StructureID* structureID, StructureID* prototypeStructureID, size_t cachedOffset, void* returnAddress)
2374 #if USE(CTI_REPATCH_PIC)
2375 StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
2377 // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
2378 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2380 // The prototype object definitely exists (if this stub exists, the CodeBlock is referencing a StructureID that
2381 // references the prototype object - so let's speculatively load its property storage nice and early!)
2382 JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
2383 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2384 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
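// The address of the prototype's m_propertyStorage member is baked into the stub and dereferenced
// at run time, so the load remains valid even if the storage array itself is reallocated.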
2386 // check eax is an object of the right StructureID.
2387 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2388 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2389 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2390 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2392 // Check that the prototype object's StructureID has not changed.
2393 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2394 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
2395 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
2397 // Checks out okay! - getDirectOffset
2398 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::ecx);
2400 X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
2402 void* code = m_jit.copy();
2405 // Use the repatch information to link the failure cases back to the original slow case routine.
2406 void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
2407 X86Assembler::link(code, failureCases1, slowCaseBegin);
2408 X86Assembler::link(code, failureCases2, slowCaseBegin);
2409 X86Assembler::link(code, failureCases3, slowCaseBegin);
2411 // On success, return back to the hot path code, at the point where it will perform the store to dest for us.
2412 intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
2413 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
2415 // Track the stub we have created so that it will be deleted later.
2416 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2418 // Finally, repatch the jump to the slow case in the hot path to jump here instead.
2419 // FIXME: should revert this repatching on failure.
2420 intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
2421 X86Assembler::repatchBranchOffset(jmpLocation, code);
2423 // The prototype object definitely exists (if this stub exists, the CodeBlock is referencing a StructureID that
2424 // references the prototype object - so let's speculatively load its property storage nice and early!)
2425 JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
2426 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2427 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2429 // check eax is an object of the right StructureID.
2430 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2431 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2432 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2433 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2435 // Check that the prototype object's StructureID has not changed.
2436 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2437 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
2438 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
2440 // Checks out okay! - getDirectOffset
2441 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
2445 void* code = m_jit.copy();
2448 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2449 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2450 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2452 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2454 ctiRepatchCallByReturnAddress(returnAddress, code);
2458 void CTI::privateCompileGetByIdChain(StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset, void* returnAddress)
2462 Vector<X86Assembler::JmpSrc> bucketsOfFail;
2464 // Check eax is an object of the right StructureID.
2465 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2466 bucketsOfFail.append(m_jit.emitUnlinkedJne());
2467 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2468 bucketsOfFail.append(m_jit.emitUnlinkedJne());
2470 StructureID* currStructureID = structureID;
2471 RefPtr<StructureID>* chainEntries = chain->head();
2472 JSObject* protoObject = 0;
2473 for (unsigned i = 0; i<count; ++i) {
2474 protoObject = static_cast<JSObject*>(currStructureID->prototypeForLookup(m_exec));
2475 currStructureID = chainEntries[i].get();
2477 // Check that the prototype object's StructureID has not changed.
2478 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2479 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(currStructureID), static_cast<void*>(protoStructureIDAddress));
2480 bucketsOfFail.append(m_jit.emitUnlinkedJne());
2482 ASSERT(protoObject);
2484 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2485 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2486 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
2489 bucketsOfFail.append(m_jit.emitUnlinkedJmp());
2491 void* code = m_jit.copy();
2494 for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
2495 X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2497 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2499 ctiRepatchCallByReturnAddress(returnAddress, code);
2502 void CTI::privateCompilePutByIdReplace(StructureID* structureID, size_t cachedOffset, void* returnAddress)
2504 // check eax is an object of the right StructureID.
2505 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2506 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2507 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2508 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2510 // checks out okay! - putDirectOffset
2511 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2512 m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
2515 void* code = m_jit.copy();
2518 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
2519 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
2521 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2523 ctiRepatchCallByReturnAddress(returnAddress, code);
2528 static JSValue* SFX_CALL transitionObject(StructureID* newStructureID, size_t cachedOffset, JSObject* baseObject, JSValue* value)
2530 StructureID* oldStructureID = newStructureID->previousID();
2532 baseObject->transitionTo(newStructureID);
2534 if (oldStructureID->propertyMap().storageSize() == JSObject::inlineStorageCapacity)
2535 baseObject->allocatePropertyStorage(oldStructureID->propertyMap().storageSize(), oldStructureID->propertyMap().size());
2537 baseObject->putDirectOffset(cachedOffset, value);
2543 static inline bool transitionWillNeedStorageRealloc(StructureID* oldStructureID, StructureID* newStructureID)
2545 if (oldStructureID->propertyMap().storageSize() == JSObject::inlineStorageCapacity)
2548 if (oldStructureID->propertyMap().storageSize() < JSObject::inlineStorageCapacity)
2551 if (oldStructureID->propertyMap().size() != newStructureID->propertyMap().size())
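// transitionWillNeedStorageRealloc() decides whether this transition has to grow the out-of-line
// property storage; if so, privateCompilePutByIdTransition() below emits a call to
// transitionObject() (which performs allocatePropertyStorage) instead of the inline fast path.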
2557 void CTI::privateCompilePutByIdTransition(StructureID* oldStructureID, StructureID* newStructureID, size_t cachedOffset, StructureIDChain* sIDC, void* returnAddress)
2559 Vector<X86Assembler::JmpSrc, 16> failureCases;
2560 // check eax is an object of the right StructureID.
2561 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2562 failureCases.append(m_jit.emitUnlinkedJne());
2563 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(oldStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2564 failureCases.append(m_jit.emitUnlinkedJne());
2565 Vector<X86Assembler::JmpSrc> successCases;
2568 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
2569 // proto(ecx) = baseObject->structureID()->prototype()
2570 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
2571 failureCases.append(m_jit.emitUnlinkedJne());
2572 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
2574 // ecx now holds baseObject's prototype - walk the chain.
2575 for (RefPtr<StructureID>* it = sIDC->head(); *it; ++it) {
2576 // null check the prototype
2577 m_jit.cmpl_i32r(reinterpret_cast<intptr_t> (jsNull()), X86::ecx);
2578 successCases.append(m_jit.emitUnlinkedJe());
2580 // Check the structure id
2581 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(it->get()), OBJECT_OFFSET(JSCell, m_structureID), X86::ecx);
2582 failureCases.append(m_jit.emitUnlinkedJne());
2584 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::ecx, X86::ecx);
2585 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
2586 failureCases.append(m_jit.emitUnlinkedJne());
2587 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
2590 failureCases.append(m_jit.emitUnlinkedJne());
2591 for (unsigned i = 0; i < successCases.size(); ++i)
2592 m_jit.link(successCases[i], m_jit.label());
2594 X86Assembler::JmpSrc callTarget;
2595 // Fast case, don't need to do any heavy lifting, so don't bother making a call.
2596 if (!transitionWillNeedStorageRealloc(oldStructureID, newStructureID)) {
2597 // Assumes m_refCount can be decremented in place; the refcount decrement is safe because the
2598 // CodeBlock should ensure oldStructureID->m_refCount > 0.
2599 m_jit.subl_i8m(1, reinterpret_cast<void*>(oldStructureID));
2600 m_jit.addl_i8m(1, reinterpret_cast<void*>(newStructureID));
2601 m_jit.movl_i32m(reinterpret_cast<uint32_t>(newStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2604 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2605 m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
2607 // Slow case transition -- we're going to need to do quite a bit of work,
2608 // so just make a call.
2609 m_jit.pushl_r(X86::edx);
2610 m_jit.pushl_r(X86::eax);
2611 m_jit.movl_i32r(cachedOffset, X86::eax);
2612 m_jit.pushl_r(X86::eax);
2613 m_jit.movl_i32r(reinterpret_cast<uint32_t>(newStructureID), X86::eax);
2614 m_jit.pushl_r(X86::eax);
2615 callTarget = m_jit.emitCall();
2616 m_jit.addl_i32r(4 * sizeof(void*), X86::esp);
2619 void* code = m_jit.copy();
2622 for (unsigned i = 0; i < failureCases.size(); ++i)
2623 X86Assembler::link(code, failureCases[i], reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
2625 if (transitionWillNeedStorageRealloc(oldStructureID, newStructureID))
2626 X86Assembler::link(code, callTarget, reinterpret_cast<void*>(transitionObject));
2628 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2630 ctiRepatchCallByReturnAddress(returnAddress, code);
2633 void* CTI::privateCompileArrayLengthTrampoline()
2635 // Check eax is an array
2636 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2637 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2638 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
2639 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2641 // Checks out okay! - get the length from the storage
2642 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::eax);
2643 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::eax, X86::eax);
2645 m_jit.addl_rr(X86::eax, X86::eax);
2646 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
2647 m_jit.addl_i8r(1, X86::eax);
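// The add-to-itself plus add-one retag the raw length as an immediate integer, roughly
// eax = (length << 1) | 1; the preceding jo bails out for lengths too large to represent as an
// immediate.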
2651 void* code = m_jit.copy();
2654 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2655 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2656 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2661 void* CTI::privateCompileStringLengthTrampoline()
2663 // Check eax is a string
2664 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2665 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2666 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsStringVptr), X86::eax);
2667 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2669 // Checks out okay! - get the length from the UString.
2670 m_jit.movl_mr(OBJECT_OFFSET(JSString, m_value) + OBJECT_OFFSET(UString, m_rep), X86::eax, X86::eax);
2671 m_jit.movl_mr(OBJECT_OFFSET(UString::Rep, len), X86::eax, X86::eax);
2673 m_jit.addl_rr(X86::eax, X86::eax);
2674 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
2675 m_jit.addl_i8r(1, X86::eax);
2679 void* code = m_jit.copy();
2682 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2683 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2684 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2689 void CTI::patchGetByIdSelf(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
2691 StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
2693 // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
2694 // Should probably go to Machine::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
2695 ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_get_by_id_generic));
2697 // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
2698 X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
2699 X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdStructureID, reinterpret_cast<uint32_t>(structureID));
2702 void CTI::patchPutByIdReplace(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
2704 StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
2706 // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
2707 // Should probably go to Machine::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
2708 ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_put_by_id_generic));
2710 // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
2711 X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
2712 X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdStructureID, reinterpret_cast<uint32_t>(structureID));
2715 void CTI::privateCompilePatchGetArrayLength(void* returnAddress)
2717 StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
2719 // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
2720 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2722 // Check eax is an array
2723 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2724 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2725 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
2726 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2728 // Checks out okay! - get the length from the storage
2729 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
2730 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::ecx, X86::ecx);
2732 m_jit.addl_rr(X86::ecx, X86::ecx);
2733 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
2734 m_jit.addl_i8r(1, X86::ecx);
2736 X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
2738 void* code = m_jit.copy();
2741 // Use the repatch information to link the failure cases back to the original slow case routine.
2742 void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
2743 X86Assembler::link(code, failureCases1, slowCaseBegin);
2744 X86Assembler::link(code, failureCases2, slowCaseBegin);
2745 X86Assembler::link(code, failureCases3, slowCaseBegin);
2747 // On success, return back to the hot path code, at the point where it will perform the store to dest for us.
2748 intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
2749 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
2751 // Track the stub we have created so that it will be deleted later.
2752 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2754 // Finally, repatch the jump to the slow case in the hot path to jump here instead.
2755 // FIXME: should revert this repatching on failure.
2756 intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
2757 X86Assembler::repatchBranchOffset(jmpLocation, code);
2760 void CTI::emitGetVariableObjectRegister(X86Assembler::RegisterID variableObject, int index, X86Assembler::RegisterID dst)
2762 m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, dst);
2763 m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), dst, dst);
2764 m_jit.movl_mr(index * sizeof(Register), dst, dst);
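// Net effect (sketch): dst = variableObject->d->registers[index]; two loads reach the register
// array through the variable object's data pointer, then an indexed load picks out the slot.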
2767 void CTI::emitPutVariableObjectRegister(X86Assembler::RegisterID src, X86Assembler::RegisterID variableObject, int index)
2769 m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, variableObject);
2770 m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), variableObject, variableObject);
2771 m_jit.movl_rm(src, index * sizeof(Register), variableObject);
2776 void* CTI::compileRegExp(ExecState* exec, const UString& pattern, unsigned* numSubpatterns_ptr, const char** error_ptr, bool ignoreCase, bool multiline)
2778 // TODO: better error messages
2779 if (pattern.size() > MaxPatternSize) {
2780 *error_ptr = "regular expression too large";
2784 X86Assembler jit(exec->machine()->jitCodeBuffer());
2785 WRECParser parser(pattern, ignoreCase, multiline, jit);
2787 jit.emitConvertToFastCall();
2789 // Preserve regs & initialize outputRegister.
2790 jit.pushl_r(WRECGenerator::outputRegister);
2791 jit.pushl_r(WRECGenerator::currentValueRegister);
2792 // Push pos onto the stack, both to preserve it and to make it available as a parameter to parseDisjunction.
2793 jit.pushl_r(WRECGenerator::currentPositionRegister);
2794 // load output pointer
2799 , X86::esp, WRECGenerator::outputRegister);
2801 // restart point on match fail.
2802 WRECGenerator::JmpDst nextLabel = jit.label();
2804 // (1) Parse Disjunction:
2806 // Parsing the disjunction should fully consume the pattern.
2807 JmpSrcVector failures;
2808 parser.parseDisjunction(failures);
2809 if (!parser.isEndOfPattern()) {
2810 parser.m_err = WRECParser::Error_malformedPattern;
2813 // TODO: better error messages
2814 *error_ptr = "TODO: better error messages";
2819 // Set return value & pop registers from the stack.
2821 jit.testl_rr(WRECGenerator::outputRegister, WRECGenerator::outputRegister);
2822 WRECGenerator::JmpSrc noOutput = jit.emitUnlinkedJe();
2824 jit.movl_rm(WRECGenerator::currentPositionRegister, 4, WRECGenerator::outputRegister);
2825 jit.popl_r(X86::eax);
2826 jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
2827 jit.popl_r(WRECGenerator::currentValueRegister);
2828 jit.popl_r(WRECGenerator::outputRegister);
2831 jit.link(noOutput, jit.label());
2833 jit.popl_r(X86::eax);
2834 jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
2835 jit.popl_r(WRECGenerator::currentValueRegister);
2836 jit.popl_r(WRECGenerator::outputRegister);
2840 // All failures link to here. Advance the start point and, if it is still within range, loop.
2841 // Otherwise, return the failure value.
2842 WRECGenerator::JmpDst here = jit.label();
2843 for (unsigned i = 0; i < failures.size(); ++i)
2844 jit.link(failures[i], here);
2847 jit.movl_mr(X86::esp, WRECGenerator::currentPositionRegister);
2848 jit.addl_i8r(1, WRECGenerator::currentPositionRegister);
2849 jit.movl_rm(WRECGenerator::currentPositionRegister, X86::esp);
2850 jit.cmpl_rr(WRECGenerator::lengthRegister, WRECGenerator::currentPositionRegister);
2851 jit.link(jit.emitUnlinkedJle(), nextLabel);
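// Retry loop: reload the saved match start position from the stack, advance it by one, store it
// back, and if it is still <= the subject length jump back to nextLabel to try matching from the
// next position; otherwise fall through to the failure return below (-1 in eax).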
2853 jit.addl_i8r(4, X86::esp);
2855 jit.movl_i32r(-1, X86::eax);
2856 jit.popl_r(WRECGenerator::currentValueRegister);
2857 jit.popl_r(WRECGenerator::outputRegister);
2860 *numSubpatterns_ptr = parser.m_numSubpatterns;
2862 void* code = jit.copy();
2867 #endif // ENABLE(WREC)
2871 #endif // ENABLE(CTI)