2 * Copyright (C) 2008 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "CodeBlock.h"
34 #include "wrec/WREC.h"
40 #if COMPILER(GCC) && PLATFORM(X86)
// ctiTrampoline: the entry point from C++ into JIT-generated code.
// NOTE(review): the original-line numbers jump here (41, 44-45, 50-55, 58,
// 61-64 are absent), so the surrounding asm() wrapper, the register
// save/restore pushes, and the ret instructions have been elided from this
// chunk; verify against the upstream file.
42 ".globl _ctiTrampoline" "\n"
43 "_ctiTrampoline:" "\n"
46 "subl $0x24, %esp" "\n"
47 "movl $512, %esi" "\n"
48 "call *0x30(%esp)" "\n" // 0x30 = 0x0C * 4, 0x0C = CTI_ARGS_code
49 "addl $0x24, %esp" "\n"
// ctiVMThrowTrampoline: entered when a helper has raised a VM exception.
// Loads CTI_ARGS_exec into ecx and compares a field of the ExecState at
// offset 8 against 0 (by analogy with emitDebugExceptionCheck this is
// presumably m_exception — confirm), then calls Machine::cti_vm_throw.
56 ".globl _ctiVMThrowTrampoline" "\n"
57 "_ctiVMThrowTrampoline:" "\n"
59 "movl 0x34(%esp), %ecx" "\n" // 0x34 = 0x0D * 4, 0x0D = CTI_ARGS_exec
60 "cmpl $0, 8(%ecx)" "\n"
65 "call __ZN3JSC7Machine12cti_vm_throwEPv" "\n"
66 "addl $0x24, %esp" "\n"
// MSVC counterparts of the GCC trampolines above, written as naked functions
// with inline asm. NOTE(review): the function bodies are elided from this
// chunk (original-line numbers jump from 76 to 92 to 96); only the signatures
// and one inline-asm statement remain visible — verify against upstream.
76 __declspec(naked) JSValue* ctiTrampoline(void* code, ExecState* exec, RegisterFile* registerFile, Register* r, ScopeChainNode* scopeChain, CodeBlock* codeBlock, JSValue** exception, Profiler**)
92 __declspec(naked) void ctiVMThrowTrampoline()
96 call JSC::Machine::cti_vm_throw;
109 ALWAYS_INLINE bool CTI::isConstant(int src)
111 return src >= m_codeBlock->numVars && src < m_codeBlock->numVars + m_codeBlock->numConstants;
114 ALWAYS_INLINE JSValue* CTI::getConstant(ExecState* exec, int src)
116 return m_codeBlock->constantRegisters[src - m_codeBlock->numVars].jsValue(exec);
119 // get arg puts an arg from the SF register array into a h/w register
120 ALWAYS_INLINE void CTI::emitGetArg(unsigned src, X86Assembler::RegisterID dst)
122 // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
123 if (isConstant(src)) {
124 JSValue* js = getConstant(m_exec, src);
125 m_jit.movl_i32r(reinterpret_cast<unsigned>(js), dst);
127 m_jit.movl_mr(src * sizeof(Register), X86::edi, dst);
130 // get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
131 ALWAYS_INLINE void CTI::emitGetPutArg(unsigned src, unsigned offset, X86Assembler::RegisterID scratch)
133 if (isConstant(src)) {
134 JSValue* js = getConstant(m_exec, src);
135 m_jit.movl_i32m(reinterpret_cast<unsigned>(js), offset + sizeof(void*), X86::esp);
137 m_jit.movl_mr(src * sizeof(Register), X86::edi, scratch);
138 m_jit.movl_rm(scratch, offset + sizeof(void*), X86::esp);
142 // puts an arg onto the stack, as an arg to a context threaded function.
143 ALWAYS_INLINE void CTI::emitPutArg(X86Assembler::RegisterID src, unsigned offset)
145 m_jit.movl_rm(src, offset + sizeof(void*), X86::esp);
148 ALWAYS_INLINE void CTI::emitPutArgConstant(unsigned value, unsigned offset)
150 m_jit.movl_i32m(value, offset + sizeof(void*), X86::esp);
153 ALWAYS_INLINE JSValue* CTI::getConstantImmediateNumericArg(unsigned src)
155 if (isConstant(src)) {
156 JSValue* js = getConstant(m_exec, src);
157 return JSImmediate::isNumber(js) ? js : 0;
162 ALWAYS_INLINE void CTI::emitPutCTIParam(void* value, unsigned name)
164 m_jit.movl_i32m(reinterpret_cast<intptr_t>(value), name * sizeof(void*), X86::esp);
167 ALWAYS_INLINE void CTI::emitPutCTIParam(X86Assembler::RegisterID from, unsigned name)
169 m_jit.movl_rm(from, name * sizeof(void*), X86::esp);
172 ALWAYS_INLINE void CTI::emitGetCTIParam(unsigned name, X86Assembler::RegisterID to)
174 m_jit.movl_mr(name * sizeof(void*), X86::esp, to);
177 ALWAYS_INLINE void CTI::emitPutToCallFrameHeader(X86Assembler::RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
179 m_jit.movl_rm(from, entry * sizeof(Register), X86::edi);
182 ALWAYS_INLINE void CTI::emitGetFromCallFrameHeader(RegisterFile::CallFrameHeaderEntry entry, X86Assembler::RegisterID to)
184 m_jit.movl_mr(entry * sizeof(Register), X86::edi, to);
187 ALWAYS_INLINE void CTI::emitPutResult(unsigned dst, X86Assembler::RegisterID from)
189 m_jit.movl_rm(from, dst * sizeof(Register), X86::edi);
190 // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
193 ALWAYS_INLINE void CTI::emitInitRegister(unsigned dst)
195 m_jit.movl_i32m(reinterpret_cast<unsigned>(jsUndefined()), dst * sizeof(Register), X86::edi);
196 // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
199 #if ENABLE(SAMPLING_TOOL)
200 unsigned inCalledCode = 0;
// Overwrites a stored return address with a new target.
// NOTE(review): the body was elided from this chunk; the single plausible
// implementation, consistent with the name and signature, is restored here.
void ctiSetReturnAddress(void** where, void* what)
{
    *where = what;
}
// Repatches the rel32 operand of the call instruction that returns to
// 'where': the slot one pointer-width below the return address is rewritten
// with the displacement from the return address to the new target 'what'
// (x86 call displacements are relative to the instruction end).
void ctiRepatchCallByReturnAddress(void* where, void* what)
{
    (static_cast<void**>(where))[-1] = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(what) - reinterpret_cast<uintptr_t>(where));
}
// Debug-only check planted after helper calls: loads CTI_ARGS_exec and
// verifies that ExecState::m_exception is 0, branching past a trap otherwise.
// NOTE(review): two definitions appear back to back here; the preprocessor
// lines selecting between them (presumably #ifndef NDEBUG / #else / #endif),
// the empty body of the first, and original line 226 (the instruction emitted
// when an exception IS pending, between 225 and 227) were elided from this
// chunk — verify against the upstream file.
215 ALWAYS_INLINE void CTI::emitDebugExceptionCheck()
221 ALWAYS_INLINE void CTI::emitDebugExceptionCheck()
223 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
224 m_jit.cmpl_i32m(0, OBJECT_OFFSET(ExecState, m_exception), X86::ecx);
225 X86Assembler::JmpSrc noException = m_jit.emitUnlinkedJe();
227 m_jit.link(noException, m_jit.label());
// Debugging aid: prints one-character type codes for the two source operands
// of the current opcode when they are constants ('i' immediate number,
// 'b' boolean, 'u' undefined, 'n' null, 's' string, 'o' object, '?'/'k'
// otherwise; '*' = not a constant). NOTE(review): original lines 231-232,
// 235, 241, 244-246, 249, 255, 258-259 are absent from this chunk — the
// declarations/assignments of which1 and which2 and several closing
// braces/ternary arms were elided; verify against upstream.
230 void CTI::printOpcodeOperandTypes(unsigned src1, unsigned src2)
233 if (isConstant(src1)) {
234 JSValue* js = getConstant(m_exec, src1);
236 JSImmediate::isImmediate(js) ?
237 (JSImmediate::isNumber(js) ? 'i' :
238 JSImmediate::isBoolean(js) ? 'b' :
239 js->isUndefined() ? 'u' :
240 js->isNull() ? 'n' : '?')
242 (js->isString() ? 's' :
243 js->isObject() ? 'o' :
247 if (isConstant(src2)) {
248 JSValue* js = getConstant(m_exec, src2);
250 JSImmediate::isImmediate(js) ?
251 (JSImmediate::isNumber(js) ? 'i' :
252 JSImmediate::isBoolean(js) ? 'b' :
253 js->isUndefined() ? 'u' :
254 js->isNull() ? 'n' : '?')
256 (js->isString() ? 's' :
257 js->isObject() ? 'o' :
260 if ((which1 != '*') | (which2 != '*'))
261 fprintf(stderr, "Types %c %c\n", which1, which2);
266 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, X86::RegisterID r)
268 X86Assembler::JmpSrc call = m_jit.emitCall(r);
269 m_calls.append(CallRecord(call, opcodeIndex));
270 emitDebugExceptionCheck();
275 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_j helper)
277 #if ENABLE(SAMPLING_TOOL)
278 m_jit.movl_i32m(1, &inCalledCode);
280 X86Assembler::JmpSrc call = m_jit.emitCall();
281 m_calls.append(CallRecord(call, helper, opcodeIndex));
282 emitDebugExceptionCheck();
283 #if ENABLE(SAMPLING_TOOL)
284 m_jit.movl_i32m(0, &inCalledCode);
290 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_p helper)
292 #if ENABLE(SAMPLING_TOOL)
293 m_jit.movl_i32m(1, &inCalledCode);
295 X86Assembler::JmpSrc call = m_jit.emitCall();
296 m_calls.append(CallRecord(call, helper, opcodeIndex));
297 emitDebugExceptionCheck();
298 #if ENABLE(SAMPLING_TOOL)
299 m_jit.movl_i32m(0, &inCalledCode);
305 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_b helper)
307 #if ENABLE(SAMPLING_TOOL)
308 m_jit.movl_i32m(1, &inCalledCode);
310 X86Assembler::JmpSrc call = m_jit.emitCall();
311 m_calls.append(CallRecord(call, helper, opcodeIndex));
312 emitDebugExceptionCheck();
313 #if ENABLE(SAMPLING_TOOL)
314 m_jit.movl_i32m(0, &inCalledCode);
320 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_v helper)
322 #if ENABLE(SAMPLING_TOOL)
323 m_jit.movl_i32m(1, &inCalledCode);
325 X86Assembler::JmpSrc call = m_jit.emitCall();
326 m_calls.append(CallRecord(call, helper, opcodeIndex));
327 emitDebugExceptionCheck();
328 #if ENABLE(SAMPLING_TOOL)
329 m_jit.movl_i32m(0, &inCalledCode);
335 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_s helper)
337 #if ENABLE(SAMPLING_TOOL)
338 m_jit.movl_i32m(1, &inCalledCode);
340 X86Assembler::JmpSrc call = m_jit.emitCall();
341 m_calls.append(CallRecord(call, helper, opcodeIndex));
342 emitDebugExceptionCheck();
343 #if ENABLE(SAMPLING_TOOL)
344 m_jit.movl_i32m(0, &inCalledCode);
350 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotJSCell(X86Assembler::RegisterID reg, unsigned opcodeIndex)
352 m_jit.testl_i32r(JSImmediate::TagMask, reg);
353 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), opcodeIndex));
356 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImmNum(X86Assembler::RegisterID reg, unsigned opcodeIndex)
358 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, reg);
359 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), opcodeIndex));
362 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImmNums(X86Assembler::RegisterID reg1, X86Assembler::RegisterID reg2, unsigned opcodeIndex)
364 m_jit.movl_rr(reg1, X86::ecx);
365 m_jit.andl_rr(reg2, X86::ecx);
366 emitJumpSlowCaseIfNotImmNum(X86::ecx, opcodeIndex);
369 ALWAYS_INLINE unsigned CTI::getDeTaggedConstantImmediate(JSValue* imm)
371 ASSERT(JSImmediate::isNumber(imm));
372 return reinterpret_cast<unsigned>(imm) & ~JSImmediate::TagBitTypeInteger;
375 ALWAYS_INLINE void CTI::emitFastArithDeTagImmediate(X86Assembler::RegisterID reg)
377 // op_mod relies on this being a sub - setting zf if result is 0.
378 m_jit.subl_i8r(JSImmediate::TagBitTypeInteger, reg);
381 ALWAYS_INLINE void CTI::emitFastArithReTagImmediate(X86Assembler::RegisterID reg)
383 m_jit.addl_i8r(JSImmediate::TagBitTypeInteger, reg);
386 ALWAYS_INLINE void CTI::emitFastArithPotentiallyReTagImmediate(X86Assembler::RegisterID reg)
388 m_jit.orl_i32r(JSImmediate::TagBitTypeInteger, reg);
391 ALWAYS_INLINE void CTI::emitFastArithImmToInt(X86Assembler::RegisterID reg)
393 m_jit.sarl_i8r(1, reg);
396 ALWAYS_INLINE void CTI::emitFastArithIntToImmOrSlowCase(X86Assembler::RegisterID reg, unsigned opcodeIndex)
398 m_jit.addl_rr(reg, reg);
399 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), opcodeIndex));
400 emitFastArithReTagImmediate(reg);
403 ALWAYS_INLINE void CTI::emitFastArithIntToImmNoCheck(X86Assembler::RegisterID reg)
405 m_jit.addl_rr(reg, reg);
406 emitFastArithReTagImmediate(reg);
409 ALWAYS_INLINE void CTI::emitTagAsBoolImmediate(X86Assembler::RegisterID reg)
411 m_jit.shl_i8r(JSImmediate::ExtendedPayloadShift, reg);
412 m_jit.orl_i32r(JSImmediate::FullTagTypeBool, reg);
// Constructs a CTI compiler for the given code block; label and structure-
// stub tables are sized from the block's instruction counts up front.
// NOTE(review): the original-line numbering jumps from 416 to 419, so some
// member initializers (apparently for the machine/exec parameters, which are
// otherwise unused here) and the constructor body were elided from this
// chunk — verify against the upstream file.
415 CTI::CTI(Machine* machine, ExecState* exec, CodeBlock* codeBlock)
416 : m_jit(machine->jitCodeBuffer())
419 , m_codeBlock(codeBlock)
420 , m_labels(codeBlock ? codeBlock->instructions.size() : 0)
421 , m_structureStubCompilationInfo(codeBlock ? codeBlock->structureIDInstructions.size() : 0)
// Emits the common shape of a two-operand helper opcode: push src1/src2 as
// stack args, call Machine::cti_<name>, store the returned value into dst.
// NOTE(review): the surrounding case-label / advance / break lines of this
// macro (original lines 426, 431-433) appear to have been elided from this
// chunk — verify against the upstream file.
425 #define CTI_COMPILE_BINARY_OP(name) \
427 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
428 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
429 emitCall(i, Machine::cti_##name); \
430 emitPutResult(instruction[i + 1].u.operand); \
// One-operand variant of CTI_COMPILE_BINARY_OP above: push the single source
// operand, call Machine::cti_<name>, store the result into dst.
// NOTE(review): the case-label / advance / break lines (original lines 436,
// 440-442) appear elided from this chunk — verify against upstream.
435 #define CTI_COMPILE_UNARY_OP(name) \
437 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
438 emitCall(i, Machine::cti_##name); \
439 emitPutResult(instruction[i + 1].u.operand); \
444 #if ENABLE(SAMPLING_TOOL)
445 OpcodeID currentOpcodeID = static_cast<OpcodeID>(-1);
// Compiles one of the op_call family (call / call_eval / construct).
// instruction + i points at the opcode; operands used here are dst (i+1),
// func (i+2), thisVal or constructor-proto (i+3), firstArg (i+4),
// argCount (i+5) and registerOffset (i+6). Fast path dispatches JSFunctions
// through their generated CTI code; host functions and eval go through C
// helpers. NOTE(review): original-line numbers jump repeatedly inside this
// chunk (449, 454, 457, 464, 468, 473, 476-478, 484, 486, 489, 492, 495-496,
// 503, 506, 509, 512, 517, 522, 524, 534, 539+ absent) — braces, else-arms
// and the function's close were elided by extraction; verify against upstream.
448 void CTI::compileOpCall(Instruction* instruction, unsigned i, CompileOpCallType type)
450 int dst = instruction[i + 1].u.operand;
451 int firstArg = instruction[i + 4].u.operand;
452 int argCount = instruction[i + 5].u.operand;
453 int registerOffset = instruction[i + 6].u.operand;
// For eval, additionally pass the thisVal operand to the helper.
455 if (type == OpCallEval)
456 emitGetPutArg(instruction[i + 3].u.operand, 16, X86::ecx);
// Argument layout differs between construct (5 args) and call (4 args).
458 if (type == OpConstruct) {
459 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 20);
460 emitPutArgConstant(argCount, 16);
461 emitPutArgConstant(registerOffset, 12);
462 emitPutArgConstant(firstArg, 8);
463 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
465 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 12);
466 emitPutArgConstant(argCount, 8);
467 emitPutArgConstant(registerOffset, 4);
// Store 'this' into the first argument register of the callee frame.
469 int thisVal = instruction[i + 3].u.operand;
470 if (thisVal == missingThisObjectMarker()) {
471 // FIXME: should this be loaded dynamically off m_exec?
472 m_jit.movl_i32m(reinterpret_cast<unsigned>(m_exec->globalThisValue()), firstArg * sizeof(Register), X86::edi);
474 emitGetArg(thisVal, X86::ecx);
475 emitPutResult(firstArg, X86::ecx);
// Eval first tries cti_op_call_eval; impossibleValue() in eax signals that
// it was not actually an eval and the normal call path must run.
479 X86Assembler::JmpSrc wasEval;
480 if (type == OpCallEval) {
481 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
482 emitCall(i, Machine::cti_op_call_eval);
483 m_jit.emitRestoreArgumentReference();
485 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
487 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(JSImmediate::impossibleValue()), X86::eax);
488 wasEval = m_jit.emitUnlinkedJne();
490 // this sets up the first arg to op_cti_call (func), and explicitly leaves the value in ecx (checked just below).
491 emitGetArg(instruction[i + 2].u.operand, X86::ecx);
493 // this sets up the first arg to op_cti_call (func), and explicitly leaves the value in ecx (checked just below).
494 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
497 // Fast check for JS function.
498 m_jit.testl_i32r(JSImmediate::TagMask, X86::ecx);
499 X86Assembler::JmpSrc isNotObject = m_jit.emitUnlinkedJne();
500 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsFunctionVptr), X86::ecx);
501 X86Assembler::JmpSrc isJSFunction = m_jit.emitUnlinkedJe();
502 m_jit.link(isNotObject, m_jit.label());
504 // This handles host functions
505 emitCall(i, ((type == OpConstruct) ? Machine::cti_op_construct_NotJSConstruct : Machine::cti_op_call_NotJSFunction));
507 X86Assembler::JmpSrc wasNotJSFunction = m_jit.emitUnlinkedJmp();
508 m_jit.link(isJSFunction, m_jit.label());
510 // This handles JSFunctions
511 emitCall(i, ((type == OpConstruct) ? Machine::cti_op_construct_JSConstruct : Machine::cti_op_call_JSFunction));
513 // Initialize the parts of the call frame that have not already been initialized.
514 emitGetCTIParam(CTI_ARGS_r, X86::edi);
515 m_jit.movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), RegisterFile::CallerCodeBlock * static_cast<int>(sizeof(Register)), X86::edi);
516 m_jit.movl_i32m(dst, RegisterFile::ReturnValueRegister * static_cast<int>(sizeof(Register)), X86::edi);
518 // Check the ctiCode has been generated - if not, this is handled in a slow case.
519 m_jit.testl_rr(X86::eax, X86::eax);
520 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
521 emitCall(i, X86::eax);
523 // In the interpreter the following actions are performed by op_ret:
525 // Restore ExecState::m_scopeChain and CTI_ARGS_scopeChain. NOTE: After
526 // op_ret, %edx holds the caller's scope chain.
527 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
528 emitPutCTIParam(X86::edx, CTI_ARGS_scopeChain);
529 m_jit.movl_rm(X86::edx, OBJECT_OFFSET(ExecState, m_scopeChain), X86::ecx);
530 // Restore ExecState::m_callFrame.
531 m_jit.movl_rm(X86::edi, OBJECT_OFFSET(ExecState, m_callFrame), X86::ecx);
532 // Restore CTI_ARGS_codeBlock.
533 emitPutCTIParam(m_codeBlock, CTI_ARGS_codeBlock);
// Join point for the host-function and eval shortcut paths.
535 X86Assembler::JmpDst end = m_jit.label();
536 m_jit.link(wasNotJSFunction, end);
537 if (type == OpCallEval)
538 m_jit.link(wasEval, end);
// Compiles op_stricteq / op_nstricteq. Fast path: when both operands are
// immediates, strict equality is a plain pointer compare. When exactly one is
// immediate the answer is a constant (negated), except that the zero
// immediate is punted to a slow case; when neither is immediate, both slow-
// case checks below fire. NOTE(review): original-line numbering jumps inside
// this chunk (544, 546, 550, 553, 558, 560, 562, 566, 568, 570, 579, 581,
// 583, 585, 590, 592, 595+ absent) — notably the if (negated) selection
// between setne (561) and sete (563), the final store of the result, and the
// closing brace were elided; verify against the upstream file.
543 void CTI::compileOpStrictEq(Instruction* instruction, unsigned i, CompileOpStrictEqType type)
545 bool negated = (type == OpNStrictEq);
547 unsigned dst = instruction[i + 1].u.operand;
548 unsigned src1 = instruction[i + 2].u.operand;
549 unsigned src2 = instruction[i + 3].u.operand;
551 emitGetArg(src1, X86::eax);
552 emitGetArg(src2, X86::edx);
// Bail out of the fast path if either operand is a cell (tag bits clear).
554 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
555 X86Assembler::JmpSrc firstNotImmediate = m_jit.emitUnlinkedJe();
556 m_jit.testl_i32r(JSImmediate::TagMask, X86::edx);
557 X86Assembler::JmpSrc secondNotImmediate = m_jit.emitUnlinkedJe();
559 m_jit.cmpl_rr(X86::edx, X86::eax);
561 m_jit.setne_r(X86::eax);
563 m_jit.sete_r(X86::eax);
564 m_jit.movzbl_rr(X86::eax, X86::eax);
565 emitTagAsBoolImmediate(X86::eax);
567 X86Assembler::JmpSrc bothWereImmediates = m_jit.emitUnlinkedJmp();
569 m_jit.link(firstNotImmediate, m_jit.label());
571 // check that edx is immediate but not the zero immediate
572 m_jit.testl_i32r(JSImmediate::TagMask, X86::edx);
573 m_jit.setz_r(X86::ecx);
574 m_jit.movzbl_rr(X86::ecx, X86::ecx); // ecx is now 1 if edx was nonimmediate
575 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::edx);
576 m_jit.sete_r(X86::edx);
577 m_jit.movzbl_rr(X86::edx, X86::edx); // edx is now 1 if edx was the 0 immediate
578 m_jit.orl_rr(X86::ecx, X86::edx);
579 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJnz(), i))
599 void CTI::emitSlowScriptCheck(unsigned opcodeIndex)
601 m_jit.subl_i8r(1, X86::esi);
602 X86Assembler::JmpSrc skipTimeout = m_jit.emitUnlinkedJne();
603 emitCall(opcodeIndex, Machine::cti_timeout_check);
605 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
606 m_jit.movl_mr(OBJECT_OFFSET(ExecState, m_globalData), X86::ecx, X86::ecx);
607 m_jit.movl_mr(OBJECT_OFFSET(JSGlobalData, machine), X86::ecx, X86::ecx);
608 m_jit.movl_mr(OBJECT_OFFSET(Machine, m_ticksUntilNextTimeoutCheck), X86::ecx, X86::esi);
609 m_jit.link(skipTimeout, m_jit.label());
612 void CTI::privateCompileMainPass()
614 Instruction* instruction = m_codeBlock->instructions.begin();
615 unsigned instructionCount = m_codeBlock->instructions.size();
617 unsigned structureIDInstructionIndex = 0;
619 for (unsigned i = 0; i < instructionCount; ) {
620 m_labels[i] = m_jit.label();
622 #if ENABLE(SAMPLING_TOOL)
623 m_jit.movl_i32m(m_machine->getOpcodeID(instruction[i].u.opcode), ¤tOpcodeID);
626 ASSERT_WITH_MESSAGE(m_machine->isOpcode(instruction[i].u.opcode), "privateCompileMainPass gone bad @ %d", i);
627 m_jit.emitRestoreArgumentReference();
628 switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
630 unsigned src = instruction[i + 2].u.operand;
632 m_jit.movl_i32r(reinterpret_cast<unsigned>(getConstant(m_exec, src)), X86::edx);
634 emitGetArg(src, X86::edx);
635 emitPutResult(instruction[i + 1].u.operand, X86::edx);
640 unsigned dst = instruction[i + 1].u.operand;
641 unsigned src1 = instruction[i + 2].u.operand;
642 unsigned src2 = instruction[i + 3].u.operand;
643 if (isConstant(src2)) {
644 JSValue* value = getConstant(m_exec, src2);
645 if (JSImmediate::isNumber(value)) {
646 emitGetArg(src1, X86::eax);
647 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
648 m_jit.addl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
649 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
654 } else if (!isConstant(src1)) {
655 emitGetArg(src1, X86::eax);
656 emitGetArg(src2, X86::edx);
657 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
658 emitFastArithDeTagImmediate(X86::eax);
659 m_jit.addl_rr(X86::edx, X86::eax);
660 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
665 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
666 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
667 emitCall(i, Machine::cti_op_add);
668 emitPutResult(instruction[i + 1].u.operand);
673 if (m_codeBlock->needsFullScopeChain)
674 emitCall(i, Machine::cti_op_end);
675 emitGetArg(instruction[i + 1].u.operand, X86::eax);
676 #if ENABLE(SAMPLING_TOOL)
677 m_jit.movl_i32m(-1, ¤tOpcodeID);
679 m_jit.pushl_m(RegisterFile::ReturnPC * static_cast<int>(sizeof(Register)), X86::edi);
685 unsigned target = instruction[i + 1].u.operand;
686 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
691 int srcDst = instruction[i + 1].u.operand;
692 emitGetArg(srcDst, X86::eax);
693 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
694 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
695 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
696 emitPutResult(srcDst, X86::eax);
701 emitSlowScriptCheck(i);
703 unsigned target = instruction[i + 1].u.operand;
704 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
708 case op_loop_if_less: {
709 emitSlowScriptCheck(i);
711 unsigned target = instruction[i + 3].u.operand;
712 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
714 emitGetArg(instruction[i + 1].u.operand, X86::edx);
715 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
716 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
717 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
719 emitGetArg(instruction[i + 1].u.operand, X86::eax);
720 emitGetArg(instruction[i + 2].u.operand, X86::edx);
721 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
722 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
723 m_jit.cmpl_rr(X86::edx, X86::eax);
724 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
729 case op_loop_if_lesseq: {
730 emitSlowScriptCheck(i);
732 unsigned target = instruction[i + 3].u.operand;
733 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
735 emitGetArg(instruction[i + 1].u.operand, X86::edx);
736 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
737 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
738 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
740 emitGetArg(instruction[i + 1].u.operand, X86::eax);
741 emitGetArg(instruction[i + 2].u.operand, X86::edx);
742 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
743 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
744 m_jit.cmpl_rr(X86::edx, X86::eax);
745 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
750 case op_new_object: {
751 emitCall(i, Machine::cti_op_new_object);
752 emitPutResult(instruction[i + 1].u.operand);
757 // In order to be able to repatch both the StructureID, and the object offset, we store one pointer,
758 // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
759 // such that the StructureID & offset are always at the same distance from this.
761 emitGetArg(instruction[i + 1].u.operand, X86::eax);
762 emitGetArg(instruction[i + 3].u.operand, X86::edx);
764 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
765 X86Assembler::JmpDst hotPathBegin = m_jit.label();
766 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
767 ++structureIDInstructionIndex;
769 // Jump to a slow case if either the base object is an immediate, or if the StructureID does not match.
770 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
771 // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
772 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
773 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdStructureID);
774 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
776 // Plant a load from a bogus ofset in the object's property map; we will patch this later, if it is to be used.
777 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
778 m_jit.movl_rm(X86::edx, repatchGetByIdDefaultOffset, X86::eax);
779 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdPropertyMapOffset);
785 // As for put_by_id, get_by_id requires the offset of the StructureID and the offset of the access to be repatched.
786 // Additionally, for get_by_id we need repatch the offset of the branch to the slow case (we repatch this to jump
787 // to array-length / prototype access tranpolines, and finally we also the the property-map access offset as a label
788 // to jump back to if one of these trampolies finds a match.
790 emitGetArg(instruction[i + 2].u.operand, X86::eax);
792 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
794 X86Assembler::JmpDst hotPathBegin = m_jit.label();
795 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
796 ++structureIDInstructionIndex;
798 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
799 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
800 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdStructureID);
801 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
802 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdBranchToSlowCase);
804 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
805 m_jit.movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::ecx);
806 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdPropertyMapOffset);
807 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
812 case op_instanceof: {
813 emitGetArg(instruction[i + 2].u.operand, X86::eax); // value
814 emitGetArg(instruction[i + 3].u.operand, X86::ecx); // baseVal
815 emitGetArg(instruction[i + 4].u.operand, X86::edx); // proto
817 // check if any are immediates
818 m_jit.orl_rr(X86::eax, X86::ecx);
819 m_jit.orl_rr(X86::edx, X86::ecx);
820 m_jit.testl_i32r(JSImmediate::TagMask, X86::ecx);
822 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJnz(), i));
824 // check that all are object type - this is a bit of a bithack to avoid excess branching;
825 // we check that the sum of the three type codes from StructureIDs is exactly 3 * ObjectType,
826 // this works because NumberType and StringType are smaller
827 m_jit.movl_i32r(3 * ObjectType, X86::ecx);
828 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::eax);
829 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::edx, X86::edx);
830 m_jit.subl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::eax, X86::ecx);
831 m_jit.subl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::edx, X86::ecx);
832 emitGetArg(instruction[i + 3].u.operand, X86::edx); // reload baseVal
833 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::edx, X86::edx);
834 m_jit.cmpl_rm(X86::ecx, OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::edx);
836 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
838 // check that baseVal's flags include ImplementsHasInstance but not OverridesHasInstance
839 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::edx, X86::ecx);
840 m_jit.andl_i32r(ImplementsHasInstance | OverridesHasInstance, X86::ecx);
841 m_jit.cmpl_i32r(ImplementsHasInstance, X86::ecx);
843 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
845 emitGetArg(instruction[i + 2].u.operand, X86::ecx); // reload value
846 emitGetArg(instruction[i + 4].u.operand, X86::edx); // reload proto
848 // optimistically load true result
849 m_jit.movl_i32r(reinterpret_cast<int32_t>(jsBoolean(true)), X86::eax);
851 X86Assembler::JmpDst loop = m_jit.label();
853 // load value's prototype
854 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::ecx, X86::ecx);
855 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
857 m_jit.cmpl_rr(X86::ecx, X86::edx);
858 X86Assembler::JmpSrc exit = m_jit.emitUnlinkedJe();
860 m_jit.cmpl_i32r(reinterpret_cast<int32_t>(jsNull()), X86::ecx);
861 X86Assembler::JmpSrc goToLoop = m_jit.emitUnlinkedJne();
862 m_jit.link(goToLoop, loop);
864 m_jit.movl_i32r(reinterpret_cast<int32_t>(jsBoolean(false)), X86::eax);
866 m_jit.link(exit, m_jit.label());
868 emitPutResult(instruction[i + 1].u.operand);
874 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
875 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
876 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
877 emitCall(i, Machine::cti_op_del_by_id);
878 emitPutResult(instruction[i + 1].u.operand);
883 unsigned dst = instruction[i + 1].u.operand;
884 unsigned src1 = instruction[i + 2].u.operand;
885 unsigned src2 = instruction[i + 3].u.operand;
886 if (isConstant(src1) || isConstant(src2)) {
887 unsigned constant = src1;
888 unsigned nonconstant = src2;
889 if (!isConstant(src1)) {
893 JSValue* value = getConstant(m_exec, constant);
894 if (JSImmediate::isNumber(value)) {
895 emitGetArg(nonconstant, X86::eax);
896 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
897 emitFastArithImmToInt(X86::eax);
898 m_jit.imull_i32r( X86::eax, getDeTaggedConstantImmediate(value), X86::eax);
899 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
900 emitFastArithPotentiallyReTagImmediate(X86::eax);
907 emitGetArg(src1, X86::eax);
908 emitGetArg(src2, X86::edx);
909 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
910 emitFastArithDeTagImmediate(X86::eax);
911 emitFastArithImmToInt(X86::edx);
912 m_jit.imull_rr(X86::edx, X86::eax);
913 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
914 emitFastArithPotentiallyReTagImmediate(X86::eax);
920 FuncDeclNode* func = (m_codeBlock->functions[instruction[i + 2].u.operand]).get();
921 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
922 emitCall(i, Machine::cti_op_new_func);
923 emitPutResult(instruction[i + 1].u.operand);
928 compileOpCall(instruction, i);
932 case op_get_global_var: {
933 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 2].u.jsCell);
934 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
935 emitGetVariableObjectRegister(X86::eax, instruction[i + 3].u.operand, X86::eax);
936 emitPutResult(instruction[i + 1].u.operand, X86::eax);
940 case op_put_global_var: {
941 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 1].u.jsCell);
942 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
943 emitGetArg(instruction[i + 3].u.operand, X86::edx);
944 emitPutVariableObjectRegister(X86::edx, X86::eax, instruction[i + 2].u.operand);
948 case op_get_scoped_var: {
949 int skip = instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain;
951 emitGetCTIParam(CTI_ARGS_scopeChain, X86::eax);
953 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::eax, X86::eax);
955 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::eax, X86::eax);
956 emitGetVariableObjectRegister(X86::eax, instruction[i + 2].u.operand, X86::eax);
957 emitPutResult(instruction[i + 1].u.operand);
961 case op_put_scoped_var: {
962 int skip = instruction[i + 2].u.operand + m_codeBlock->needsFullScopeChain;
964 emitGetCTIParam(CTI_ARGS_scopeChain, X86::edx);
965 emitGetArg(instruction[i + 3].u.operand, X86::eax);
967 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::edx, X86::edx);
969 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::edx, X86::edx);
970 emitPutVariableObjectRegister(X86::eax, X86::edx, instruction[i + 1].u.operand);
975 // Check for an activation - if there is one, jump to the hook below.
976 m_jit.cmpl_i32m(0, RegisterFile::OptionalCalleeActivation * static_cast<int>(sizeof(Register)), X86::edi);
977 X86Assembler::JmpSrc activation = m_jit.emitUnlinkedJne();
978 X86Assembler::JmpDst activated = m_jit.label();
980 // Check for a profiler - if there is one, jump to the hook below.
981 emitGetCTIParam(CTI_ARGS_profilerReference, X86::eax);
982 m_jit.cmpl_i32m(0, X86::eax);
983 X86Assembler::JmpSrc profile = m_jit.emitUnlinkedJne();
984 X86Assembler::JmpDst profiled = m_jit.label();
986 // We could JIT generate the deref, only calling out to C when the refcount hits zero.
987 if (m_codeBlock->needsFullScopeChain)
988 emitCall(i, Machine::cti_op_ret_scopeChain);
990 // Return the result in %eax, and the caller scope chain in %edx (this is read from the callee call frame,
991 // but is only assigned to ExecState::m_scopeChain if returning to a JSFunction).
992 emitGetArg(instruction[i + 1].u.operand, X86::eax);
993 m_jit.movl_mr(RegisterFile::CallerScopeChain * static_cast<int>(sizeof(Register)), X86::edi, X86::edx);
994 // Restore the machine return address from the callframe, roll the callframe back to the caller callframe,
995 // and preserve a copy of r on the stack at CTI_ARGS_r.
996 m_jit.movl_mr(RegisterFile::ReturnPC * static_cast<int>(sizeof(Register)), X86::edi, X86::ecx);
997 m_jit.movl_mr(RegisterFile::CallerRegisters * static_cast<int>(sizeof(Register)), X86::edi, X86::edi);
998 emitPutCTIParam(X86::edi, CTI_ARGS_r);
1000 m_jit.pushl_r(X86::ecx);
1004 m_jit.link(activation, m_jit.label());
1005 emitCall(i, Machine::cti_op_ret_activation);
1006 m_jit.link(m_jit.emitUnlinkedJmp(), activated);
1009 m_jit.link(profile, m_jit.label());
1010 emitCall(i, Machine::cti_op_ret_profiler);
1011 m_jit.link(m_jit.emitUnlinkedJmp(), profiled);
1016 case op_new_array: {
    // Creates a new array from a run of consecutive registers: passes the C
    // helper a pointer into the register file at operand 2 (edi holds r, the
    // current callframe base) plus operand 3 (presumably the element count --
    // confirm against cti_op_new_array).
1017 m_jit.leal_mr(sizeof(Register) * instruction[i + 2].u.operand, X86::edi, X86::edx); // edx = &r[operand2]
1018 emitPutArg(X86::edx, 0);
1019 emitPutArgConstant(instruction[i + 3].u.operand, 4);
1020 emitCall(i, Machine::cti_op_new_array);
1021 emitPutResult(instruction[i + 1].u.operand);
1026 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1027 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1028 emitCall(i, Machine::cti_op_resolve);
1029 emitPutResult(instruction[i + 1].u.operand);
1033 case op_construct: {
1034 compileOpCall(instruction, i, OpConstruct);
1038 case op_construct_verify: {
    // After 'new', verify the constructor's return value (already sitting in
    // result register operand 1): if it is an immediate, or a cell whose type
    // is not ObjectType, overwrite the result with operand 2 (the
    // freshly-created 'this' object); a genuine object result is kept as-is.
1039 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1041 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax); // immediate values can never be objects
1042 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJne();
1043 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1044 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
1045 X86Assembler::JmpSrc isObject = m_jit.emitUnlinkedJe(); // object: skip the overwrite below
1047 m_jit.link(isImmediate, m_jit.label());
1048 emitGetArg(instruction[i + 2].u.operand, X86::ecx); // not an object: substitute 'this'
1049 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
1050 m_jit.link(isObject, m_jit.label());
1055 case op_get_by_val: {
    // Fast path for indexed reads (base[index]): inline only the case where
    // the base is a JSArray (checked via its vptr) and the immediate-number
    // index is below the array's fast-access cutoff; everything else bails to
    // the slow case.
1056 emitGetArg(instruction[i + 2].u.operand, X86::eax); // eax = base
1057 emitGetArg(instruction[i + 3].u.operand, X86::edx); // edx = index
1058 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1059 emitFastArithImmToInt(X86::edx); // strip the immediate tag to get a raw int index
1060 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax); // base must be a cell, not an immediate
1061 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1062 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax); // vptr check: is the cell a JSArray?
1063 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1065 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
1066 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
1067 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
1068 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
1070 // Get the value from the vector
1071 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::eax);
1072 emitPutResult(instruction[i + 1].u.operand);
1076 case op_resolve_func: {
1077 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1078 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1079 emitCall(i, Machine::cti_op_resolve_func);
1080 emitPutResult(instruction[i + 1].u.operand);
1081 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
1082 emitPutResult(instruction[i + 2].u.operand);
1087 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1088 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1089 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1090 m_jit.subl_rr(X86::edx, X86::eax);
1091 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1092 emitFastArithReTagImmediate(X86::eax);
1093 emitPutResult(instruction[i + 1].u.operand);
1097 case op_put_by_val: {
    // Fast path for indexed writes (base[index] = value): same JSArray +
    // immediate-index guards as op_get_by_val. Writes below the fast-access
    // cutoff go straight to the vector; writes above it are allowed inline
    // only when they are within the vector AND overwrite a non-null slot
    // (a first write must go to the slow case so the bookkeeping is updated).
1098 emitGetArg(instruction[i + 1].u.operand, X86::eax); // eax = base
1099 emitGetArg(instruction[i + 2].u.operand, X86::edx); // edx = index
1100 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1101 emitFastArithImmToInt(X86::edx); // strip the immediate tag to get a raw int index
1102 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax); // base must be a cell, not an immediate
1103 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1104 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax); // vptr check: is the cell a JSArray?
1105 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1107 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
1108 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
1109 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
1110 X86Assembler::JmpSrc inFastVector = m_jit.emitUnlinkedJa();
1111 // No; oh well, check if the access is within the vector - if so, we may still be okay.
1112 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
1113 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
1115 // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
1116 // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff.
1117 m_jit.cmpl_i8m(0, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
1118 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
1120 // All good - put the value into the array.
1121 m_jit.link(inFastVector, m_jit.label());
1122 emitGetArg(instruction[i + 3].u.operand, X86::eax); // eax = value to store
1123 m_jit.movl_rm(X86::eax, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
1127 CTI_COMPILE_BINARY_OP(op_lesseq)
1128 case op_loop_if_true: {
1129 emitSlowScriptCheck(i);
1131 unsigned target = instruction[i + 2].u.operand;
1132 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1134 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1135 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
1136 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1137 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
1139 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1140 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1141 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1142 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1144 m_jit.link(isZero, m_jit.label());
1148 case op_resolve_base: {
1149 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1150 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1151 emitCall(i, Machine::cti_op_resolve_base);
1152 emitPutResult(instruction[i + 1].u.operand);
1157 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1158 emitCall(i, Machine::cti_op_negate);
1159 emitPutResult(instruction[i + 1].u.operand);
1163 case op_resolve_skip: {
1164 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1165 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1166 emitPutArgConstant(instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain, 4);
1167 emitCall(i, Machine::cti_op_resolve_skip);
1168 emitPutResult(instruction[i + 1].u.operand);
1172 case op_resolve_global: {
    // Inline cache for resolving a name on the global object. The instruction
    // stream itself carries the cache: operand 4 holds a StructureID and
    // operand 5 a property-storage offset. If the global object's current
    // StructureID matches, load the value directly from its property storage;
    // on a mismatch, call the C helper, which is also handed a pointer to
    // this instruction (presumably so it can refresh the cached words --
    // confirm against cti_op_resolve_global).
1174 unsigned globalObject = reinterpret_cast<unsigned>(instruction[i + 2].u.jsCell);
1175 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1176 void* structureIDAddr = reinterpret_cast<void*>(instruction + i + 4);
1177 void* offsetAddr = reinterpret_cast<void*>(instruction + i + 5);
1179 // Check StructureID of global object
1180 m_jit.movl_i32r(globalObject, X86::eax);
1181 m_jit.movl_mr(structureIDAddr, X86::edx); // edx = cached StructureID
1182 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1183 X86Assembler::JmpSrc slowCase = m_jit.emitUnlinkedJne(); // StructureIDs don't match
1184 m_slowCases.append(SlowCaseEntry(slowCase, i));
1186 // Load cached property
1187 m_jit.movl_mr(OBJECT_OFFSET(JSGlobalObject, m_propertyStorage), X86::eax, X86::eax);
1188 m_jit.movl_mr(offsetAddr, X86::edx); // edx = cached offset
1189 m_jit.movl_mr(0, X86::eax, X86::edx, sizeof(JSValue*), X86::eax); // eax = propertyStorage[offset]
1190 emitPutResult(instruction[i + 1].u.operand)
1191 X86Assembler::JmpSrc end = m_jit.emitUnlinkedJmp();
1194 m_jit.link(slowCase, m_jit.label());
1195 emitPutArgConstant(globalObject, 0);
1196 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1197 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 8); // lets the helper locate the cache words
1198 emitCall(i, Machine::cti_op_resolve_global);
1199 emitPutResult(instruction[i + 1].u.operand);
1200 m_jit.link(end, m_jit.label());
1202 ++structureIDInstructionIndex; // this opcode owns an entry in the structureID instruction table
1205 CTI_COMPILE_BINARY_OP(op_div)
1207 int srcDst = instruction[i + 1].u.operand;
1208 emitGetArg(srcDst, X86::eax);
1209 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1210 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
1211 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1212 emitPutResult(srcDst, X86::eax);
1217 unsigned target = instruction[i + 3].u.operand;
1218 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1220 emitGetArg(instruction[i + 1].u.operand, X86::edx);
1221 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1222 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
1223 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
1225 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1226 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1227 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1228 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1229 m_jit.cmpl_rr(X86::edx, X86::eax);
1230 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
1236 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1237 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
1238 m_jit.testl_i32r(JSImmediate::FullTagTypeMask, X86::eax); // i8?
1239 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1240 m_jit.xorl_i8r((JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue), X86::eax);
1241 emitPutResult(instruction[i + 1].u.operand);
1246 unsigned target = instruction[i + 2].u.operand;
1247 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1249 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1250 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1251 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1252 X86Assembler::JmpSrc isNonZero = m_jit.emitUnlinkedJne();
1254 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1255 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1256 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1257 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1259 m_jit.link(isNonZero, m_jit.label());
1264 int srcDst = instruction[i + 2].u.operand;
1265 emitGetArg(srcDst, X86::eax);
1266 m_jit.movl_rr(X86::eax, X86::edx);
1267 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1268 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1269 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1270 emitPutResult(srcDst, X86::edx);
1271 emitPutResult(instruction[i + 1].u.operand);
1275 case op_unexpected_load: {
1276 JSValue* v = m_codeBlock->unexpectedConstants[instruction[i + 2].u.operand];
1277 m_jit.movl_i32r(reinterpret_cast<unsigned>(v), X86::eax);
1278 emitPutResult(instruction[i + 1].u.operand);
1283 int retAddrDst = instruction[i + 1].u.operand;
1284 int target = instruction[i + 2].u.operand;
1285 m_jit.movl_i32m(0, sizeof(Register) * retAddrDst, X86::edi);
1286 X86Assembler::JmpDst addrPosition = m_jit.label();
1287 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1288 X86Assembler::JmpDst sretTarget = m_jit.label();
1289 m_jsrSites.append(JSRInfo(addrPosition, sretTarget));
1294 m_jit.jmp_m(sizeof(Register) * instruction[i + 1].u.operand, X86::edi);
1299 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1300 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1301 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1302 m_jit.cmpl_rr(X86::edx, X86::eax);
1303 m_jit.sete_r(X86::eax);
1304 m_jit.movzbl_rr(X86::eax, X86::eax);
1305 emitTagAsBoolImmediate(X86::eax);
1306 emitPutResult(instruction[i + 1].u.operand);
1311 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1312 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1313 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1314 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1315 emitFastArithImmToInt(X86::eax);
1316 emitFastArithImmToInt(X86::ecx);
1317 m_jit.shll_CLr(X86::eax);
1318 emitFastArithIntToImmOrSlowCase(X86::eax, i);
1319 emitPutResult(instruction[i + 1].u.operand);
1324 unsigned src1 = instruction[i + 2].u.operand;
1325 unsigned src2 = instruction[i + 3].u.operand;
1326 unsigned dst = instruction[i + 1].u.operand;
1327 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
1328 emitGetArg(src2, X86::eax);
1329 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1330 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax); // FIXME: make it more obvious this is relying on the format of JSImmediate
1332 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
1333 emitGetArg(src1, X86::eax);
1334 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1335 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax);
1338 emitGetArg(src1, X86::eax);
1339 emitGetArg(src2, X86::edx);
1340 m_jit.andl_rr(X86::edx, X86::eax);
1341 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1348 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1349 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1350 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1351 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1352 emitFastArithImmToInt(X86::ecx);
1353 m_jit.sarl_CLr(X86::eax);
1354 emitFastArithPotentiallyReTagImmediate(X86::eax);
1355 emitPutResult(instruction[i + 1].u.operand);
1360 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1361 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1362 m_jit.xorl_i8r(~JSImmediate::TagBitTypeInteger, X86::eax);
1363 emitPutResult(instruction[i + 1].u.operand);
1367 case op_resolve_with_base: {
1368 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1369 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1370 emitCall(i, Machine::cti_op_resolve_with_base);
1371 emitPutResult(instruction[i + 1].u.operand);
1372 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
1373 emitPutResult(instruction[i + 2].u.operand);
1377 case op_new_func_exp: {
1378 FuncExprNode* func = (m_codeBlock->functionExpressions[instruction[i + 2].u.operand]).get();
1379 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
1380 emitCall(i, Machine::cti_op_new_func_exp);
1381 emitPutResult(instruction[i + 1].u.operand);
1386 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1387 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1388 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1389 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1390 emitFastArithDeTagImmediate(X86::eax);
1391 emitFastArithDeTagImmediate(X86::ecx);
1392 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i)); // This is checking if the last detag resulted in a value 0.
1394 m_jit.idivl_r(X86::ecx);
1395 emitFastArithReTagImmediate(X86::edx);
1396 m_jit.movl_rr(X86::edx, X86::eax);
1397 emitPutResult(instruction[i + 1].u.operand);
1402 unsigned target = instruction[i + 2].u.operand;
1403 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1405 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1406 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
1407 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1408 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
1410 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1411 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1412 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1413 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1415 m_jit.link(isZero, m_jit.label());
1419 CTI_COMPILE_BINARY_OP(op_less)
1420 CTI_COMPILE_BINARY_OP(op_neq)
1422 int srcDst = instruction[i + 2].u.operand;
1423 emitGetArg(srcDst, X86::eax);
1424 m_jit.movl_rr(X86::eax, X86::edx);
1425 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1426 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1427 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1428 emitPutResult(srcDst, X86::edx);
1429 emitPutResult(instruction[i + 1].u.operand);
1433 CTI_COMPILE_BINARY_OP(op_urshift)
1435 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1436 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1437 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1438 m_jit.xorl_rr(X86::edx, X86::eax);
1439 emitFastArithReTagImmediate(X86::eax);
1440 emitPutResult(instruction[i + 1].u.operand);
1444 case op_new_regexp: {
1445 RegExp* regExp = m_codeBlock->regexps[instruction[i + 2].u.operand].get();
1446 emitPutArgConstant(reinterpret_cast<unsigned>(regExp), 0);
1447 emitCall(i, Machine::cti_op_new_regexp);
1448 emitPutResult(instruction[i + 1].u.operand);
1453 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1454 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1455 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1456 m_jit.orl_rr(X86::edx, X86::eax);
1457 emitPutResult(instruction[i + 1].u.operand);
1461 case op_call_eval: {
1462 compileOpCall(instruction, i, OpCallEval);
1467 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1468 emitCall(i, Machine::cti_op_throw);
1469 m_jit.addl_i8r(0x24, X86::esp);
1470 m_jit.popl_r(X86::edi);
1471 m_jit.popl_r(X86::esi);
1476 case op_get_pnames: {
1477 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1478 emitCall(i, Machine::cti_op_get_pnames);
1479 emitPutResult(instruction[i + 1].u.operand);
1483 case op_next_pname: {
    // Advances the property-name iterator (operand 2) via the C helper. A
    // null (zero) return value means iteration is finished, so the store and
    // the loop-back jump to 'target' are skipped; otherwise the next name is
    // stored in operand 1 and we jump back to the loop body.
1484 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1485 unsigned target = instruction[i + 3].u.operand;
1486 emitCall(i, Machine::cti_op_next_pname);
1487 m_jit.testl_rr(X86::eax, X86::eax); // zero => no more properties
1488 X86Assembler::JmpSrc endOfIter = m_jit.emitUnlinkedJe();
1489 emitPutResult(instruction[i + 1].u.operand);
1490 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 3 + target));
1491 m_jit.link(endOfIter, m_jit.label());
1495 case op_push_scope: {
1496 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1497 emitCall(i, Machine::cti_op_push_scope);
1501 case op_pop_scope: {
1502 emitCall(i, Machine::cti_op_pop_scope);
1506 CTI_COMPILE_UNARY_OP(op_typeof)
1507 CTI_COMPILE_UNARY_OP(op_is_undefined)
1508 CTI_COMPILE_UNARY_OP(op_is_boolean)
1509 CTI_COMPILE_UNARY_OP(op_is_number)
1510 CTI_COMPILE_UNARY_OP(op_is_string)
1511 CTI_COMPILE_UNARY_OP(op_is_object)
1512 CTI_COMPILE_UNARY_OP(op_is_function)
1514 compileOpStrictEq(instruction, i, OpStrictEq);
1518 case op_nstricteq: {
1519 compileOpStrictEq(instruction, i, OpNStrictEq);
1523 case op_to_jsnumber: {
1524 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1525 emitCall(i, Machine::cti_op_to_jsnumber);
1526 emitPutResult(instruction[i + 1].u.operand);
1531 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1532 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1533 emitCall(i, Machine::cti_op_in);
1534 emitPutResult(instruction[i + 1].u.operand);
1538 case op_push_new_scope: {
1539 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1540 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1541 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1542 emitCall(i, Machine::cti_op_push_new_scope);
1543 emitPutResult(instruction[i + 1].u.operand);
1548 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
1549 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
1550 m_jit.movl_mr(OBJECT_OFFSET(ExecState, m_exception), X86::ecx, X86::eax);
1551 m_jit.movl_i32m(0, OBJECT_OFFSET(ExecState, m_exception), X86::ecx);
1552 emitPutResult(instruction[i + 1].u.operand);
1556 case op_jmp_scopes: {
1557 unsigned count = instruction[i + 1].u.operand;
1558 emitPutArgConstant(count, 0);
1559 emitCall(i, Machine::cti_op_jmp_scopes);
1560 unsigned target = instruction[i + 2].u.operand;
1561 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1565 case op_put_by_index: {
1566 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1567 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1568 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1569 emitCall(i, Machine::cti_op_put_by_index);
1573 case op_switch_imm: {
    // switch over immediate (integer) scrutinees. The jump-table lookup is
    // done in C (cti_op_switch_imm); the helper returns the machine-code
    // address to branch to, and we jump through it. The SwitchRecord lets a
    // later pass fill in ctiOffsets once label addresses are known.
1574 unsigned tableIndex = instruction[i + 1].u.operand;
1575 unsigned defaultOffset = instruction[i + 2].u.operand;
1576 unsigned scrutinee = instruction[i + 3].u.operand;
1578 // create jump table for switch destinations, track this switch statement.
1579 SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTables[tableIndex];
1580 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Immediate));
1581 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
1583 emitGetPutArg(scrutinee, 0, X86::ecx);
1584 emitPutArgConstant(tableIndex, 4);
1585 emitCall(i, Machine::cti_op_switch_imm);
1586 m_jit.jmp_r(X86::eax); // eax = machine-code address returned by the helper
1590 case op_switch_char: {
    // switch over single-character string scrutinees; identical shape to
    // op_switch_imm but uses the character jump tables and the
    // cti_op_switch_char helper, which returns the target machine-code
    // address.
1591 unsigned tableIndex = instruction[i + 1].u.operand;
1592 unsigned defaultOffset = instruction[i + 2].u.operand;
1593 unsigned scrutinee = instruction[i + 3].u.operand;
1595 // create jump table for switch destinations, track this switch statement.
1596 SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTables[tableIndex];
1597 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Character));
1598 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
1600 emitGetPutArg(scrutinee, 0, X86::ecx);
1601 emitPutArgConstant(tableIndex, 4);
1602 emitCall(i, Machine::cti_op_switch_char);
1603 m_jit.jmp_r(X86::eax); // eax = machine-code address returned by the helper
1607 case op_switch_string: {
    // switch over string scrutinees. Uses a StringJumpTable (no ctiOffsets
    // pre-grow here, unlike the imm/char variants); cti_op_switch_string
    // performs the lookup and returns the machine-code address to jump to.
1608 unsigned tableIndex = instruction[i + 1].u.operand;
1609 unsigned defaultOffset = instruction[i + 2].u.operand;
1610 unsigned scrutinee = instruction[i + 3].u.operand;
1612 // create jump table for switch destinations, track this switch statement.
1613 StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTables[tableIndex];
1614 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset));
1616 emitGetPutArg(scrutinee, 0, X86::ecx);
1617 emitPutArgConstant(tableIndex, 4);
1618 emitCall(i, Machine::cti_op_switch_string);
1619 m_jit.jmp_r(X86::eax); // eax = machine-code address returned by the helper
1623 case op_del_by_val: {
1624 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1625 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1626 emitCall(i, Machine::cti_op_del_by_val);
1627 emitPutResult(instruction[i + 1].u.operand);
1631 case op_put_getter: {
1632 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1633 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1634 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1635 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1636 emitCall(i, Machine::cti_op_put_getter);
1640 case op_put_setter: {
1641 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1642 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1643 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1644 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1645 emitCall(i, Machine::cti_op_put_setter);
1649 case op_new_error: {
    // Builds an Error object via the C helper: operand 2 is passed through as
    // a constant (presumably an error-type code -- confirm against
    // cti_op_new_error), operand 3 indexes the message in the
    // unexpected-constant pool, and the current source line number is passed
    // for location info.
1650 JSValue* message = m_codeBlock->unexpectedConstants[instruction[i + 3].u.operand];
1651 emitPutArgConstant(instruction[i + 2].u.operand, 0);
1652 emitPutArgConstant(reinterpret_cast<unsigned>(message), 4);
1653 emitPutArgConstant(m_codeBlock->lineNumberForVPC(&instruction[i]), 8);
1654 emitCall(i, Machine::cti_op_new_error);
1655 emitPutResult(instruction[i + 1].u.operand);
1660 emitPutArgConstant(instruction[i + 1].u.operand, 0);
1661 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1662 emitPutArgConstant(instruction[i + 3].u.operand, 8);
1663 emitCall(i, Machine::cti_op_debug);
1668 unsigned dst = instruction[i + 1].u.operand;
1669 unsigned src1 = instruction[i + 2].u.operand;
1671 emitGetArg(src1, X86::eax);
1672 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1673 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJnz();
1675 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1676 m_jit.testl_i32m(MasqueradesAsUndefined, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::ecx);
1677 m_jit.setnz_r(X86::eax);
1679 X86Assembler::JmpSrc wasNotImmediate = m_jit.emitUnlinkedJmp();
1681 m_jit.link(isImmediate, m_jit.label());
1683 m_jit.movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
1684 m_jit.andl_rr(X86::eax, X86::ecx);
1685 m_jit.cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
1686 m_jit.sete_r(X86::eax);
1688 m_jit.link(wasNotImmediate, m_jit.label());
1690 m_jit.movzbl_rr(X86::eax, X86::eax);
1691 emitTagAsBoolImmediate(X86::eax);
1698 unsigned dst = instruction[i + 1].u.operand;
1699 unsigned src1 = instruction[i + 2].u.operand;
1701 emitGetArg(src1, X86::eax);
1702 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1703 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJnz();
1705 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1706 m_jit.testl_i32m(MasqueradesAsUndefined, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::ecx);
1707 m_jit.setz_r(X86::eax);
1709 X86Assembler::JmpSrc wasNotImmediate = m_jit.emitUnlinkedJmp();
1711 m_jit.link(isImmediate, m_jit.label());
1713 m_jit.movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
1714 m_jit.andl_rr(X86::eax, X86::ecx);
1715 m_jit.cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
1716 m_jit.setne_r(X86::eax);
1718 m_jit.link(wasNotImmediate, m_jit.label());
1720 m_jit.movzbl_rr(X86::eax, X86::eax);
1721 emitTagAsBoolImmediate(X86::eax);
1728 // Even though CTI doesn't use them, we initialize our constant
1729 // registers to zap stale pointers, to avoid unnecessarily prolonging
1730 // object lifetime and increasing GC pressure.
1731 size_t count = m_codeBlock->numVars + m_codeBlock->constantRegisters.size();
1732 for (size_t j = 0; j < count; ++j)
1733 emitInitRegister(j);
1738 case op_get_array_length:
1739 case op_get_by_id_chain:
1740 case op_get_by_id_generic:
1741 case op_get_by_id_proto:
1742 case op_get_by_id_self:
1743 case op_get_string_length:
1744 case op_put_by_id_generic:
1745 case op_put_by_id_replace:
1746 case op_put_by_id_transition:
1747 ASSERT_NOT_REACHED();
1751 ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
1755 void CTI::privateCompileLinkPass()
    // Link pass: now that the main pass has emitted code and recorded a label
    // for every jump target, resolve all the jumps accumulated in m_jmpTable
    // by binding each recorded JmpSrc to the label of its destination opcode.
1757 unsigned jmpTableCount = m_jmpTable.size();
1758 for (unsigned i = 0; i < jmpTableCount; ++i)
1759 m_jit.link(m_jmpTable[i].from, m_labels[m_jmpTable[i].to]);
1763 #define CTI_COMPILE_BINARY_OP_SLOW_CASE(name) \
1765 m_jit.link(iter->from, m_jit.label()); \
1766 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
1767 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
1768 emitCall(i, Machine::cti_##name); \
1769 emitPutResult(instruction[i + 1].u.operand); \
1774 void CTI::privateCompileSlowCases()
1776 unsigned structureIDInstructionIndex = 0;
1778 Instruction* instruction = m_codeBlock->instructions.begin();
1779 for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end(); ++iter) {
1780 unsigned i = iter->to;
1781 m_jit.emitRestoreArgumentReference();
1782 switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
1784 unsigned dst = instruction[i + 1].u.operand;
1785 unsigned src2 = instruction[i + 3].u.operand;
1786 if (isConstant(src2)) {
1787 JSValue* value = getConstant(m_exec, src2);
1788 if (JSImmediate::isNumber(value)) {
1789 X86Assembler::JmpSrc notImm = iter->from;
1790 m_jit.link((++iter)->from, m_jit.label());
1791 m_jit.subl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
1792 m_jit.link(notImm, m_jit.label());
1793 emitPutArg(X86::eax, 0);
1794 emitGetPutArg(src2, 4, X86::ecx);
1795 emitCall(i, Machine::cti_op_add);
1802 ASSERT(!isConstant(instruction[i + 2].u.operand));
1804 X86Assembler::JmpSrc notImm = iter->from;
1805 m_jit.link((++iter)->from, m_jit.label());
1806 m_jit.subl_rr(X86::edx, X86::eax);
1807 emitFastArithReTagImmediate(X86::eax);
1808 m_jit.link(notImm, m_jit.label());
1809 emitPutArg(X86::eax, 0);
1810 emitPutArg(X86::edx, 4);
1811 emitCall(i, Machine::cti_op_add);
1816 case op_get_by_val: {
1817 // The slow case that handles accesses to arrays (below) may jump back up to here.
1818 X86Assembler::JmpDst beginGetByValSlow = m_jit.label();
1820 X86Assembler::JmpSrc notImm = iter->from;
1821 m_jit.link((++iter)->from, m_jit.label());
1822 m_jit.link((++iter)->from, m_jit.label());
1823 emitFastArithIntToImmNoCheck(X86::edx);
1824 m_jit.link(notImm, m_jit.label());
1825 emitPutArg(X86::eax, 0);
1826 emitPutArg(X86::edx, 4);
1827 emitCall(i, Machine::cti_op_get_by_val);
1828 emitPutResult(instruction[i + 1].u.operand);
1829 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
1831 // This is slow case that handles accesses to arrays above the fast cut-off.
1832 // First, check if this is an access to the vector
1833 m_jit.link((++iter)->from, m_jit.label());
1834 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
1835 m_jit.link(m_jit.emitUnlinkedJbe(), beginGetByValSlow);
1837 // okay, missed the fast region, but it is still in the vector. Get the value.
1838 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::ecx);
1839 // Check whether the value loaded is zero; if so we need to return undefined.
1840 m_jit.testl_rr(X86::ecx, X86::ecx);
1841 m_jit.link(m_jit.emitUnlinkedJe(), beginGetByValSlow);
1842 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
1848 X86Assembler::JmpSrc notImm = iter->from;
1849 m_jit.link((++iter)->from, m_jit.label());
1850 m_jit.addl_rr(X86::edx, X86::eax);
1851 m_jit.link(notImm, m_jit.label());
1852 emitPutArg(X86::eax, 0);
1853 emitPutArg(X86::edx, 4);
1854 emitCall(i, Machine::cti_op_sub);
1855 emitPutResult(instruction[i + 1].u.operand);
1860 m_jit.link(iter->from, m_jit.label());
1861 m_jit.link((++iter)->from, m_jit.label());
1862 emitPutArg(X86::eax, 0);
1863 emitPutArg(X86::ecx, 4);
1864 emitCall(i, Machine::cti_op_rshift);
1865 emitPutResult(instruction[i + 1].u.operand);
1870 X86Assembler::JmpSrc notImm1 = iter->from;
1871 X86Assembler::JmpSrc notImm2 = (++iter)->from;
1872 m_jit.link((++iter)->from, m_jit.label());
1873 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1874 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1875 m_jit.link(notImm1, m_jit.label());
1876 m_jit.link(notImm2, m_jit.label());
1877 emitPutArg(X86::eax, 0);
1878 emitPutArg(X86::ecx, 4);
1879 emitCall(i, Machine::cti_op_lshift);
1880 emitPutResult(instruction[i + 1].u.operand);
1884 case op_loop_if_less: {
1885 emitSlowScriptCheck(i);
1887 unsigned target = instruction[i + 3].u.operand;
1888 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1890 m_jit.link(iter->from, m_jit.label());
1891 emitPutArg(X86::edx, 0);
1892 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
1893 emitCall(i, Machine::cti_op_loop_if_less);
1894 m_jit.testl_rr(X86::eax, X86::eax);
1895 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
1897 m_jit.link(iter->from, m_jit.label());
1898 m_jit.link((++iter)->from, m_jit.label());
1899 emitPutArg(X86::eax, 0);
1900 emitPutArg(X86::edx, 4);
1901 emitCall(i, Machine::cti_op_loop_if_less);
1902 m_jit.testl_rr(X86::eax, X86::eax);
1903 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
1908 case op_put_by_id: {
1909 m_jit.link(iter->from, m_jit.label());
1910 m_jit.link((++iter)->from, m_jit.label());
1912 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1913 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1914 emitPutArg(X86::eax, 0);
1915 emitPutArg(X86::edx, 8);
1916 X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_put_by_id);
1918 // Track the location of the call; this will be used to recover repatch information.
1919 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
1920 m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
1921 ++structureIDInstructionIndex;
1926 case op_get_by_id: {
1927 // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
1928 // so that we only need track one pointer into the slow case code - we track a pointer to the location
1929 // of the call (which we can use to look up the repatch information), but should an array-length or
1930 // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
1931 // the distance from the call to the head of the slow case.
1933 m_jit.link(iter->from, m_jit.label());
1934 m_jit.link((++iter)->from, m_jit.label());
1937 X86Assembler::JmpDst coldPathBegin = m_jit.label();
1939 emitPutArg(X86::eax, 0);
1940 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1941 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1942 X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_get_by_id);
1943 ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
1944 emitPutResult(instruction[i + 1].u.operand);
1946 // Track the location of the call; this will be used to recover repatch information.
1947 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
1948 m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
1949 ++structureIDInstructionIndex;
1954 case op_resolve_global: {
1955 ++structureIDInstructionIndex;
1959 case op_loop_if_lesseq: {
1960 emitSlowScriptCheck(i);
1962 unsigned target = instruction[i + 3].u.operand;
1963 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1965 m_jit.link(iter->from, m_jit.label());
1966 emitPutArg(X86::edx, 0);
1967 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
1968 emitCall(i, Machine::cti_op_loop_if_lesseq);
1969 m_jit.testl_rr(X86::eax, X86::eax);
1970 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
1972 m_jit.link(iter->from, m_jit.label());
1973 m_jit.link((++iter)->from, m_jit.label());
1974 emitPutArg(X86::eax, 0);
1975 emitPutArg(X86::edx, 4);
1976 emitCall(i, Machine::cti_op_loop_if_lesseq);
1977 m_jit.testl_rr(X86::eax, X86::eax);
1978 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
1984 unsigned srcDst = instruction[i + 1].u.operand;
1985 X86Assembler::JmpSrc notImm = iter->from;
1986 m_jit.link((++iter)->from, m_jit.label());
1987 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
1988 m_jit.link(notImm, m_jit.label());
1989 emitPutArg(X86::eax, 0);
1990 emitCall(i, Machine::cti_op_pre_inc);
1991 emitPutResult(srcDst);
1995 case op_put_by_val: {
1996 // Normal slow cases - either is not an immediate imm, or is an array.
1997 X86Assembler::JmpSrc notImm = iter->from;
1998 m_jit.link((++iter)->from, m_jit.label());
1999 m_jit.link((++iter)->from, m_jit.label());
2000 emitFastArithIntToImmNoCheck(X86::edx);
2001 m_jit.link(notImm, m_jit.label());
2002 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
2003 emitPutArg(X86::eax, 0);
2004 emitPutArg(X86::edx, 4);
2005 emitPutArg(X86::ecx, 8);
2006 emitCall(i, Machine::cti_op_put_by_val);
2007 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
2009 // slow cases for immediate int accesses to arrays
2010 m_jit.link((++iter)->from, m_jit.label());
2011 m_jit.link((++iter)->from, m_jit.label());
2012 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
2013 emitPutArg(X86::eax, 0);
2014 emitPutArg(X86::edx, 4);
2015 emitPutArg(X86::ecx, 8);
2016 emitCall(i, Machine::cti_op_put_by_val_array);
2021 case op_loop_if_true: {
2022 emitSlowScriptCheck(i);
2024 m_jit.link(iter->from, m_jit.label());
2025 emitPutArg(X86::eax, 0);
2026 emitCall(i, Machine::cti_op_jtrue);
2027 m_jit.testl_rr(X86::eax, X86::eax);
2028 unsigned target = instruction[i + 2].u.operand;
2029 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
2034 unsigned srcDst = instruction[i + 1].u.operand;
2035 X86Assembler::JmpSrc notImm = iter->from;
2036 m_jit.link((++iter)->from, m_jit.label());
2037 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
2038 m_jit.link(notImm, m_jit.label());
2039 emitPutArg(X86::eax, 0);
2040 emitCall(i, Machine::cti_op_pre_dec);
2041 emitPutResult(srcDst);
2046 unsigned target = instruction[i + 3].u.operand;
2047 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
2049 m_jit.link(iter->from, m_jit.label());
2050 emitPutArg(X86::edx, 0);
2051 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
2052 emitCall(i, Machine::cti_op_jless);
2053 m_jit.testl_rr(X86::eax, X86::eax);
2054 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
2056 m_jit.link(iter->from, m_jit.label());
2057 m_jit.link((++iter)->from, m_jit.label());
2058 emitPutArg(X86::eax, 0);
2059 emitPutArg(X86::edx, 4);
2060 emitCall(i, Machine::cti_op_jless);
2061 m_jit.testl_rr(X86::eax, X86::eax);
2062 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
2068 m_jit.link(iter->from, m_jit.label());
2069 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
2070 emitPutArg(X86::eax, 0);
2071 emitCall(i, Machine::cti_op_not);
2072 emitPutResult(instruction[i + 1].u.operand);
2077 m_jit.link(iter->from, m_jit.label());
2078 emitPutArg(X86::eax, 0);
2079 emitCall(i, Machine::cti_op_jtrue);
2080 m_jit.testl_rr(X86::eax, X86::eax);
2081 unsigned target = instruction[i + 2].u.operand;
2082 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 2 + target]); // inverted!
2087 unsigned srcDst = instruction[i + 2].u.operand;
2088 m_jit.link(iter->from, m_jit.label());
2089 m_jit.link((++iter)->from, m_jit.label());
2090 emitPutArg(X86::eax, 0);
2091 emitCall(i, Machine::cti_op_post_inc);
2092 emitPutResult(instruction[i + 1].u.operand);
2093 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
2094 emitPutResult(srcDst);
2099 m_jit.link(iter->from, m_jit.label());
2100 emitPutArg(X86::eax, 0);
2101 emitCall(i, Machine::cti_op_bitnot);
2102 emitPutResult(instruction[i + 1].u.operand);
2107 unsigned src1 = instruction[i + 2].u.operand;
2108 unsigned src2 = instruction[i + 3].u.operand;
2109 unsigned dst = instruction[i + 1].u.operand;
2110 if (getConstantImmediateNumericArg(src1)) {
2111 m_jit.link(iter->from, m_jit.label());
2112 emitGetPutArg(src1, 0, X86::ecx);
2113 emitPutArg(X86::eax, 4);
2114 emitCall(i, Machine::cti_op_bitand);
2116 } else if (getConstantImmediateNumericArg(src2)) {
2117 m_jit.link(iter->from, m_jit.label());
2118 emitPutArg(X86::eax, 0);
2119 emitGetPutArg(src2, 4, X86::ecx);
2120 emitCall(i, Machine::cti_op_bitand);
2123 m_jit.link(iter->from, m_jit.label());
2124 emitGetPutArg(src1, 0, X86::ecx);
2125 emitPutArg(X86::edx, 4);
2126 emitCall(i, Machine::cti_op_bitand);
2133 m_jit.link(iter->from, m_jit.label());
2134 emitPutArg(X86::eax, 0);
2135 emitCall(i, Machine::cti_op_jtrue);
2136 m_jit.testl_rr(X86::eax, X86::eax);
2137 unsigned target = instruction[i + 2].u.operand;
2138 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
2143 unsigned srcDst = instruction[i + 2].u.operand;
2144 m_jit.link(iter->from, m_jit.label());
2145 m_jit.link((++iter)->from, m_jit.label());
2146 emitPutArg(X86::eax, 0);
2147 emitCall(i, Machine::cti_op_post_dec);
2148 emitPutResult(instruction[i + 1].u.operand);
2149 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
2150 emitPutResult(srcDst);
2155 m_jit.link(iter->from, m_jit.label());
2156 emitPutArg(X86::eax, 0);
2157 emitPutArg(X86::edx, 4);
2158 emitCall(i, Machine::cti_op_bitxor);
2159 emitPutResult(instruction[i + 1].u.operand);
2164 m_jit.link(iter->from, m_jit.label());
2165 emitPutArg(X86::eax, 0);
2166 emitPutArg(X86::edx, 4);
2167 emitCall(i, Machine::cti_op_bitor);
2168 emitPutResult(instruction[i + 1].u.operand);
2173 m_jit.link(iter->from, m_jit.label());
2174 emitPutArg(X86::eax, 0);
2175 emitPutArg(X86::edx, 4);
2176 emitCall(i, Machine::cti_op_eq);
2177 emitPutResult(instruction[i + 1].u.operand);
2181 CTI_COMPILE_BINARY_OP_SLOW_CASE(op_stricteq);
2182 CTI_COMPILE_BINARY_OP_SLOW_CASE(op_nstricteq);
2183 case op_instanceof: {
2184 m_jit.link(iter->from, m_jit.label());
2185 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
2186 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
2187 emitGetPutArg(instruction[i + 4].u.operand, 8, X86::ecx);
2188 emitCall(i, Machine::cti_op_instanceof);
2189 emitPutResult(instruction[i + 1].u.operand);
2194 X86Assembler::JmpSrc notImm1 = iter->from;
2195 X86Assembler::JmpSrc notImm2 = (++iter)->from;
2196 m_jit.link((++iter)->from, m_jit.label());
2197 emitFastArithReTagImmediate(X86::eax);
2198 emitFastArithReTagImmediate(X86::ecx);
2199 m_jit.link(notImm1, m_jit.label());
2200 m_jit.link(notImm2, m_jit.label());
2201 emitPutArg(X86::eax, 0);
2202 emitPutArg(X86::ecx, 4);
2203 emitCall(i, Machine::cti_op_mod);
2204 emitPutResult(instruction[i + 1].u.operand);
2208 CTI_COMPILE_BINARY_OP_SLOW_CASE(op_mul);
2211 case op_construct: {
2212 m_jit.link(iter->from, m_jit.label());
2213 m_jit.emitRestoreArgumentReference();
2215 // We jump to this slow case if the ctiCode for the codeBlock has not yet been generated; compile it now.
2216 emitCall(i, Machine::cti_vm_compile);
2217 emitCall(i, X86::eax);
2219 // Instead of checking for 0 we could initialize the CodeBlock::ctiCode to point to a trampoline that would trigger the translation.
2221 // In the interpreter the following actions are performed by op_ret:
2223 // Restore ExecState::m_scopeChain and CTI_ARGS_scopeChain. NOTE: After
2224 // op_ret, %edx holds the caller's scope chain.
2225 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
2226 emitPutCTIParam(X86::edx, CTI_ARGS_scopeChain);
2227 m_jit.movl_rm(X86::edx, OBJECT_OFFSET(ExecState, m_scopeChain), X86::ecx);
2228 // Restore ExecState::m_callFrame.
2229 m_jit.movl_rm(X86::edi, OBJECT_OFFSET(ExecState, m_callFrame), X86::ecx);
2230 // Restore CTI_ARGS_codeBlock.
2231 emitPutCTIParam(m_codeBlock, CTI_ARGS_codeBlock);
2233 emitPutResult(instruction[i + 1].u.operand);
2239 ASSERT_NOT_REACHED();
2243 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i]);
2246 ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
2249 void CTI::privateCompile()
// Top-level driver for compiling a CodeBlock to native code. Emits the entry
// glue, runs the three compilation passes (main, link, slow cases), then copies
// the generated code into its final buffer and resolves every relocation:
// switch jump tables, exception handler targets, call sites, jsr absolute
// addresses, and the property-access stub bookkeeping. Stores the result in
// m_codeBlock->ctiCode.
2251 // Could use a popl_m, but would need to offset the following instruction if so.
// Pop the return address pushed by our caller and save it into the call frame
// header, so op_ret can restore it later.
2252 m_jit.popl_r(X86::ecx);
// edi caches the register file base pointer for the whole compiled function.
2253 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
2254 emitPutToCallFrameHeader(X86::ecx, RegisterFile::ReturnPC);
2256 // Lazy copy of the scopeChain
// Functions that need a full scope chain call out to cti_vm_updateScopeChain
// on entry; the call target is linked below once the code buffer is final.
2257 X86Assembler::JmpSrc callToUpdateScopeChain;
2258 if ((m_codeBlock->codeType == FunctionCode) && m_codeBlock->needsFullScopeChain) {
2259 m_jit.emitRestoreArgumentReference();
2260 callToUpdateScopeChain = m_jit.emitCall();
// Three passes: emit fast paths, resolve intra-block jumps, emit slow paths.
2263 privateCompileMainPass();
2264 privateCompileLinkPass();
2265 privateCompileSlowCases();
// All local jumps should have been consumed by the link pass.
2267 ASSERT(m_jmpTable.isEmpty());
// Copy the assembled code to its executable home; all addresses below are
// computed relative to this final buffer.
2269 void* code = m_jit.copy();
2272 // Translate vPC offsets into addresses in JIT generated code, for switch tables.
2273 for (unsigned i = 0; i < m_switches.size(); ++i) {
2274 SwitchRecord record = m_switches[i];
2275 unsigned opcodeIndex = record.m_opcodeIndex;
2277 if (record.m_type != SwitchRecord::String) {
2278 ASSERT(record.m_type == SwitchRecord::Immediate || record.m_type == SwitchRecord::Character);
2279 ASSERT(record.m_jumpTable.m_simpleJumpTable->branchOffsets.size() == record.m_jumpTable.m_simpleJumpTable->ctiOffsets.size());
// "+ 3" skips the switch opcode's own operands to reach the target label;
// a zero branch offset means "use the default target".
2281 record.m_jumpTable.m_simpleJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
2283 for (unsigned j = 0; j < record.m_jumpTable.m_simpleJumpTable->branchOffsets.size(); ++j) {
2284 unsigned offset = record.m_jumpTable.m_simpleJumpTable->branchOffsets[j];
2285 record.m_jumpTable.m_simpleJumpTable->ctiOffsets[j] = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_simpleJumpTable->ctiDefault;
2288 ASSERT(record.m_type == SwitchRecord::String);
2290 record.m_jumpTable.m_stringJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
2292 StringJumpTable::StringOffsetTable::iterator end = record.m_jumpTable.m_stringJumpTable->offsetTable.end();
2293 for (StringJumpTable::StringOffsetTable::iterator it = record.m_jumpTable.m_stringJumpTable->offsetTable.begin(); it != end; ++it) {
2294 unsigned offset = it->second.branchOffset;
2295 it->second.ctiOffset = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_stringJumpTable->ctiDefault;
// Resolve exception handler entry points to native addresses.
2300 for (Vector<HandlerInfo>::iterator iter = m_codeBlock->exceptionHandlers.begin(); iter != m_codeBlock->exceptionHandlers.end(); ++iter)
2301 iter->nativeCode = m_jit.getRelocatedAddress(code, m_labels[iter->target]);
// Link every recorded call site, and remember which vPC each call's return
// address corresponds to (used to map a native return address back to a vPC).
2303 for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
2305 X86Assembler::link(code, iter->from, iter->to);
2306 m_codeBlock->ctiReturnAddressVPCMap.add(m_jit.getRelocatedAddress(code, iter->from), iter->opcodeIndex);
2309 if ((m_codeBlock->codeType == FunctionCode) && m_codeBlock->needsFullScopeChain)
2310 X86Assembler::link(code, callToUpdateScopeChain, (void*)Machine::cti_vm_updateScopeChain);
2312 // Link absolute addresses for jsr
2313 for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
2314 X86Assembler::linkAbsoluteAddress(code, iter->addrPosition, iter->target);
// Record the final addresses of each get/put_by_id hot path and slow-case
// call so the repatching code (patchGetByIdSelf etc.) can find them later.
2316 for (unsigned i = 0; i < m_codeBlock->structureIDInstructions.size(); ++i) {
2317 StructureStubInfo& info = m_codeBlock->structureIDInstructions[i];
2318 info.callReturnLocation = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].callReturnLocation);
2319 info.hotPathBegin = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].hotPathBegin);
2322 m_codeBlock->ctiCode = code;
2325 void CTI::privateCompileGetByIdSelf(StructureID* structureID, size_t cachedOffset, void* returnAddress)
// Generates a stub for a get_by_id that hit a property stored directly on the
// base object. The stub guards that eax holds a cell with the expected
// StructureID, then loads the cached slot. On guard failure it falls back to
// cti_op_get_by_id_fail. Finally the original call site (identified by
// returnAddress) is repatched to call this stub instead.
2327 // Check eax is an object of the right StructureID.
// Immediate values have tag bits set; jne -> not a cell.
2328 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2329 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2330 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2331 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2333 // Checks out okay! - getDirectOffset
// eax := baseObject->m_propertyStorage; eax := storage[cachedOffset].
2334 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2335 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
2338 void* code = m_jit.copy();
// Route both guard failures to the generic fail handler.
2341 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2342 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
// Track the stub so the CodeBlock can free it later.
2344 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2346 ctiRepatchCallByReturnAddress(returnAddress, code);
2349 void CTI::privateCompileGetByIdProto(StructureID* structureID, StructureID* prototypeStructureID, size_t cachedOffset, void* returnAddress)
// Generates a stub for a get_by_id whose property lives on the base object's
// prototype. Two variants: with CTI_REPATCH_PIC the stub jumps back into the
// hot path on success and back to the original slow case on failure; the
// non-PIC variant below routes failures to cti_op_get_by_id_fail and repatches
// the call site to call the stub directly.
2351 #if USE(CTI_REPATCH_PIC)
2352 StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
2354 // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
2355 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2357 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
2358 // referencing the prototype object - let's speculatively load its table nice and early!)
// Load the prototype's property storage before the guards to hide load latency.
2359 JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
2360 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2361 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2363 // check eax is an object of the right StructureID.
2364 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2365 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2366 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2367 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2369 // Check the prototype object's StructureID had not changed.
// The prototype's StructureID slot is compared in place via its absolute address.
2370 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2371 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
2372 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
2374 // Checks out okay! - getDirectOffset
// Result goes in ecx; the hot path we jump back to stores it to the dest register.
2375 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::ecx);
2377 X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
2379 void* code = m_jit.copy();
2382 // Use the repatch information to link the failure cases back to the original slow case routine.
2383 void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
2384 X86Assembler::link(code, failureCases1, slowCaseBegin);
2385 X86Assembler::link(code, failureCases2, slowCaseBegin);
2386 X86Assembler::link(code, failureCases3, slowCaseBegin);
2388 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
2389 intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
2390 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
2392 // Track the stub we have created so that it will be deleted later.
2393 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2395 // Finally repatch the jump to the slow case back in the hot path to jump here instead.
2396 // FIXME: should revert this repatching, on failure.
2397 intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
2398 X86Assembler::repatchBranchOffset(jmpLocation, code);
// Non-PIC variant (the #else branch): same guards, but the result is left in
// eax and guard failures go to the generic fail handler.
2400 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
2401 // referencing the prototype object - let's speculatively load its table nice and early!)
2402 JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
2403 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2404 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2406 // check eax is an object of the right StructureID.
2407 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2408 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2409 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2410 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2412 // Check the prototype object's StructureID had not changed.
2413 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2414 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
2415 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
2417 // Checks out okay! - getDirectOffset
2418 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
2422 void* code = m_jit.copy();
2425 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2426 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2427 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2429 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2431 ctiRepatchCallByReturnAddress(returnAddress, code);
2435 void CTI::privateCompileGetByIdChain(StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset, void* returnAddress)
// Generates a stub for a get_by_id found `count` links up the prototype chain.
// Guards the base object's StructureID plus the StructureID of every
// intermediate prototype; all failures funnel into one list linked to
// cti_op_get_by_id_fail. On success, loads the cached slot from the final
// prototype's property storage into eax.
2439 Vector<X86Assembler::JmpSrc> bucketsOfFail;
2441 // Check eax is an object of the right StructureID.
2442 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2443 bucketsOfFail.append(m_jit.emitUnlinkedJne());
2444 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2445 bucketsOfFail.append(m_jit.emitUnlinkedJne());
// Walk the chain at compile time, emitting one StructureID guard per hop.
2447 StructureID* currStructureID = structureID;
2448 RefPtr<StructureID>* chainEntries = chain->head();
2449 JSObject* protoObject = 0;
2450 for (unsigned i = 0; i<count; ++i) {
2451 protoObject = static_cast<JSObject*>(currStructureID->prototypeForLookup(m_exec));
2452 currStructureID = chainEntries[i].get();
2454 // Check the prototype object's StructureID had not changed.
// Compared in place via the prototype's absolute StructureID-slot address.
2455 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2456 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(currStructureID), static_cast<void*>(protoStructureIDAddress));
2457 bucketsOfFail.append(m_jit.emitUnlinkedJne());
2459 ASSERT(protoObject);
// Load the cached property from the last prototype visited.
2461 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2462 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2463 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
2466 bucketsOfFail.append(m_jit.emitUnlinkedJmp());
2468 void* code = m_jit.copy();
// Every guard failure goes to the generic fail handler.
2471 for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
2472 X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2474 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2476 ctiRepatchCallByReturnAddress(returnAddress, code);
2479 void CTI::privateCompilePutByIdReplace(StructureID* structureID, size_t cachedOffset, void* returnAddress)
// Generates a stub for a put_by_id that overwrites an existing property (no
// structure transition). Guards that eax holds a cell with the expected
// StructureID, then stores edx (the value) into the cached slot. Guard
// failures fall back to cti_op_put_by_id_fail; the original call site is
// repatched to call this stub.
2481 // check eax is an object of the right StructureID.
2482 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2483 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2484 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2485 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2487 // checks out okay! - putDirectOffset
// eax := baseObject->m_propertyStorage; storage[cachedOffset] := edx.
2488 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2489 m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
2492 void* code = m_jit.copy();
2495 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
2496 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
2498 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2500 ctiRepatchCallByReturnAddress(returnAddress, code);
2505 static JSValue* SFX_CALL transitionObject(StructureID* newStructureID, size_t cachedOffset, JSObject* baseObject, JSValue* value)
// Out-of-line helper invoked from the put_by_id transition stub (linked as its
// call target in privateCompilePutByIdTransition) when the transition needs
// property storage reallocation. Performs the structure transition, grows the
// property storage if the old structure had exactly filled the inline
// capacity, and stores the value at the cached offset.
2507 StructureID* oldStructureID = newStructureID->previousID();
2509 baseObject->transitionTo(newStructureID);
// If the object was exactly at inline capacity, the new property needs
// out-of-line storage to be allocated first.
2511 if (oldStructureID->propertyMap().storageSize() == JSObject::inlineStorageCapacity)
2512 baseObject->allocatePropertyStorage(oldStructureID->propertyMap().storageSize(), oldStructureID->propertyMap().size());
2514 baseObject->putDirectOffset(cachedOffset, value);
2520 static inline bool transitionWillNeedStorageRealloc(StructureID* oldStructureID, StructureID* newStructureID)
// Decides whether a structure transition requires reallocating the object's
// property storage (forcing the slow, call-based transition stub). Checked by
// privateCompilePutByIdTransition both when emitting the stub and when linking
// its call target.
// Crossing out of the inline storage capacity requires allocation.
2522 if (oldStructureID->propertyMap().storageSize() == JSObject::inlineStorageCapacity)
// Still within inline storage: no reallocation needed.
2525 if (oldStructureID->propertyMap().storageSize() < JSObject::inlineStorageCapacity)
// Out-of-line already; realloc only if the map's capacity bucket changed.
2528 if (oldStructureID->propertyMap().size() != newStructureID->propertyMap().size())
2534 void CTI::privateCompilePutByIdTransition(StructureID* oldStructureID, StructureID* newStructureID, size_t cachedOffset, StructureIDChain* sIDC, void* returnAddress)
// Generates a stub for a put_by_id that adds a new property, transitioning the
// object from oldStructureID to newStructureID. The stub guards the base
// object's StructureID and walks the prototype chain (sIDC) verifying each
// prototype's StructureID and object-ness. If no storage reallocation is
// needed, the transition is done inline (swap StructureID pointer, adjust
// refcounts, store the value); otherwise it calls out to transitionObject.
2536 Vector<X86Assembler::JmpSrc, 16> failureCases;
2537 // check eax is an object of the right StructureID.
2538 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2539 failureCases.append(m_jit.emitUnlinkedJne());
2540 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(oldStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2541 failureCases.append(m_jit.emitUnlinkedJne());
2542 Vector<X86Assembler::JmpSrc> successCases;
// ecx walks the prototype chain, starting from the base object's StructureID.
2545 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
2546 // proto(ecx) = baseObject->structureID()->prototype()
// Guard that the cell is an ordinary object before trusting its prototype slot.
2547 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
2548 failureCases.append(m_jit.emitUnlinkedJne());
2549 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
2551 // ecx = baseObject->m_structureID
2552 for (RefPtr<StructureID>* it = sIDC->head(); *it; ++it) {
2553 // null check the prototype
// Reaching null terminates the chain walk successfully.
2554 m_jit.cmpl_i32r(reinterpret_cast<intptr_t> (jsNull()), X86::ecx);
2555 successCases.append(m_jit.emitUnlinkedJe());
2557 // Check the structure id
2558 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(it->get()), OBJECT_OFFSET(JSCell, m_structureID), X86::ecx);
2559 failureCases.append(m_jit.emitUnlinkedJne());
// Advance: ecx := proto->m_structureID, verify it is an object, then
// ecx := that structure's prototype for the next iteration.
2561 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::ecx, X86::ecx);
2562 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
2563 failureCases.append(m_jit.emitUnlinkedJne());
2564 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
2567 failureCases.append(m_jit.emitUnlinkedJne());
// All "hit null prototype" exits converge here on the store path.
2568 for (unsigned i = 0; i < successCases.size(); ++i)
2569 m_jit.link(successCases[i], m_jit.label());
2571 X86Assembler::JmpSrc callTarget;
2572 // Fast case, don't need to do any heavy lifting, so don't bother making a call.
2573 if (!transitionWillNeedStorageRealloc(oldStructureID, newStructureID)) {
2574 // Assumes m_refCount can be decremented easily, refcount decrement is safe as
2575 // codeblock should ensure oldStructureID->m_refCount > 0
2576 m_jit.subl_i8m(1, reinterpret_cast<void*>(oldStructureID));
2577 m_jit.addl_i8m(1, reinterpret_cast<void*>(newStructureID));
// Write the new StructureID pointer into the object, then store the value.
2578 m_jit.movl_i32m(reinterpret_cast<uint32_t>(newStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2581 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2582 m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
2584 // Slow case transition -- we're going to need quite a bit of work,
2585 // so just make a call
// Push args right-to-left for transitionObject(newStructureID, cachedOffset,
// baseObject, value), then pop all four after the call.
2586 m_jit.pushl_r(X86::edx);
2587 m_jit.pushl_r(X86::eax);
2588 m_jit.movl_i32r(cachedOffset, X86::eax);
2589 m_jit.pushl_r(X86::eax);
2590 m_jit.movl_i32r(reinterpret_cast<uint32_t>(newStructureID), X86::eax);
2591 m_jit.pushl_r(X86::eax);
2592 callTarget = m_jit.emitCall();
2593 m_jit.addl_i32r(4 * sizeof(void*), X86::esp);
2596 void* code = m_jit.copy();
2599 for (unsigned i = 0; i < failureCases.size(); ++i)
2600 X86Assembler::link(code, failureCases[i], reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
// Only the slow (realloc) variant emitted a call that needs linking.
2602 if (transitionWillNeedStorageRealloc(oldStructureID, newStructureID))
2603 X86Assembler::link(code, callTarget, reinterpret_cast<void*>(transitionObject));
2605 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2607 ctiRepatchCallByReturnAddress(returnAddress, code);
2610 void* CTI::privateCompileArrayLengthTrampoline()
// Generates the shared trampoline for get_by_id "length" on a JSArray: guard
// that eax is a cell with the JSArray vtable pointer, load the length from the
// array's storage, and box it as an immediate number. Any guard failure (or a
// length too large to box) falls back to cti_op_get_by_id_fail.
2612 // Check eax is an array
2613 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2614 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
// Cheap type check: compare the cell's vtable pointer against JSArray's.
2615 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
2616 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2618 // Checks out okay! - get the length from the storage
2619 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::eax);
2620 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::eax, X86::eax);
// Box as an immediate: eax := (length << 1) | 1; jo catches lengths that
// don't fit in an immediate number.
2622 m_jit.addl_rr(X86::eax, X86::eax);
2623 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
2624 m_jit.addl_i8r(1, X86::eax);
2628 void* code = m_jit.copy();
2631 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2632 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2633 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2638 void* CTI::privateCompileStringLengthTrampoline()
// Generates the shared trampoline for get_by_id "length" on a JSString:
// guard that eax is a cell with the JSString vtable pointer, load the length
// from the string's UString::Rep, and box it as an immediate number. Guard or
// boxing failure falls back to cti_op_get_by_id_fail.
2640 // Check eax is a string
2641 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2642 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
// Cheap type check: compare the cell's vtable pointer against JSString's.
2643 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsStringVptr), X86::eax);
2644 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2646 // Checks out okay! - get the length from the UString.
2647 m_jit.movl_mr(OBJECT_OFFSET(JSString, m_value) + OBJECT_OFFSET(UString, m_rep), X86::eax, X86::eax);
2648 m_jit.movl_mr(OBJECT_OFFSET(UString::Rep, len), X86::eax, X86::eax);
// Box as an immediate: eax := (len << 1) | 1; jo catches lengths that don't
// fit in an immediate number.
2650 m_jit.addl_rr(X86::eax, X86::eax);
2651 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
2652 m_jit.addl_i8r(1, X86::eax);
2656 void* code = m_jit.copy();
2659 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2660 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2661 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2666 void CTI::patchGetByIdSelf(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
// Repatches an existing get_by_id hot path in place (no new stub): rewrites
// the load displacement to the new cached offset and the StructureID the
// inline guard compares against. The slow-case call site is first redirected
// to the generic handler so this site is never repatched again.
2668 StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
2670 // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
2671 // Should probably go to Machine::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
2672 ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_get_by_id_generic));
2674 // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
2675 X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
2676 X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdStructureID, reinterpret_cast<uint32_t>(structureID));
2679 void CTI::patchPutByIdReplace(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
// Repatches an existing put_by_id hot path in place (no new stub): rewrites
// the store displacement to the new cached offset and the StructureID the
// inline guard compares against. The slow-case call site is first redirected
// to the generic handler so this site is never repatched again.
2681 StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
2683 // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
2684 // Should probably go to Machine::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
2685 ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_put_by_id_generic));
2687 // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
2688 X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
2689 X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdStructureID, reinterpret_cast<uint32_t>(structureID));
// Compiles a specialized stub for `array.length` accesses reached via get_by_id:
// the stub type-checks eax as a JSArray, loads the length from its storage, boxes
// it as an immediate, and jumps back into the hot path. Failure paths fall back
// to the original slow case; the hot path's slow-case branch is then redirected
// to this stub.
void CTI::privateCompilePatchGetArrayLength(void* returnAddress)
{
    StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
    // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    // Check eax is an array
    // TagMask test rejects immediates; the memory compare checks the vtable pointer
    // at [eax] against the cached JSArray vptr to reject non-array cells.
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
    m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
    // Checks out okay! - get the length from the storage
    m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
    m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::ecx, X86::ecx);
    // Re-tag the integer length as an immediate (length * 2 + 1). A signed
    // overflow on the doubling (jo) means the length is too large to represent
    // as an immediate, so bail to the slow case.
    m_jit.addl_rr(X86::ecx, X86::ecx);
    X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
    m_jit.addl_i8r(1, X86::ecx);
    X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
    void* code = m_jit.copy();
    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
    X86Assembler::link(code, failureCases1, slowCaseBegin);
    X86Assembler::link(code, failureCases2, slowCaseBegin);
    X86Assembler::link(code, failureCases3, slowCaseBegin);
    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
    // Track the stub we have created so that it will be deleted later.
    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
    // Finally repatch the jump to slow case back in the hot path to jump here instead.
    // FIXME: should revert this repatching, on failure.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
}
// Emits code to load the value of a JSVariableObject register into `dst`,
// following the indirection chain variableObject->d->registers[index].
// `dst` is used as scratch for the intermediate pointers, so the three loads
// must stay in this order.
void CTI::emitGetVariableObjectRegister(X86Assembler::RegisterID variableObject, int index, X86Assembler::RegisterID dst)
{
    m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, dst);   // dst = variableObject->d
    m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), dst, dst); // dst = d->registers
    m_jit.movl_mr(index * sizeof(Register), dst, dst);                    // dst = registers[index]
}
// Emits code to store `src` into a JSVariableObject register:
// variableObject->d->registers[index] = src.
// Note: the `variableObject` register is clobbered (reused to walk the
// indirection chain); callers must not rely on it afterwards.
void CTI::emitPutVariableObjectRegister(X86Assembler::RegisterID src, X86Assembler::RegisterID variableObject, int index)
{
    m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, variableObject);              // variableObject = variableObject->d
    m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), variableObject, variableObject); // variableObject = d->registers
    m_jit.movl_rm(src, index * sizeof(Register), variableObject);                               // registers[index] = src
}
// Compiles `pattern` to native x86 code via WREC. On success, stores the
// subpattern count through numSubpatterns_ptr and returns the generated code;
// on failure, sets *error_ptr to a (currently placeholder) message.
// The generated function preserves the WREC registers on the stack, keeps the
// current match start position in a stack slot, and retries the disjunction at
// successive start positions until a match succeeds or the start position
// passes the end of the subject string (then returns -1 in eax = no match).
void* CTI::compileRegExp(ExecState* exec, const UString& pattern, unsigned* numSubpatterns_ptr, const char** error_ptr, bool ignoreCase, bool multiline)
// TODO: better error messages
if (pattern.size() > MaxPatternSize) {
*error_ptr = "regular expression too large";
X86Assembler jit(exec->machine()->jitCodeBuffer());
WRECParser parser(pattern, ignoreCase, multiline, jit);
// Adjust the calling convention so arguments arrive in registers.
jit.emitConvertToFastCall();
// Preserve regs & initialize outputRegister.
jit.pushl_r(WRECGenerator::outputRegister);
jit.pushl_r(WRECGenerator::currentValueRegister);
// push pos onto the stack, both to preserve and as a parameter available to parseDisjunction
jit.pushl_r(WRECGenerator::currentPositionRegister);
// load output pointer
, X86::esp, WRECGenerator::outputRegister);
// restart point on match fail.
WRECGenerator::JmpDst nextLabel = jit.label();
// (1) Parse Disjunction:
// Parsing the disjunction should fully consume the pattern.
JmpSrcVector failures;
parser.parseDisjunction(failures);
// NOTE(review): this condition looks inverted relative to the comment above --
// if parsing fully consumed the pattern, isEndOfPattern() is TRUE, yet this
// branch flags Error_malformedPattern. Expected `!parser.isEndOfPattern()`;
// confirm against the WREC parser before changing.
if (parser.isEndOfPattern()) {
parser.m_err = WRECParser::Error_malformedPattern;
// TODO: better error messages
*error_ptr = "TODO: better error messages";
// Set return value & pop registers from the stack.
// Match succeeded. If an output vector was supplied (outputRegister != 0),
// record the match bounds into it before restoring registers.
jit.testl_rr(WRECGenerator::outputRegister, WRECGenerator::outputRegister);
WRECGenerator::JmpSrc noOutput = jit.emitUnlinkedJe();
jit.movl_rm(WRECGenerator::currentPositionRegister, 4, WRECGenerator::outputRegister);
jit.popl_r(X86::eax);
jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
jit.popl_r(WRECGenerator::currentValueRegister);
jit.popl_r(WRECGenerator::outputRegister);
// No output vector: just unwind the saved registers.
jit.link(noOutput, jit.label());
jit.popl_r(X86::eax);
jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
jit.popl_r(WRECGenerator::currentValueRegister);
jit.popl_r(WRECGenerator::outputRegister);
// All fails link to here. Progress the start point & if it is within scope, loop.
// Otherwise, return fail value.
WRECGenerator::JmpDst here = jit.label();
for (unsigned i = 0; i < failures.size(); ++i)
jit.link(failures[i], here);
// Reload the saved start position from the stack slot, advance it by one,
// write it back, and retry the whole disjunction while it is <= length.
jit.movl_mr(X86::esp, WRECGenerator::currentPositionRegister);
jit.addl_i8r(1, WRECGenerator::currentPositionRegister);
jit.movl_rm(WRECGenerator::currentPositionRegister, X86::esp);
jit.cmpl_rr(WRECGenerator::lengthRegister, WRECGenerator::currentPositionRegister);
jit.link(jit.emitUnlinkedJle(), nextLabel);
// Out of retries: discard the stacked position, return -1 (no match), restore regs.
jit.addl_i8r(4, X86::esp);
jit.movl_i32r(-1, X86::eax);
jit.popl_r(WRECGenerator::currentValueRegister);
jit.popl_r(WRECGenerator::outputRegister);
*numSubpatterns_ptr = parser.m_numSubpatterns;
void* code = jit.copy();
2844 #endif // ENABLE(WREC)
2848 #endif // ENABLE(CTI)