2 * Copyright (C) 2008 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "CodeBlock.h"
34 #include "wrec/WREC.h"
40 #if COMPILER(GCC) && PLATFORM(X86)
42 ".globl _ctiTrampoline" "\n"
43 "_ctiTrampoline:" "\n"
46 "subl $0x24, %esp" "\n"
47 "movl $512, %esi" "\n"
48 "call *0x30(%esp)" "\n" //Ox30 = 0x0C * 4, 0x0C = CTI_ARGS_code
49 "addl $0x24, %esp" "\n"
56 ".globl _ctiVMThrowTrampoline" "\n"
57 "_ctiVMThrowTrampoline:" "\n"
59 "movl 0x34(%esp), %ecx" "\n" //Ox34 = 0x0D * 4, 0x0D = CTI_ARGS_exec
60 "cmpl $0, 8(%ecx)" "\n"
65 "call __ZN3JSC7Machine12cti_vm_throwEPv" "\n"
66 "addl $0x24, %esp" "\n"
76 __declspec(naked) JSValue* ctiTrampoline(void* code, ExecState* exec, RegisterFile* registerFile, Register* r, ScopeChainNode* scopeChain, CodeBlock* codeBlock, JSValue** exception, Profiler**)
92 __declspec(naked) void ctiVMThrowTrampoline()
96 call JSC::Machine::cti_vm_throw;
109 // emitGetArg loads an arg from the SF register array into a hardware register
110 ALWAYS_INLINE void CTI::emitGetArg(unsigned src, X86Assembler::RegisterID dst)
112 // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
113 if (src < m_codeBlock->constantRegisters.size()) {
114 JSValue* js = m_codeBlock->constantRegisters[src].jsValue(m_exec);
115 m_jit.movl_i32r(reinterpret_cast<unsigned>(js), dst);
117 m_jit.movl_mr(src * sizeof(Register), X86::edi, dst);
120 // emitGetPutArg copies an arg from the SF register array onto the stack, as an arg to a context threaded function.
121 ALWAYS_INLINE void CTI::emitGetPutArg(unsigned src, unsigned offset, X86Assembler::RegisterID scratch)
123 if (src < m_codeBlock->constantRegisters.size()) {
124 JSValue* js = m_codeBlock->constantRegisters[src].jsValue(m_exec);
125 m_jit.movl_i32m(reinterpret_cast<unsigned>(js), offset + sizeof(void*), X86::esp);
127 m_jit.movl_mr(src * sizeof(Register), X86::edi, scratch);
128 m_jit.movl_rm(scratch, offset + sizeof(void*), X86::esp);
132 // emitPutArg puts an arg onto the stack, as an arg to a context threaded function.
133 ALWAYS_INLINE void CTI::emitPutArg(X86Assembler::RegisterID src, unsigned offset)
135 m_jit.movl_rm(src, offset + sizeof(void*), X86::esp);
138 ALWAYS_INLINE void CTI::emitPutArgConstant(unsigned value, unsigned offset)
140 m_jit.movl_i32m(value, offset + sizeof(void*), X86::esp);
143 ALWAYS_INLINE JSValue* CTI::getConstantImmediateNumericArg(unsigned src)
145 if (src < m_codeBlock->constantRegisters.size()) {
146 JSValue* js = m_codeBlock->constantRegisters[src].jsValue(m_exec);
147 return JSImmediate::isNumber(js) ? js : 0;
152 ALWAYS_INLINE void CTI::emitPutCTIParam(void* value, unsigned name)
154 m_jit.movl_i32m(reinterpret_cast<intptr_t>(value), name * sizeof(void*), X86::esp);
157 ALWAYS_INLINE void CTI::emitPutCTIParam(X86Assembler::RegisterID from, unsigned name)
159 m_jit.movl_rm(from, name * sizeof(void*), X86::esp);
162 ALWAYS_INLINE void CTI::emitGetCTIParam(unsigned name, X86Assembler::RegisterID to)
164 m_jit.movl_mr(name * sizeof(void*), X86::esp, to);
167 ALWAYS_INLINE void CTI::emitPutToCallFrameHeader(X86Assembler::RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
169 m_jit.movl_rm(from, -((m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize) - entry) * sizeof(Register), X86::edi);
172 ALWAYS_INLINE void CTI::emitGetFromCallFrameHeader(RegisterFile::CallFrameHeaderEntry entry, X86Assembler::RegisterID to)
174 m_jit.movl_mr(-((m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize) - entry) * sizeof(Register), X86::edi, to);
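// Offset sketch for the two call-frame-header helpers above: with %edi holding r (the current
// register base), header entry 'entry' lives at r[entry - (numLocals + CallFrameHeaderSize)],
// i.e. the CallFrameHeaderSize header slots sit immediately below the frame's numLocals locals,
// which in turn sit just below r. For purely illustrative values numLocals == 4 and
// CallFrameHeaderSize == 8, entry 0 would be at r[-12] and the last header entry at r[-5].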
177 ALWAYS_INLINE void CTI::emitPutResult(unsigned dst, X86Assembler::RegisterID from)
179 m_jit.movl_rm(from, dst * sizeof(Register), X86::edi);
180 // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
183 ALWAYS_INLINE void CTI::emitInitialiseRegister(unsigned dst)
185 m_jit.movl_i32m(reinterpret_cast<unsigned>(jsUndefined()), dst * sizeof(Register), X86::edi);
186 // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
189 #if ENABLE(SAMPLING_TOOL)
190 unsigned inCalledCode = 0;
193 void ctiSetReturnAddress(void** where, void* what)
198 void ctiRepatchCallByReturnAddress(void* where, void* what)
200 (static_cast<void**>(where))[-1] = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(what) - reinterpret_cast<uintptr_t>(where));
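// This works because an x86 "call rel32" encodes its target as a 32-bit displacement relative
// to the address of the following instruction - which is exactly the return address. Writing
// (what - where) into the four bytes immediately before the return address therefore retargets
// the original call to 'what' on its next execution.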
205 ALWAYS_INLINE void CTI::emitDebugExceptionCheck()
211 ALWAYS_INLINE void CTI::emitDebugExceptionCheck()
213 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
214 m_jit.cmpl_i32m(0, OBJECT_OFFSET(ExecState, m_exception), X86::ecx);
215 X86Assembler::JmpSrc noException = m_jit.emitUnlinkedJe();
217 m_jit.link(noException, m_jit.label());
220 void CTI::printOpcodeOperandTypes(unsigned src1, unsigned src2)
223 if (src1 < m_codeBlock->constantRegisters.size()) {
224 JSValue* js = m_codeBlock->constantRegisters[src1].jsValue(m_exec);
226 JSImmediate::isImmediate(js) ?
227 (JSImmediate::isNumber(js) ? 'i' :
228 JSImmediate::isBoolean(js) ? 'b' :
229 js->isUndefined() ? 'u' :
230 js->isNull() ? 'n' : '?')
232 (js->isString() ? 's' :
233 js->isObject() ? 'o' :
237 if (src2 < m_codeBlock->constantRegisters.size()) {
238 JSValue* js = m_codeBlock->constantRegisters[src2].jsValue(m_exec);
240 JSImmediate::isImmediate(js) ?
241 (JSImmediate::isNumber(js) ? 'i' :
242 JSImmediate::isBoolean(js) ? 'b' :
243 js->isUndefined() ? 'u' :
244 js->isNull() ? 'n' : '?')
246 (js->isString() ? 's' :
247 js->isObject() ? 'o' :
250 if ((which1 != '*') | (which2 != '*'))
251 fprintf(stderr, "Types %c %c\n", which1, which2);
256 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_j helper)
258 #if ENABLE(SAMPLING_TOOL)
259 m_jit.movl_i32m(1, &inCalledCode);
261 X86Assembler::JmpSrc call = m_jit.emitCall();
262 m_calls.append(CallRecord(call, helper, opcodeIndex));
263 emitDebugExceptionCheck();
264 #if ENABLE(SAMPLING_TOOL)
265 m_jit.movl_i32m(0, &inCalledCode);
271 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_p helper)
273 #if ENABLE(SAMPLING_TOOL)
274 m_jit.movl_i32m(1, &inCalledCode);
276 X86Assembler::JmpSrc call = m_jit.emitCall();
277 m_calls.append(CallRecord(call, helper, opcodeIndex));
278 emitDebugExceptionCheck();
279 #if ENABLE(SAMPLING_TOOL)
280 m_jit.movl_i32m(0, &inCalledCode);
286 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_b helper)
288 #if ENABLE(SAMPLING_TOOL)
289 m_jit.movl_i32m(1, &inCalledCode);
291 X86Assembler::JmpSrc call = m_jit.emitCall();
292 m_calls.append(CallRecord(call, helper, opcodeIndex));
293 emitDebugExceptionCheck();
294 #if ENABLE(SAMPLING_TOOL)
295 m_jit.movl_i32m(0, &inCalledCode);
301 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_v helper)
303 #if ENABLE(SAMPLING_TOOL)
304 m_jit.movl_i32m(1, &inCalledCode);
306 X86Assembler::JmpSrc call = m_jit.emitCall();
307 m_calls.append(CallRecord(call, helper, opcodeIndex));
308 emitDebugExceptionCheck();
309 #if ENABLE(SAMPLING_TOOL)
310 m_jit.movl_i32m(0, &inCalledCode);
316 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_s helper)
318 #if ENABLE(SAMPLING_TOOL)
319 m_jit.movl_i32m(1, &inCalledCode);
321 X86Assembler::JmpSrc call = m_jit.emitCall();
322 m_calls.append(CallRecord(call, helper, opcodeIndex));
323 emitDebugExceptionCheck();
324 #if ENABLE(SAMPLING_TOOL)
325 m_jit.movl_i32m(0, &inCalledCode);
331 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotJSCell(X86Assembler::RegisterID reg, unsigned opcodeIndex)
333 m_jit.testl_i32r(JSImmediate::TagMask, reg);
334 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), opcodeIndex));
337 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImmNum(X86Assembler::RegisterID reg, unsigned opcodeIndex)
339 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, reg);
340 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), opcodeIndex));
343 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImmNums(X86Assembler::RegisterID reg1, X86Assembler::RegisterID reg2, unsigned opcodeIndex)
345 m_jit.movl_rr(reg1, X86::ecx);
346 m_jit.andl_rr(reg2, X86::ecx);
347 emitJumpSlowCaseIfNotImmNum(X86::ecx, opcodeIndex);
350 ALWAYS_INLINE unsigned CTI::getDeTaggedConstantImmediate(JSValue* imm)
352 ASSERT(JSImmediate::isNumber(imm));
353 return reinterpret_cast<unsigned>(imm) & ~JSImmediate::TagBitTypeInteger;
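// Worked example of the immediate arithmetic this enables, assuming the JSImmediate integer
// format (n << 1) | TagBitTypeInteger (so a tagged n is 2n + 1):
//   tagged(a) + detagged(b) == (2a + 1) + 2b == 2(a + b) + 1 == tagged(a + b)
// so an add only needs one operand de-tagged (the add case below relies on this), whereas
//   tagged(a) - tagged(b) == 2(a - b)
// drops the tag entirely and must be re-tagged afterwards (as the subtract case below does).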
356 ALWAYS_INLINE void CTI::emitFastArithDeTagImmediate(X86Assembler::RegisterID reg)
358 // op_mod relies on this being a sub - setting zf if result is 0.
359 m_jit.subl_i8r(JSImmediate::TagBitTypeInteger, reg);
362 ALWAYS_INLINE void CTI::emitFastArithReTagImmediate(X86Assembler::RegisterID reg)
364 m_jit.addl_i8r(JSImmediate::TagBitTypeInteger, reg);
367 ALWAYS_INLINE void CTI::emitFastArithPotentiallyReTagImmediate(X86Assembler::RegisterID reg)
369 m_jit.orl_i32r(JSImmediate::TagBitTypeInteger, reg);
372 ALWAYS_INLINE void CTI::emitFastArithImmToInt(X86Assembler::RegisterID reg)
374 m_jit.sarl_i8r(1, reg);
377 ALWAYS_INLINE void CTI::emitFastArithIntToImmOrSlowCase(X86Assembler::RegisterID reg, unsigned opcodeIndex)
379 m_jit.addl_rr(reg, reg);
380 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), opcodeIndex));
381 emitFastArithReTagImmediate(reg);
384 ALWAYS_INLINE void CTI::emitFastArithIntToImmNoCheck(X86Assembler::RegisterID reg)
386 m_jit.addl_rr(reg, reg);
387 emitFastArithReTagImmediate(reg);
390 ALWAYS_INLINE void CTI::emitTagAsBoolImmediate(X86Assembler::RegisterID reg)
392 m_jit.shl_i8r(JSImmediate::ExtendedPayloadShift, reg);
393 m_jit.orl_i32r(JSImmediate::FullTagTypeBool, reg);
396 CTI::CTI(Machine* machine, ExecState* exec, CodeBlock* codeBlock)
397 : m_jit(machine->jitCodeBuffer())
400 , m_codeBlock(codeBlock)
401 , m_labels(codeBlock ? codeBlock->instructions.size() : 0)
402 , m_structureStubCompilationInfo(codeBlock ? codeBlock->structureIDInstructions.size() : 0)
406 #define CTI_COMPILE_BINARY_OP(name) \
408 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
409 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
410 emitCall(i, Machine::cti_##name); \
411 emitPutResult(instruction[i + 1].u.operand); \
416 #define CTI_COMPILE_UNARY_OP(name) \
418 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
419 emitCall(i, Machine::cti_##name); \
420 emitPutResult(instruction[i + 1].u.operand); \
425 #if ENABLE(SAMPLING_TOOL)
426 OpcodeID currentOpcodeID = static_cast<OpcodeID>(-1);
429 void CTI::compileOpCall(Instruction* instruction, unsigned i, CompileOpCallType type)
431 int dst = instruction[i + 1].u.operand;
432 int firstArg = instruction[i + 4].u.operand;
433 int argCount = instruction[i + 5].u.operand;
435 if (type == OpConstruct) {
436 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 16);
437 emitPutArgConstant(argCount, 12);
438 emitPutArgConstant(firstArg, 8);
439 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
441 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 16);
442 emitPutArgConstant(argCount, 12);
443 emitPutArgConstant(firstArg, 8);
444 // FIXME: should this be loaded dynamically off m_exec?
445 int thisVal = instruction[i + 3].u.operand;
446 if (thisVal == missingThisObjectMarker()) {
447 emitPutArgConstant(reinterpret_cast<unsigned>(m_exec->globalThisValue()), 4);
449 emitGetPutArg(thisVal, 4, X86::ecx);
452 X86Assembler::JmpSrc wasEval;
453 if (type == OpCallEval) {
454 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
455 emitCall(i, Machine::cti_op_call_eval);
456 m_jit.emitRestoreArgumentReference();
458 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
460 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(JSImmediate::impossibleValue()), X86::eax);
461 wasEval = m_jit.emitUnlinkedJne();
463 // this reloads the first arg into ecx (checked just below).
464 emitGetArg(instruction[i + 2].u.operand, X86::ecx);
466 // this sets up the first arg, and explicitly leaves the value in ecx (checked just below).
467 emitGetArg(instruction[i + 2].u.operand, X86::ecx);
468 emitPutArg(X86::ecx, 0);
471 // initializeCallFrame!
472 m_jit.movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), (firstArg - RegisterFile::CallFrameHeaderSize + RegisterFile::CallerCodeBlock) * sizeof(Register), X86::edi);
473 m_jit.movl_i32m(reinterpret_cast<unsigned>(instruction + i), (firstArg - RegisterFile::CallFrameHeaderSize + RegisterFile::ReturnVPC) * sizeof(Register), X86::edi);
474 emitGetCTIParam(CTI_ARGS_scopeChain, X86::edx);
475 m_jit.movl_rm(X86::edx, (firstArg - RegisterFile::CallFrameHeaderSize + RegisterFile::CallerScopeChain) * sizeof(Register), X86::edi);
476 m_jit.movl_rm(X86::edi, (firstArg - RegisterFile::CallFrameHeaderSize + RegisterFile::CallerRegisters) * sizeof(Register), X86::edi);
477 m_jit.movl_i32m(dst, (firstArg - RegisterFile::CallFrameHeaderSize + RegisterFile::ReturnValueRegister) * sizeof(Register), X86::edi);
478 m_jit.movl_i32m(firstArg, (firstArg - RegisterFile::CallFrameHeaderSize + RegisterFile::ArgumentStartRegister) * sizeof(Register), X86::edi);
479 m_jit.movl_i32m(argCount, (firstArg - RegisterFile::CallFrameHeaderSize + RegisterFile::ArgumentCount) * sizeof(Register), X86::edi);
480 m_jit.movl_rm(X86::ecx, (firstArg - RegisterFile::CallFrameHeaderSize + RegisterFile::Callee) * sizeof(Register), X86::edi);
481 m_jit.movl_i32m(0, (firstArg - RegisterFile::CallFrameHeaderSize + RegisterFile::OptionalCalleeActivation) * sizeof(Register), X86::edi);
482 // CTIReturnEIP (set in callee)
484 // Fast check for JS function.
485 m_jit.testl_i32r(JSImmediate::TagMask, X86::ecx);
486 X86Assembler::JmpSrc isNotObject = m_jit.emitUnlinkedJne();
487 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsFunctionVptr), X86::ecx);
488 X86Assembler::JmpSrc isJSFunction = m_jit.emitUnlinkedJe();
489 m_jit.link(isNotObject, m_jit.label());
491 // This handles host functions
492 emitCall(i, ((type == OpConstruct) ? Machine::cti_op_construct_NotJSConstruct : Machine::cti_op_call_NotJSFunction));
493 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
495 X86Assembler::JmpSrc wasNotJSFunction = m_jit.emitUnlinkedJmp();
496 m_jit.link(isJSFunction, m_jit.label());
498 // This handles JSFunctions
499 emitCall(i, ((type == OpConstruct) ? Machine::cti_op_construct_JSConstruct : Machine::cti_op_call_JSFunction));
500 // Check the ctiCode has been generated - if not, this is handled in a slow case.
501 m_jit.testl_rr(X86::eax, X86::eax);
502 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
503 m_jit.call_r(X86::eax);
505 // In the interpreter the following actions are performed by op_ret:
507 // Store the scope chain - returned by op_ret in %edx (see below) - to ExecState::m_scopeChain and CTI_ARGS_scopeChain on the stack.
508 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
509 emitPutCTIParam(X86::edx, CTI_ARGS_scopeChain);
510 m_jit.movl_rm(X86::edx, OBJECT_OFFSET(ExecState, m_scopeChain), X86::ecx);
511 // Restore ExecState::m_callFrame.
512 m_jit.leal_mr(-(m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize) * sizeof(Register), X86::edi, X86::edx);
513 m_jit.movl_rm(X86::edx, OBJECT_OFFSET(ExecState, m_callFrame), X86::ecx);
514 // Restore CTI_ARGS_codeBlock.
515 emitPutCTIParam(m_codeBlock, CTI_ARGS_codeBlock);
517 X86Assembler::JmpDst end = m_jit.label();
518 m_jit.link(wasNotJSFunction, end);
519 if (type == OpCallEval)
520 m_jit.link(wasEval, end);
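// In outline, the call path compiled above: for OpCallEval the operands are first handed to
// cti_op_call_eval, and only if it returns JSImmediate::impossibleValue() do we fall through to
// a normal call. The callee (left in ecx) is then vptr-checked: a match against m_jsFunctionVptr
// takes the cti_op_call_JSFunction / cti_op_construct_JSConstruct path, whose generated ctiCode
// (returned in eax) is called directly, while everything else goes entirely through the
// NotJSFunction / NotJSConstruct helpers. After a JS call returns, the callee's op_ret has left
// the result in eax and the caller scope chain in edx, which is written back to
// ExecState::m_scopeChain and CTI_ARGS_scopeChain before execution continues in the caller.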
525 void CTI::compileOpStrictEq(Instruction* instruction, unsigned i, CompileOpStrictEqType type)
527 bool negated = (type == OpNStrictEq);
529 unsigned dst = instruction[i + 1].u.operand;
530 unsigned src1 = instruction[i + 2].u.operand;
531 unsigned src2 = instruction[i + 3].u.operand;
533 emitGetArg(src1, X86::eax);
534 emitGetArg(src2, X86::edx);
536 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
537 X86Assembler::JmpSrc firstNotImmediate = m_jit.emitUnlinkedJe();
538 m_jit.testl_i32r(JSImmediate::TagMask, X86::edx);
539 X86Assembler::JmpSrc secondNotImmediate = m_jit.emitUnlinkedJe();
541 m_jit.cmpl_rr(X86::edx, X86::eax);
543 m_jit.setne_r(X86::eax);
545 m_jit.sete_r(X86::eax);
546 m_jit.movzbl_rr(X86::eax, X86::eax);
547 emitTagAsBoolImmediate(X86::eax);
549 X86Assembler::JmpSrc bothWereImmediates = m_jit.emitUnlinkedJmp();
551 m_jit.link(firstNotImmediate, m_jit.label());
553 // check that edx is immediate but not the zero immediate
554 m_jit.testl_i32r(JSImmediate::TagMask, X86::edx);
555 m_jit.setz_r(X86::ecx);
556 m_jit.movzbl_rr(X86::ecx, X86::ecx); // ecx is now 1 if edx was nonimmediate
557 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::edx);
558 m_jit.sete_r(X86::edx);
559 m_jit.movzbl_rr(X86::edx, X86::edx); // edx is now 1 if edx was the 0 immediate
560 m_jit.orl_rr(X86::ecx, X86::edx);
562 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJnz(), i));
564 m_jit.movl_i32r(reinterpret_cast<uint32_t>(jsBoolean(negated)), X86::eax);
566 X86Assembler::JmpSrc firstWasNotImmediate = m_jit.emitUnlinkedJmp();
568 m_jit.link(secondNotImmediate, m_jit.label());
569 // check that eax is not the zero immediate (we know it must be immediate)
570 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
571 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
573 m_jit.movl_i32r(reinterpret_cast<uint32_t>(jsBoolean(negated)), X86::eax);
575 m_jit.link(bothWereImmediates, m_jit.label());
576 m_jit.link(firstWasNotImmediate, m_jit.label());
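// The three paths above, in outline: if both operands are immediates the result is a direct
// pointer comparison (sete/setne, movzbl, then emitTagAsBoolImmediate); if exactly one operand
// is a cell and the other is a non-zero immediate, the answer is statically known (such values
// can never be strictly equal), so jsBoolean(negated) is loaded directly; two cells, or the
// zero immediate, are punted to the slow case.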
581 void CTI::emitSlowScriptCheck(unsigned opcodeIndex)
583 m_jit.subl_i8r(1, X86::esi);
584 X86Assembler::JmpSrc skipTimeout = m_jit.emitUnlinkedJne();
585 emitCall(opcodeIndex, Machine::cti_timeout_check);
587 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
588 m_jit.movl_mr(OBJECT_OFFSET(ExecState, m_globalData), X86::ecx, X86::ecx);
589 m_jit.movl_mr(OBJECT_OFFSET(JSGlobalData, machine), X86::ecx, X86::ecx);
590 m_jit.movl_mr(OBJECT_OFFSET(Machine, m_ticksUntilNextTimeoutCheck), X86::ecx, X86::esi);
591 m_jit.link(skipTimeout, m_jit.label());
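// %esi serves as the timeout tick counter: ctiTrampoline seeds it (the "movl $512, %esi"
// above), each slow-script check decrements it, and only when it hits zero do we call
// cti_timeout_check and reload the counter from Machine::m_ticksUntilNextTimeoutCheck.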
594 void CTI::privateCompileMainPass()
596 if (m_codeBlock->codeType == FunctionCode) {
597 for (int i = -m_codeBlock->numVars; i < 0; i++)
598 emitInitialiseRegister(i);
600 for (size_t i = 0; i < m_codeBlock->constantRegisters.size(); ++i)
601 emitInitialiseRegister(i);
603 Instruction* instruction = m_codeBlock->instructions.begin();
604 unsigned instructionCount = m_codeBlock->instructions.size();
606 unsigned structureIDInstructionIndex = 0;
608 for (unsigned i = 0; i < instructionCount; ) {
609 m_labels[i] = m_jit.label();
611 #if ENABLE(SAMPLING_TOOL)
612 m_jit.movl_i32m(m_machine->getOpcodeID(instruction[i].u.opcode), &currentOpcodeID);
615 ASSERT_WITH_MESSAGE(m_machine->isOpcode(instruction[i].u.opcode), "privateCompileMainPass gone bad @ %d", i);
616 m_jit.emitRestoreArgumentReference();
617 switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
619 unsigned src = instruction[i + 2].u.operand;
620 if (src < m_codeBlock->constantRegisters.size())
621 m_jit.movl_i32r(reinterpret_cast<unsigned>(m_codeBlock->constantRegisters[src].jsValue(m_exec)), X86::edx);
623 emitGetArg(src, X86::edx);
624 emitPutResult(instruction[i + 1].u.operand, X86::edx);
629 unsigned dst = instruction[i + 1].u.operand;
630 unsigned src1 = instruction[i + 2].u.operand;
631 unsigned src2 = instruction[i + 3].u.operand;
632 if (src2 < m_codeBlock->constantRegisters.size()) {
633 JSValue* value = m_codeBlock->constantRegisters[src2].jsValue(m_exec);
634 if (JSImmediate::isNumber(value)) {
635 emitGetArg(src1, X86::eax);
636 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
637 m_jit.addl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
638 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
643 } else if (!(src1 < m_codeBlock->constantRegisters.size())) {
644 emitGetArg(src1, X86::eax);
645 emitGetArg(src2, X86::edx);
646 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
647 emitFastArithDeTagImmediate(X86::eax);
648 m_jit.addl_rr(X86::edx, X86::eax);
649 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
654 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
655 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
656 emitCall(i, Machine::cti_op_add);
657 emitPutResult(instruction[i + 1].u.operand);
662 if (m_codeBlock->needsFullScopeChain)
663 emitCall(i, Machine::cti_op_end);
664 emitGetArg(instruction[i + 1].u.operand, X86::eax);
665 #if ENABLE(SAMPLING_TOOL)
666 m_jit.movl_i32m(-1, &currentOpcodeID);
668 m_jit.pushl_m(-((m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize) - RegisterFile::CTIReturnEIP) * sizeof(Register), X86::edi);
674 unsigned target = instruction[i + 1].u.operand;
675 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
680 int srcDst = instruction[i + 1].u.operand;
681 emitGetArg(srcDst, X86::eax);
682 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
683 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
684 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
685 emitPutResult(srcDst, X86::eax);
690 emitSlowScriptCheck(i);
692 unsigned target = instruction[i + 1].u.operand;
693 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
697 case op_loop_if_less: {
698 emitSlowScriptCheck(i);
700 unsigned target = instruction[i + 3].u.operand;
701 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
703 emitGetArg(instruction[i + 1].u.operand, X86::edx);
704 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
705 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
706 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
708 emitGetArg(instruction[i + 1].u.operand, X86::eax);
709 emitGetArg(instruction[i + 2].u.operand, X86::edx);
710 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
711 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
712 m_jit.cmpl_rr(X86::edx, X86::eax);
713 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
718 case op_loop_if_lesseq: {
719 emitSlowScriptCheck(i);
721 unsigned target = instruction[i + 3].u.operand;
722 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
724 emitGetArg(instruction[i + 1].u.operand, X86::edx);
725 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
726 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
727 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
729 emitGetArg(instruction[i + 1].u.operand, X86::eax);
730 emitGetArg(instruction[i + 2].u.operand, X86::edx);
731 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
732 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
733 m_jit.cmpl_rr(X86::edx, X86::eax);
734 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
739 case op_new_object: {
740 emitCall(i, Machine::cti_op_new_object);
741 emitPutResult(instruction[i + 1].u.operand);
746 // In order to be able to repatch both the StructureID and the object offset, we store one pointer to a label
747 // ('hotPathBegin') emitted just after the arguments have been loaded into registers, and we generate code
748 // such that the StructureID and offset are always at the same distance from it.
750 emitGetArg(instruction[i + 1].u.operand, X86::eax);
751 emitGetArg(instruction[i + 3].u.operand, X86::edx);
753 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
754 X86Assembler::JmpDst hotPathBegin = m_jit.label();
755 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
756 ++structureIDInstructionIndex;
758 // Jump to a slow case if either the base object is an immediate, or if the StructureID does not match.
759 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
760 // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
761 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
762 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdStructureID);
763 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
765 // Plant a store to a bogus offset in the object's property map; we will patch this later, if it is to be used.
766 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
767 m_jit.movl_rm(X86::edx, repatchGetByIdDefaultOffset, X86::eax);
768 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdPropertyMapOffset);
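// Sketch of how the repatching presumably proceeds: hotPathBegin is recorded in
// m_structureStubCompilationInfo, and the two asserted offsets (repatchOffsetPutByIdStructureID
// and repatchOffsetPutByIdPropertyMapOffset) locate the 32-bit StructureID immediate and the
// store's displacement relative to it. Once the slow path has resolved the property, those two
// fields can be rewritten with the real StructureID and storage offset; until then the
// placeholder defaults keep the inline path falling through to the slow case.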
774 // As for put_by_id, get_by_id requires the offset of the StructureID and the offset of the access to be repatched.
775 // Additionally, for get_by_id we need to repatch the offset of the branch to the slow case (we repatch this to jump
776 // to array-length / prototype access trampolines), and finally we also use the property-map access offset as a label
777 // to jump back to if one of these trampolines finds a match.
779 emitGetArg(instruction[i + 2].u.operand, X86::eax);
781 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
783 X86Assembler::JmpDst hotPathBegin = m_jit.label();
784 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
785 ++structureIDInstructionIndex;
787 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
788 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
789 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdStructureID);
790 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
791 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdBranchToSlowCase);
793 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
794 m_jit.movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::ecx);
795 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdPropertyMapOffset);
796 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
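// get_by_id additionally asserts repatchOffsetGetByIdBranchToSlowCase, so that (per the comment
// above) the jne planted here can later be redirected at the array-length / prototype access
// trampolines, while repatchOffsetGetByIdPropertyMapOffset marks the point such a trampoline
// jumps back to when it finds a match.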
801 case op_instanceof: {
802 emitGetArg(instruction[i + 2].u.operand, X86::eax); // value
803 emitGetArg(instruction[i + 3].u.operand, X86::ecx); // baseVal
804 emitGetArg(instruction[i + 4].u.operand, X86::edx); // proto
806 // check if any are immediates
807 m_jit.orl_rr(X86::eax, X86::ecx);
808 m_jit.orl_rr(X86::edx, X86::ecx);
809 m_jit.testl_i32r(JSImmediate::TagMask, X86::ecx);
811 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJnz(), i));
813 // check that all are object type - this is a bit of a bithack to avoid excess branching;
814 // we check that the sum of the three type codes from StructureIDs is exactly 3 * ObjectType;
815 // this works because NumberType and StringType are smaller.
816 m_jit.movl_i32r(3 * ObjectType, X86::ecx);
817 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::eax);
818 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::edx, X86::edx);
819 m_jit.subl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::eax, X86::ecx);
820 m_jit.subl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::edx, X86::ecx);
821 emitGetArg(instruction[i + 3].u.operand, X86::edx); // reload baseVal
822 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::edx, X86::edx);
823 m_jit.cmpl_rm(X86::ecx, OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::edx);
825 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
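// Worked example of the type-sum check (assuming, per the comment above, that ObjectType is the
// largest of the cell type codes): ecx ends up holding
//   3 * ObjectType - type(value) - type(proto)
// which is compared against type(baseVal); the three types can only sum to 3 * ObjectType if
// every one of them is ObjectType. If, say, 'value' were a string, type(value) < ObjectType,
// the sum falls short, and the jne above routes us to the slow case.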
827 // check that baseVal's flags include ImplementsHasInstance but not OverridesHasInstance
828 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::edx, X86::ecx);
829 m_jit.andl_i32r(ImplementsHasInstance | OverridesHasInstance, X86::ecx);
830 m_jit.cmpl_i32r(ImplementsHasInstance, X86::ecx);
832 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
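// The mask-and-compare reads as (flags & (ImplementsHasInstance | OverridesHasInstance)) ==
// ImplementsHasInstance, which holds exactly when ImplementsHasInstance is set and
// OverridesHasInstance is clear - the only combination the fast path handles.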
834 emitGetArg(instruction[i + 2].u.operand, X86::ecx); // reload value
835 emitGetArg(instruction[i + 4].u.operand, X86::edx); // reload proto
837 // optimistically load true result
838 m_jit.movl_i32r(reinterpret_cast<int32_t>(jsBoolean(true)), X86::eax);
840 X86Assembler::JmpDst loop = m_jit.label();
842 // load value's prototype
843 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::ecx, X86::ecx);
844 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
846 m_jit.cmpl_rr(X86::ecx, X86::edx);
847 X86Assembler::JmpSrc exit = m_jit.emitUnlinkedJe();
849 m_jit.cmpl_i32r(reinterpret_cast<int32_t>(jsNull()), X86::ecx);
850 X86Assembler::JmpSrc goToLoop = m_jit.emitUnlinkedJne();
851 m_jit.link(goToLoop, loop);
853 m_jit.movl_i32r(reinterpret_cast<int32_t>(jsBoolean(false)), X86::eax);
855 m_jit.link(exit, m_jit.label());
857 emitPutResult(instruction[i + 1].u.operand);
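// The loop above is, roughly, the following C logic (member names as used in the offsets):
//   result = true;
//   while (true) {
//       value = value->m_structureID->m_prototype;              // follow the prototype chain
//       if (value == proto) break;                               // found it - result stays true
//       if (value == jsNull()) { result = false; break; }        // fell off the end of the chain
//   }
// with 'result' materialised as the jsBoolean immediates loaded into eax.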
863 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
864 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
865 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
866 emitCall(i, Machine::cti_op_del_by_id);
867 emitPutResult(instruction[i + 1].u.operand);
872 unsigned dst = instruction[i + 1].u.operand;
873 unsigned src1 = instruction[i + 2].u.operand;
874 unsigned src2 = instruction[i + 3].u.operand;
875 if (src1 < m_codeBlock->constantRegisters.size() || src2 < m_codeBlock->constantRegisters.size()) {
876 unsigned constant = src1;
877 unsigned nonconstant = src2;
878 if (!(src1 < m_codeBlock->constantRegisters.size())) {
882 JSValue* value = m_codeBlock->constantRegisters[constant].jsValue(m_exec);
883 if (JSImmediate::isNumber(value)) {
884 emitGetArg(nonconstant, X86::eax);
885 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
886 emitFastArithImmToInt(X86::eax);
887 m_jit.imull_i32r( X86::eax, getDeTaggedConstantImmediate(value), X86::eax);
888 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
889 emitFastArithPotentiallyReTagImmediate(X86::eax);
896 emitGetArg(src1, X86::eax);
897 emitGetArg(src2, X86::edx);
898 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
899 emitFastArithDeTagImmediate(X86::eax);
900 emitFastArithImmToInt(X86::edx);
901 m_jit.imull_rr(X86::edx, X86::eax);
902 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
903 emitFastArithPotentiallyReTagImmediate(X86::eax);
909 FuncDeclNode* func = (m_codeBlock->functions[instruction[i + 2].u.operand]).get();
910 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
911 emitCall(i, Machine::cti_op_new_func);
912 emitPutResult(instruction[i + 1].u.operand);
917 compileOpCall(instruction, i);
921 case op_get_global_var: {
922 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 2].u.jsCell);
923 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
924 emitGetVariableObjectRegister(X86::eax, instruction[i + 3].u.operand, X86::eax);
925 emitPutResult(instruction[i + 1].u.operand, X86::eax);
929 case op_put_global_var: {
930 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 1].u.jsCell);
931 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
932 emitGetArg(instruction[i + 3].u.operand, X86::edx);
933 emitPutVariableObjectRegister(X86::edx, X86::eax, instruction[i + 2].u.operand);
937 case op_get_scoped_var: {
938 int skip = instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain;
940 emitGetCTIParam(CTI_ARGS_scopeChain, X86::eax);
942 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::eax, X86::eax);
944 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::eax, X86::eax);
945 emitGetVariableObjectRegister(X86::eax, instruction[i + 2].u.operand, X86::eax);
946 emitPutResult(instruction[i + 1].u.operand);
950 case op_put_scoped_var: {
951 int skip = instruction[i + 2].u.operand + m_codeBlock->needsFullScopeChain;
953 emitGetCTIParam(CTI_ARGS_scopeChain, X86::edx);
954 emitGetArg(instruction[i + 3].u.operand, X86::eax);
956 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::edx, X86::edx);
958 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::edx, X86::edx);
959 emitPutVariableObjectRegister(X86::eax, X86::edx, instruction[i + 1].u.operand);
964 // Check for an activation - if there is one, jump to the hook below.
965 m_jit.cmpl_i32m(0, -(m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize - RegisterFile::OptionalCalleeActivation) * sizeof(Register), X86::edi);
966 X86Assembler::JmpSrc activation = m_jit.emitUnlinkedJne();
967 X86Assembler::JmpDst activated = m_jit.label();
969 // Check for a profiler - if there is one, jump to the hook below.
970 emitGetCTIParam(CTI_ARGS_profilerReference, X86::eax);
971 m_jit.cmpl_i32m(0, X86::eax);
972 X86Assembler::JmpSrc profile = m_jit.emitUnlinkedJne();
973 X86Assembler::JmpDst profiled = m_jit.label();
975 // We could JIT generate the deref, only calling out to C when the refcount hits zero.
976 if (m_codeBlock->needsFullScopeChain)
977 emitCall(i, Machine::cti_op_ret_scopeChain);
979 // Return the result in %eax, and the caller scope chain in %edx (this is read from the callee call frame,
980 // but is only assigned to ExecState::m_scopeChain if returning to a JSFunction).
981 emitGetArg(instruction[i + 1].u.operand, X86::eax);
982 m_jit.movl_mr(-(m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize - RegisterFile::CallerScopeChain) * sizeof(Register), X86::edi, X86::edx);
983 // Restore the machine return address from the callframe, roll the callframe back to the caller callframe,
984 // and preserve a copy of r on the stack at CTI_ARGS_r.
985 m_jit.movl_mr(-(m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize - RegisterFile::CTIReturnEIP) * sizeof(Register), X86::edi, X86::ecx);
986 m_jit.movl_mr(-(m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize - RegisterFile::CallerRegisters) * sizeof(Register), X86::edi, X86::edi);
987 emitPutCTIParam(X86::edi, CTI_ARGS_r);
989 m_jit.pushl_r(X86::ecx);
993 m_jit.link(activation, m_jit.label());
994 emitCall(i, Machine::cti_op_ret_activation);
995 m_jit.link(m_jit.emitUnlinkedJmp(), activated);
998 m_jit.link(profile, m_jit.label());
999 emitCall(i, Machine::cti_op_ret_profiler);
1000 m_jit.link(m_jit.emitUnlinkedJmp(), profiled);
1005 case op_new_array: {
1006 m_jit.leal_mr(sizeof(Register) * instruction[i + 2].u.operand, X86::edi, X86::edx);
1007 emitPutArg(X86::edx, 0);
1008 emitPutArgConstant(instruction[i + 3].u.operand, 4);
1009 emitCall(i, Machine::cti_op_new_array);
1010 emitPutResult(instruction[i + 1].u.operand);
1015 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1016 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1017 emitCall(i, Machine::cti_op_resolve);
1018 emitPutResult(instruction[i + 1].u.operand);
1022 case op_construct: {
1023 compileOpCall(instruction, i, OpConstruct);
1027 case op_construct_verify: {
1028 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1030 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1031 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJne();
1032 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1033 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
1034 X86Assembler::JmpSrc isObject = m_jit.emitUnlinkedJe();
1036 m_jit.link(isImmediate, m_jit.label());
1037 emitGetArg(instruction[i + 2].u.operand, X86::ecx);
1038 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
1039 m_jit.link(isObject, m_jit.label());
1044 case op_get_by_val: {
1045 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1046 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1047 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1048 emitFastArithImmToInt(X86::edx);
1049 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1050 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1051 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
1052 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1054 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
1055 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
1056 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
1057 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
1059 // Get the value from the vector
1060 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::eax);
1061 emitPutResult(instruction[i + 1].u.operand);
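// The fast path above, as pseudo-C (helper names here are illustrative only):
//   if (isTaggedInt(index) && isCell(base) && base->vptr == m_jsArrayVptr
//           && untag(index) < base->m_fastAccessCutoff)
//       dst = base->m_storage->m_vector[untag(index)];
//   // otherwise fall through to the slow case recorded in m_slowCases
// Indices below m_fastAccessCutoff can be read without a hole check - compare op_put_by_val
// below, which only probes for an empty slot when writing at or beyond the cutoff.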
1065 case op_resolve_func: {
1066 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1067 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1068 emitCall(i, Machine::cti_op_resolve_func);
1069 emitPutResult(instruction[i + 1].u.operand);
1070 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
1071 emitPutResult(instruction[i + 2].u.operand);
1076 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1077 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1078 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1079 m_jit.subl_rr(X86::edx, X86::eax);
1080 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1081 emitFastArithReTagImmediate(X86::eax);
1082 emitPutResult(instruction[i + 1].u.operand);
1086 case op_put_by_val: {
1087 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1088 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1089 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1090 emitFastArithImmToInt(X86::edx);
1091 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1092 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1093 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
1094 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1096 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
1097 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
1098 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
1099 X86Assembler::JmpSrc inFastVector = m_jit.emitUnlinkedJa();
1100 // No; oh well, check if the access is within the vector - if so, we may still be okay.
1101 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
1102 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
1104 // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
1105 // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff.
1106 m_jit.cmpl_i8m(0, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
1107 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
1109 // All good - put the value into the array.
1110 m_jit.link(inFastVector, m_jit.label());
1111 emitGetArg(instruction[i + 3].u.operand, X86::eax);
1112 m_jit.movl_rm(X86::eax, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
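// Pseudo-C for the store above (again with illustrative helper names):
//   if (index < base->m_fastAccessCutoff
//           || (index < storage->m_vectorLength && storage->m_vector[index] != 0))
//       storage->m_vector[index] = value;   // in bounds and not the first write to a hole
//   // anything else - out of the vector, or a first write to a hole - is a slow case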
1116 CTI_COMPILE_BINARY_OP(op_lesseq)
1117 case op_loop_if_true: {
1118 emitSlowScriptCheck(i);
1120 unsigned target = instruction[i + 2].u.operand;
1121 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1123 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1124 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
1125 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1126 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
1128 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1129 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1130 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1131 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1133 m_jit.link(isZero, m_jit.label());
1137 case op_resolve_base: {
1138 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1139 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1140 emitCall(i, Machine::cti_op_resolve_base);
1141 emitPutResult(instruction[i + 1].u.operand);
1146 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1147 emitCall(i, Machine::cti_op_negate);
1148 emitPutResult(instruction[i + 1].u.operand);
1152 case op_resolve_skip: {
1153 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1154 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1155 emitPutArgConstant(instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain, 4);
1156 emitCall(i, Machine::cti_op_resolve_skip);
1157 emitPutResult(instruction[i + 1].u.operand);
1161 case op_resolve_global: {
1163 unsigned globalObject = reinterpret_cast<unsigned>(instruction[i + 2].u.jsCell);
1164 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1165 void* structureIDAddr = reinterpret_cast<void*>(instruction + i + 4);
1166 void* offsetAddr = reinterpret_cast<void*>(instruction + i + 5);
1168 // Check StructureID of global object
1169 m_jit.movl_i32r(globalObject, X86::eax);
1170 m_jit.movl_mr(structureIDAddr, X86::edx);
1171 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1172 X86Assembler::JmpSrc slowCase = m_jit.emitUnlinkedJne(); // StructureIDs don't match
1173 m_slowCases.append(SlowCaseEntry(slowCase, i));
1175 // Load cached property
1176 m_jit.movl_mr(OBJECT_OFFSET(JSGlobalObject, m_propertyStorage), X86::eax, X86::eax);
1177 m_jit.movl_mr(offsetAddr, X86::edx);
1178 m_jit.movl_mr(0, X86::eax, X86::edx, sizeof(JSValue*), X86::eax);
1179 emitPutResult(instruction[i + 1].u.operand);
1180 X86Assembler::JmpSrc end = m_jit.emitUnlinkedJmp();
1183 m_jit.link(slowCase, m_jit.label());
1184 emitPutArgConstant(globalObject, 0);
1185 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1186 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 8);
1187 emitCall(i, Machine::cti_op_resolve_global);
1188 emitPutResult(instruction[i + 1].u.operand);
1189 m_jit.link(end, m_jit.label());
1191 ++structureIDInstructionIndex;
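// The cache for this opcode lives in the instruction stream itself: instruction[i + 4] holds
// the expected StructureID of the global object and instruction[i + 5] the property storage
// offset (structureIDAddr / offsetAddr above). The fast path merely revalidates the StructureID
// and loads storage[offset]; on a miss, cti_op_resolve_global is handed 'instruction + i',
// presumably so it can refill those two slots for subsequent executions.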
1194 CTI_COMPILE_BINARY_OP(op_div)
1196 int srcDst = instruction[i + 1].u.operand;
1197 emitGetArg(srcDst, X86::eax);
1198 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1199 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
1200 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1201 emitPutResult(srcDst, X86::eax);
1206 unsigned target = instruction[i + 3].u.operand;
1207 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1209 emitGetArg(instruction[i + 1].u.operand, X86::edx);
1210 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1211 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
1212 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
1214 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1215 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1216 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1217 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1218 m_jit.cmpl_rr(X86::edx, X86::eax);
1219 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
1225 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1226 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
1227 m_jit.testl_i32r(JSImmediate::FullTagTypeMask, X86::eax); // i8?
1228 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1229 m_jit.xorl_i8r((JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue), X86::eax);
1230 emitPutResult(instruction[i + 1].u.operand);
1235 unsigned target = instruction[i + 2].u.operand;
1236 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1238 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1239 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1240 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1241 X86Assembler::JmpSrc isNonZero = m_jit.emitUnlinkedJne();
1243 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1244 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1245 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1246 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1248 m_jit.link(isNonZero, m_jit.label());
1253 int srcDst = instruction[i + 2].u.operand;
1254 emitGetArg(srcDst, X86::eax);
1255 m_jit.movl_rr(X86::eax, X86::edx);
1256 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1257 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1258 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1259 emitPutResult(srcDst, X86::edx);
1260 emitPutResult(instruction[i + 1].u.operand);
1264 case op_unexpected_load: {
1265 JSValue* v = m_codeBlock->unexpectedConstants[instruction[i + 2].u.operand];
1266 m_jit.movl_i32r(reinterpret_cast<unsigned>(v), X86::eax);
1267 emitPutResult(instruction[i + 1].u.operand);
1272 int retAddrDst = instruction[i + 1].u.operand;
1273 int target = instruction[i + 2].u.operand;
1274 m_jit.movl_i32m(0, sizeof(Register) * retAddrDst, X86::edi);
1275 X86Assembler::JmpDst addrPosition = m_jit.label();
1276 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1277 X86Assembler::JmpDst sretTarget = m_jit.label();
1278 m_jsrSites.append(JSRInfo(addrPosition, sretTarget));
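// The zero stored at retAddrDst is a placeholder: recording the (addrPosition, sretTarget) pair
// in m_jsrSites presumably lets the linking pass patch the real native address of sretTarget
// into that store once final code addresses are known, so the indirect jmp_m just below can
// jump through it.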
1283 m_jit.jmp_m(sizeof(Register) * instruction[i + 1].u.operand, X86::edi);
1288 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1289 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1290 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1291 m_jit.cmpl_rr(X86::edx, X86::eax);
1292 m_jit.sete_r(X86::eax);
1293 m_jit.movzbl_rr(X86::eax, X86::eax);
1294 emitTagAsBoolImmediate(X86::eax);
1295 emitPutResult(instruction[i + 1].u.operand);
1300 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1301 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1302 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1303 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1304 emitFastArithImmToInt(X86::eax);
1305 emitFastArithImmToInt(X86::ecx);
1306 m_jit.shll_CLr(X86::eax);
1307 emitFastArithIntToImmOrSlowCase(X86::eax, i);
1308 emitPutResult(instruction[i + 1].u.operand);
1313 unsigned src1 = instruction[i + 2].u.operand;
1314 unsigned src2 = instruction[i + 3].u.operand;
1315 unsigned dst = instruction[i + 1].u.operand;
1316 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
1317 emitGetArg(src2, X86::eax);
1318 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1319 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax); // FIXME: make it more obvious this is relying on the format of JSImmediate
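// Why this is safe, given the (n << 1) | 1 integer immediate format: AND-ing two tagged ints
// gives (2a + 1) & (2b + 1) == 2(a & b) + 1, i.e. the result is already a correctly tagged
// immediate, so unlike op_add no de-tag/re-tag is needed. The same identity is what lets the
// register/register case below AND first and then use the result's tag bit as its
// "both operands were immediates" check.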
1321 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
1322 emitGetArg(src1, X86::eax);
1323 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1324 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax);
1327 emitGetArg(src1, X86::eax);
1328 emitGetArg(src2, X86::edx);
1329 m_jit.andl_rr(X86::edx, X86::eax);
1330 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1337 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1338 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1339 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1340 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1341 emitFastArithImmToInt(X86::ecx);
1342 m_jit.sarl_CLr(X86::eax);
1343 emitFastArithPotentiallyReTagImmediate(X86::eax);
1344 emitPutResult(instruction[i + 1].u.operand);
1349 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1350 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1351 m_jit.xorl_i8r(~JSImmediate::TagBitTypeInteger, X86::eax);
1352 emitPutResult(instruction[i + 1].u.operand);
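// The xor with ~TagBitTypeInteger (0xFFFFFFFE) flips every bit except the tag: a tagged n of
// (n << 1) | 1 becomes (~n << 1) | 1, which is the tagged representation of ~n, so no separate
// de-tag/re-tag is required here.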
1356 case op_resolve_with_base: {
1357 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1358 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1359 emitCall(i, Machine::cti_op_resolve_with_base);
1360 emitPutResult(instruction[i + 1].u.operand);
1361 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
1362 emitPutResult(instruction[i + 2].u.operand);
1366 case op_new_func_exp: {
1367 FuncExprNode* func = (m_codeBlock->functionExpressions[instruction[i + 2].u.operand]).get();
1368 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
1369 emitCall(i, Machine::cti_op_new_func_exp);
1370 emitPutResult(instruction[i + 1].u.operand);
1375 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1376 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1377 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1378 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1379 emitFastArithDeTagImmediate(X86::eax);
1380 emitFastArithDeTagImmediate(X86::ecx);
1381 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i)); // This is checking if the last detag resulted in a value 0.
1383 m_jit.idivl_r(X86::ecx);
1384 emitFastArithReTagImmediate(X86::edx);
1385 m_jit.movl_rr(X86::edx, X86::eax);
1386 emitPutResult(instruction[i + 1].u.operand);
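// Worked arithmetic for the modulo fast path: after de-tagging, eax == 2a and ecx == 2b, and
// the remainder of 2a divided by 2b is exactly 2 * (a % b), so re-tagging edx yields the tagged
// result directly. The Je slow case planted after the second de-tag catches a zero divisor:
// emitFastArithDeTagImmediate is a sub, so it leaves ZF set exactly when the divisor was the
// tagged zero.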
1391 unsigned target = instruction[i + 2].u.operand;
1392 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1394 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1395 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
1396 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1397 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
1399 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1400 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1401 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1402 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1404 m_jit.link(isZero, m_jit.label());
1408 CTI_COMPILE_BINARY_OP(op_less)
1409 CTI_COMPILE_BINARY_OP(op_neq)
1411 int srcDst = instruction[i + 2].u.operand;
1412 emitGetArg(srcDst, X86::eax);
1413 m_jit.movl_rr(X86::eax, X86::edx);
1414 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1415 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1416 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1417 emitPutResult(srcDst, X86::edx);
1418 emitPutResult(instruction[i + 1].u.operand);
1422 CTI_COMPILE_BINARY_OP(op_urshift)
1424 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1425 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1426 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1427 m_jit.xorl_rr(X86::edx, X86::eax);
1428 emitFastArithReTagImmediate(X86::eax);
1429 emitPutResult(instruction[i + 1].u.operand);
1433 case op_new_regexp: {
1434 RegExp* regExp = m_codeBlock->regexps[instruction[i + 2].u.operand].get();
1435 emitPutArgConstant(reinterpret_cast<unsigned>(regExp), 0);
1436 emitCall(i, Machine::cti_op_new_regexp);
1437 emitPutResult(instruction[i + 1].u.operand);
1442 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1443 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1444 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1445 m_jit.orl_rr(X86::edx, X86::eax);
1446 emitPutResult(instruction[i + 1].u.operand);
1450 case op_call_eval: {
1451 compileOpCall(instruction, i, OpCallEval);
1456 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1457 emitCall(i, Machine::cti_op_throw);
1458 m_jit.addl_i8r(0x24, X86::esp);
1459 m_jit.popl_r(X86::edi);
1460 m_jit.popl_r(X86::esi);
1465 case op_get_pnames: {
1466 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1467 emitCall(i, Machine::cti_op_get_pnames);
1468 emitPutResult(instruction[i + 1].u.operand);
1472 case op_next_pname: {
1473 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1474 unsigned target = instruction[i + 3].u.operand;
1475 emitCall(i, Machine::cti_op_next_pname);
1476 m_jit.testl_rr(X86::eax, X86::eax);
1477 X86Assembler::JmpSrc endOfIter = m_jit.emitUnlinkedJe();
1478 emitPutResult(instruction[i + 1].u.operand);
1479 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 3 + target));
1480 m_jit.link(endOfIter, m_jit.label());
1484 case op_push_scope: {
1485 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1486 emitCall(i, Machine::cti_op_push_scope);
1490 case op_pop_scope: {
1491 emitCall(i, Machine::cti_op_pop_scope);
1495 CTI_COMPILE_UNARY_OP(op_typeof)
1496 CTI_COMPILE_UNARY_OP(op_is_undefined)
1497 CTI_COMPILE_UNARY_OP(op_is_boolean)
1498 CTI_COMPILE_UNARY_OP(op_is_number)
1499 CTI_COMPILE_UNARY_OP(op_is_string)
1500 CTI_COMPILE_UNARY_OP(op_is_object)
1501 CTI_COMPILE_UNARY_OP(op_is_function)
1503 compileOpStrictEq(instruction, i, OpStrictEq);
1507 case op_nstricteq: {
1508 compileOpStrictEq(instruction, i, OpNStrictEq);
1512 case op_to_jsnumber: {
1513 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1514 emitCall(i, Machine::cti_op_to_jsnumber);
1515 emitPutResult(instruction[i + 1].u.operand);
1520 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1521 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1522 emitCall(i, Machine::cti_op_in);
1523 emitPutResult(instruction[i + 1].u.operand);
1527 case op_push_new_scope: {
1528 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1529 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1530 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1531 emitCall(i, Machine::cti_op_push_new_scope);
1532 emitPutResult(instruction[i + 1].u.operand);
1537 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
1538 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
1539 m_jit.movl_mr(OBJECT_OFFSET(ExecState, m_exception), X86::ecx, X86::eax);
1540 m_jit.movl_i32m(0, OBJECT_OFFSET(ExecState, m_exception), X86::ecx);
1541 emitPutResult(instruction[i + 1].u.operand);
1545 case op_jmp_scopes: {
1546 unsigned count = instruction[i + 1].u.operand;
1547 emitPutArgConstant(count, 0);
1548 emitCall(i, Machine::cti_op_jmp_scopes);
1549 unsigned target = instruction[i + 2].u.operand;
1550 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1554 case op_put_by_index: {
1555 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1556 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1557 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1558 emitCall(i, Machine::cti_op_put_by_index);
1562 case op_switch_imm: {
1563 unsigned tableIndex = instruction[i + 1].u.operand;
1564 unsigned defaultOffset = instruction[i + 2].u.operand;
1565 unsigned scrutinee = instruction[i + 3].u.operand;
1567 // create jump table for switch destinations, track this switch statement.
1568 SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTables[tableIndex];
1569 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Immediate));
1570 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
1572 emitGetPutArg(scrutinee, 0, X86::ecx);
1573 emitPutArgConstant(tableIndex, 4);
1574 emitCall(i, Machine::cti_op_switch_imm);
1575 m_jit.jmp_r(X86::eax);
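// Flow for the switch: the SwitchRecord appended to m_switches presumably lets the linking pass
// fill jumpTable->ctiOffsets (grown here to match branchOffsets) with native code addresses;
// cti_op_switch_imm then maps the scrutinee through that table at run time and returns the
// address to jump to - hence the bare jmp_r(eax).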
1579 case op_switch_char: {
1580 unsigned tableIndex = instruction[i + 1].u.operand;
1581 unsigned defaultOffset = instruction[i + 2].u.operand;
1582 unsigned scrutinee = instruction[i + 3].u.operand;
1584 // create jump table for switch destinations, track this switch statement.
1585 SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTables[tableIndex];
1586 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Character));
1587 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
1589 emitGetPutArg(scrutinee, 0, X86::ecx);
1590 emitPutArgConstant(tableIndex, 4);
1591 emitCall(i, Machine::cti_op_switch_char);
1592 m_jit.jmp_r(X86::eax);
1596 case op_switch_string: {
1597 unsigned tableIndex = instruction[i + 1].u.operand;
1598 unsigned defaultOffset = instruction[i + 2].u.operand;
1599 unsigned scrutinee = instruction[i + 3].u.operand;
1601 // create jump table for switch destinations, track this switch statement.
1602 StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTables[tableIndex];
1603 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset));
1605 emitGetPutArg(scrutinee, 0, X86::ecx);
1606 emitPutArgConstant(tableIndex, 4);
1607 emitCall(i, Machine::cti_op_switch_string);
1608 m_jit.jmp_r(X86::eax);
1612 case op_del_by_val: {
1613 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1614 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1615 emitCall(i, Machine::cti_op_del_by_val);
1616 emitPutResult(instruction[i + 1].u.operand);
1620 case op_put_getter: {
1621 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1622 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1623 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1624 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1625 emitCall(i, Machine::cti_op_put_getter);
1629 case op_put_setter: {
1630 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1631 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1632 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1633 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1634 emitCall(i, Machine::cti_op_put_setter);
1638 case op_new_error: {
1639 JSValue* message = m_codeBlock->unexpectedConstants[instruction[i + 3].u.operand];
1640 emitPutArgConstant(instruction[i + 2].u.operand, 0);
1641 emitPutArgConstant(reinterpret_cast<unsigned>(message), 4);
1642 emitPutArgConstant(m_codeBlock->lineNumberForVPC(&instruction[i]), 8);
1643 emitCall(i, Machine::cti_op_new_error);
1644 emitPutResult(instruction[i + 1].u.operand);
1649 emitPutArgConstant(instruction[i + 1].u.operand, 0);
1650 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1651 emitPutArgConstant(instruction[i + 3].u.operand, 8);
1652 emitCall(i, Machine::cti_op_debug);
1657 unsigned dst = instruction[i + 1].u.operand;
1658 unsigned src1 = instruction[i + 2].u.operand;
1660 emitGetArg(src1, X86::eax);
1661 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1662 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJnz();
1664 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1665 m_jit.testl_i32m(MasqueradesAsUndefined, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::ecx);
1666 m_jit.setnz_r(X86::eax);
1668 X86Assembler::JmpSrc wasNotImmediate = m_jit.emitUnlinkedJmp();
1670 m_jit.link(isImmediate, m_jit.label());
1672 m_jit.movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
1673 m_jit.andl_rr(X86::eax, X86::ecx);
1674 m_jit.cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
1675 m_jit.sete_r(X86::eax);
1677 m_jit.link(wasNotImmediate, m_jit.label());
1679 m_jit.movzbl_rr(X86::eax, X86::eax);
1680 emitTagAsBoolImmediate(X86::eax);
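// In effect, the block above computes whether src1 compares equal to null: for cells it reports true
// only when the StructureID's MasqueradesAsUndefined flag is set, and for immediates it masks off
// ExtendedTagBitUndefined so that both null and undefined match FullTagTypeNull. The setcc result is
// then zero-extended and tagged as a boolean immediate. The block below emits the negated form of the
// same test (setz/setne in place of setnz/sete).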
1687 unsigned dst = instruction[i + 1].u.operand;
1688 unsigned src1 = instruction[i + 2].u.operand;
1690 emitGetArg(src1, X86::eax);
1691 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1692 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJnz();
1694 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1695 m_jit.testl_i32m(MasqueradesAsUndefined, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::ecx);
1696 m_jit.setz_r(X86::eax);
1698 X86Assembler::JmpSrc wasNotImmediate = m_jit.emitUnlinkedJmp();
1700 m_jit.link(isImmediate, m_jit.label());
1702 m_jit.movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
1703 m_jit.andl_rr(X86::eax, X86::ecx);
1704 m_jit.cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
1705 m_jit.setne_r(X86::eax);
1707 m_jit.link(wasNotImmediate, m_jit.label());
1709 m_jit.movzbl_rr(X86::eax, X86::eax);
1710 emitTagAsBoolImmediate(X86::eax);
1716 case op_initialise_locals: {
1720 case op_get_array_length:
1721 case op_get_by_id_chain:
1722 case op_get_by_id_generic:
1723 case op_get_by_id_proto:
1724 case op_get_by_id_self:
1725 case op_get_string_length:
1726 case op_put_by_id_generic:
1727 case op_put_by_id_replace:
1728 case op_put_by_id_transition:
1729 ASSERT_NOT_REACHED();
1733 ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
1737 void CTI::privateCompileLinkPass()
1739 unsigned jmpTableCount = m_jmpTable.size();
1740 for (unsigned i = 0; i < jmpTableCount; ++i)
1741 m_jit.link(m_jmpTable[i].from, m_labels[m_jmpTable[i].to]);
1745 #define CTI_COMPILE_BINARY_OP_SLOW_CASE(name) \
1747 m_jit.link(iter->from, m_jit.label()); \
1748 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
1749 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
1750 emitCall(i, Machine::cti_##name); \
1751 emitPutResult(instruction[i + 1].u.operand); \
1756 void CTI::privateCompileSlowCases()
1758 unsigned structureIDInstructionIndex = 0;
1760 Instruction* instruction = m_codeBlock->instructions.begin();
1761 for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end(); ++iter) {
1762 unsigned i = iter->to;
1763 m_jit.emitRestoreArgumentReference();
1764 switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
1766 unsigned dst = instruction[i + 1].u.operand;
1767 unsigned src2 = instruction[i + 3].u.operand;
1768 if (src2 < m_codeBlock->constantRegisters.size()) {
1769 JSValue* value = m_codeBlock->constantRegisters[src2].jsValue(m_exec);
1770 if (JSImmediate::isNumber(value)) {
1771 X86Assembler::JmpSrc notImm = iter->from;
1772 m_jit.link((++iter)->from, m_jit.label());
1773 m_jit.subl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
1774 m_jit.link(notImm, m_jit.label());
1775 emitPutArg(X86::eax, 0);
1776 emitGetPutArg(src2, 4, X86::ecx);
1777 emitCall(i, Machine::cti_op_add);
1784 ASSERT(!(static_cast<unsigned>(instruction[i + 2].u.operand) < m_codeBlock->constantRegisters.size()));
1786 X86Assembler::JmpSrc notImm = iter->from;
1787 m_jit.link((++iter)->from, m_jit.label());
1788 m_jit.subl_rr(X86::edx, X86::eax);
1789 emitFastArithReTagImmediate(X86::eax);
1790 m_jit.link(notImm, m_jit.label());
1791 emitPutArg(X86::eax, 0);
1792 emitPutArg(X86::edx, 4);
1793 emitCall(i, Machine::cti_op_add);
1798 case op_get_by_val: {
1799 // The slow case that handles accesses to arrays (below) may jump back up to here.
1800 X86Assembler::JmpDst beginGetByValSlow = m_jit.label();
1802 X86Assembler::JmpSrc notImm = iter->from;
1803 m_jit.link((++iter)->from, m_jit.label());
1804 m_jit.link((++iter)->from, m_jit.label());
1805 emitFastArithIntToImmNoCheck(X86::edx);
1806 m_jit.link(notImm, m_jit.label());
1807 emitPutArg(X86::eax, 0);
1808 emitPutArg(X86::edx, 4);
1809 emitCall(i, Machine::cti_op_get_by_val);
1810 emitPutResult(instruction[i + 1].u.operand);
1811 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
1813 // This is the slow case that handles accesses to arrays above the fast cut-off.
1814 // First, check if this is an access to the vector.
1815 m_jit.link((++iter)->from, m_jit.label());
1816 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
1817 m_jit.link(m_jit.emitUnlinkedJbe(), beginGetByValSlow);
1819 // okay, missed the fast region, but it is still in the vector. Get the value.
1820 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::ecx);
1821 // Check whether the value loaded is zero; if so we need to return undefined.
1822 m_jit.testl_rr(X86::ecx, X86::ecx);
1823 m_jit.link(m_jit.emitUnlinkedJe(), beginGetByValSlow);
1824 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
1830 X86Assembler::JmpSrc notImm = iter->from;
1831 m_jit.link((++iter)->from, m_jit.label());
1832 m_jit.addl_rr(X86::edx, X86::eax);
1833 m_jit.link(notImm, m_jit.label());
1834 emitPutArg(X86::eax, 0);
1835 emitPutArg(X86::edx, 4);
1836 emitCall(i, Machine::cti_op_sub);
1837 emitPutResult(instruction[i + 1].u.operand);
1842 m_jit.link(iter->from, m_jit.label());
1843 m_jit.link((++iter)->from, m_jit.label());
1844 emitPutArg(X86::eax, 0);
1845 emitPutArg(X86::ecx, 4);
1846 emitCall(i, Machine::cti_op_rshift);
1847 emitPutResult(instruction[i + 1].u.operand);
1852 X86Assembler::JmpSrc notImm1 = iter->from;
1853 X86Assembler::JmpSrc notImm2 = (++iter)->from;
1854 m_jit.link((++iter)->from, m_jit.label());
1855 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1856 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1857 m_jit.link(notImm1, m_jit.label());
1858 m_jit.link(notImm2, m_jit.label());
1859 emitPutArg(X86::eax, 0);
1860 emitPutArg(X86::ecx, 4);
1861 emitCall(i, Machine::cti_op_lshift);
1862 emitPutResult(instruction[i + 1].u.operand);
1866 case op_loop_if_less: {
1867 emitSlowScriptCheck(i);
1869 unsigned target = instruction[i + 3].u.operand;
1870 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1872 m_jit.link(iter->from, m_jit.label());
1873 emitPutArg(X86::edx, 0);
1874 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
1875 emitCall(i, Machine::cti_op_loop_if_less);
1876 m_jit.testl_rr(X86::eax, X86::eax);
1877 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
1879 m_jit.link(iter->from, m_jit.label());
1880 m_jit.link((++iter)->from, m_jit.label());
1881 emitPutArg(X86::eax, 0);
1882 emitPutArg(X86::edx, 4);
1883 emitCall(i, Machine::cti_op_loop_if_less);
1884 m_jit.testl_rr(X86::eax, X86::eax);
1885 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
1890 case op_put_by_id: {
1891 m_jit.link(iter->from, m_jit.label());
1892 m_jit.link((++iter)->from, m_jit.label());
1894 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1895 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1896 emitPutArg(X86::eax, 0);
1897 emitPutArg(X86::edx, 8);
1898 X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_put_by_id);
1900 // Track the location of the call; this will be used to recover repatch information.
1901 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
1902 m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
1903 ++structureIDInstructionIndex;
1908 case op_get_by_id: {
1909 // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
1910 // so that we need only track one pointer into the slow case code - we track a pointer to the location
1911 // of the call (which we can use to look up the repatch information), but should an array-length or
1912 // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
1913 // the distance from the call to the head of the slow case.
1915 m_jit.link(iter->from, m_jit.label());
1916 m_jit.link((++iter)->from, m_jit.label());
1919 X86Assembler::JmpDst coldPathBegin = m_jit.label();
1921 emitPutArg(X86::eax, 0);
1922 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1923 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1924 X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_get_by_id);
1925 ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
1926 emitPutResult(instruction[i + 1].u.operand);
1928 // Track the location of the call; this will be used to recover repatch information.
1929 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
1930 m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
1931 ++structureIDInstructionIndex;
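// A rough sketch of how the bail-out described above is used: when a stub compiled later (for example
// in privateCompileGetByIdProto or privateCompilePatchGetArrayLength) needs to fall back, it computes
//     void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
// and links its failure jumps there - which is why the ASSERT above insists that the distance from
// coldPathBegin to the call is exactly repatchOffsetGetByIdSlowCaseCall.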
1936 case op_resolve_global: {
1937 ++structureIDInstructionIndex;
1941 case op_loop_if_lesseq: {
1942 emitSlowScriptCheck(i);
1944 unsigned target = instruction[i + 3].u.operand;
1945 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1947 m_jit.link(iter->from, m_jit.label());
1948 emitPutArg(X86::edx, 0);
1949 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
1950 emitCall(i, Machine::cti_op_loop_if_lesseq);
1951 m_jit.testl_rr(X86::eax, X86::eax);
1952 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
1954 m_jit.link(iter->from, m_jit.label());
1955 m_jit.link((++iter)->from, m_jit.label());
1956 emitPutArg(X86::eax, 0);
1957 emitPutArg(X86::edx, 4);
1958 emitCall(i, Machine::cti_op_loop_if_lesseq);
1959 m_jit.testl_rr(X86::eax, X86::eax);
1960 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
1966 unsigned srcDst = instruction[i + 1].u.operand;
1967 X86Assembler::JmpSrc notImm = iter->from;
1968 m_jit.link((++iter)->from, m_jit.label());
1969 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
1970 m_jit.link(notImm, m_jit.label());
1971 emitPutArg(X86::eax, 0);
1972 emitCall(i, Machine::cti_op_pre_inc);
1973 emitPutResult(srcDst);
1977 case op_put_by_val: {
1978 // Normal slow cases - either the subscript is not an immediate int, or the base is not an array.
1979 X86Assembler::JmpSrc notImm = iter->from;
1980 m_jit.link((++iter)->from, m_jit.label());
1981 m_jit.link((++iter)->from, m_jit.label());
1982 emitFastArithIntToImmNoCheck(X86::edx);
1983 m_jit.link(notImm, m_jit.label());
1984 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1985 emitPutArg(X86::eax, 0);
1986 emitPutArg(X86::edx, 4);
1987 emitPutArg(X86::ecx, 8);
1988 emitCall(i, Machine::cti_op_put_by_val);
1989 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
1991 // slow cases for immediate int accesses to arrays
1992 m_jit.link((++iter)->from, m_jit.label());
1993 m_jit.link((++iter)->from, m_jit.label());
1994 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1995 emitPutArg(X86::eax, 0);
1996 emitPutArg(X86::edx, 4);
1997 emitPutArg(X86::ecx, 8);
1998 emitCall(i, Machine::cti_op_put_by_val_array);
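// Two distinct slow paths are generated above: the first covers the fully generic cases, calls
// cti_op_put_by_val, and then jumps over the second; the second is reached only when the base is
// already known to be an array and the immediate-int index merely fell outside the fast cut-off, so
// it can take the dedicated cti_op_put_by_val_array entry point instead.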
2003 case op_loop_if_true: {
2004 emitSlowScriptCheck(i);
2006 m_jit.link(iter->from, m_jit.label());
2007 emitPutArg(X86::eax, 0);
2008 emitCall(i, Machine::cti_op_jtrue);
2009 m_jit.testl_rr(X86::eax, X86::eax);
2010 unsigned target = instruction[i + 2].u.operand;
2011 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
2016 unsigned srcDst = instruction[i + 1].u.operand;
2017 X86Assembler::JmpSrc notImm = iter->from;
2018 m_jit.link((++iter)->from, m_jit.label());
2019 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
2020 m_jit.link(notImm, m_jit.label());
2021 emitPutArg(X86::eax, 0);
2022 emitCall(i, Machine::cti_op_pre_dec);
2023 emitPutResult(srcDst);
2028 unsigned target = instruction[i + 3].u.operand;
2029 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
2031 m_jit.link(iter->from, m_jit.label());
2032 emitPutArg(X86::edx, 0);
2033 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
2034 emitCall(i, Machine::cti_op_jless);
2035 m_jit.testl_rr(X86::eax, X86::eax);
2036 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
2038 m_jit.link(iter->from, m_jit.label());
2039 m_jit.link((++iter)->from, m_jit.label());
2040 emitPutArg(X86::eax, 0);
2041 emitPutArg(X86::edx, 4);
2042 emitCall(i, Machine::cti_op_jless);
2043 m_jit.testl_rr(X86::eax, X86::eax);
2044 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
2050 m_jit.link(iter->from, m_jit.label());
2051 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
2052 emitPutArg(X86::eax, 0);
2053 emitCall(i, Machine::cti_op_not);
2054 emitPutResult(instruction[i + 1].u.operand);
2059 m_jit.link(iter->from, m_jit.label());
2060 emitPutArg(X86::eax, 0);
2061 emitCall(i, Machine::cti_op_jtrue);
2062 m_jit.testl_rr(X86::eax, X86::eax);
2063 unsigned target = instruction[i + 2].u.operand;
2064 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 2 + target]); // inverted!
2069 unsigned srcDst = instruction[i + 2].u.operand;
2070 m_jit.link(iter->from, m_jit.label());
2071 m_jit.link((++iter)->from, m_jit.label());
2072 emitPutArg(X86::eax, 0);
2073 emitCall(i, Machine::cti_op_post_inc);
2074 emitPutResult(instruction[i + 1].u.operand);
2075 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
2076 emitPutResult(srcDst);
2081 m_jit.link(iter->from, m_jit.label());
2082 emitPutArg(X86::eax, 0);
2083 emitCall(i, Machine::cti_op_bitnot);
2084 emitPutResult(instruction[i + 1].u.operand);
2089 unsigned src1 = instruction[i + 2].u.operand;
2090 unsigned src2 = instruction[i + 3].u.operand;
2091 unsigned dst = instruction[i + 1].u.operand;
2092 if (getConstantImmediateNumericArg(src1)) {
2093 m_jit.link(iter->from, m_jit.label());
2094 emitGetPutArg(src1, 0, X86::ecx);
2095 emitPutArg(X86::eax, 4);
2096 emitCall(i, Machine::cti_op_bitand);
2098 } else if (getConstantImmediateNumericArg(src2)) {
2099 m_jit.link(iter->from, m_jit.label());
2100 emitPutArg(X86::eax, 0);
2101 emitGetPutArg(src2, 4, X86::ecx);
2102 emitCall(i, Machine::cti_op_bitand);
2105 m_jit.link(iter->from, m_jit.label());
2106 emitGetPutArg(src1, 0, X86::ecx);
2107 emitPutArg(X86::edx, 4);
2108 emitCall(i, Machine::cti_op_bitand);
2115 m_jit.link(iter->from, m_jit.label());
2116 emitPutArg(X86::eax, 0);
2117 emitCall(i, Machine::cti_op_jtrue);
2118 m_jit.testl_rr(X86::eax, X86::eax);
2119 unsigned target = instruction[i + 2].u.operand;
2120 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
2125 unsigned srcDst = instruction[i + 2].u.operand;
2126 m_jit.link(iter->from, m_jit.label());
2127 m_jit.link((++iter)->from, m_jit.label());
2128 emitPutArg(X86::eax, 0);
2129 emitCall(i, Machine::cti_op_post_dec);
2130 emitPutResult(instruction[i + 1].u.operand);
2131 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
2132 emitPutResult(srcDst);
2137 m_jit.link(iter->from, m_jit.label());
2138 emitPutArg(X86::eax, 0);
2139 emitPutArg(X86::edx, 4);
2140 emitCall(i, Machine::cti_op_bitxor);
2141 emitPutResult(instruction[i + 1].u.operand);
2146 m_jit.link(iter->from, m_jit.label());
2147 emitPutArg(X86::eax, 0);
2148 emitPutArg(X86::edx, 4);
2149 emitCall(i, Machine::cti_op_bitor);
2150 emitPutResult(instruction[i + 1].u.operand);
2155 m_jit.link(iter->from, m_jit.label());
2156 emitPutArg(X86::eax, 0);
2157 emitPutArg(X86::edx, 4);
2158 emitCall(i, Machine::cti_op_eq);
2159 emitPutResult(instruction[i + 1].u.operand);
2163 CTI_COMPILE_BINARY_OP_SLOW_CASE(op_stricteq);
2164 CTI_COMPILE_BINARY_OP_SLOW_CASE(op_nstricteq);
2165 case op_instanceof: {
2166 m_jit.link(iter->from, m_jit.label());
2167 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
2168 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
2169 emitGetPutArg(instruction[i + 4].u.operand, 8, X86::ecx);
2170 emitCall(i, Machine::cti_op_instanceof);
2171 emitPutResult(instruction[i + 1].u.operand);
2176 X86Assembler::JmpSrc notImm1 = iter->from;
2177 X86Assembler::JmpSrc notImm2 = (++iter)->from;
2178 m_jit.link((++iter)->from, m_jit.label());
2179 emitFastArithReTagImmediate(X86::eax);
2180 emitFastArithReTagImmediate(X86::ecx);
2181 m_jit.link(notImm1, m_jit.label());
2182 m_jit.link(notImm2, m_jit.label());
2183 emitPutArg(X86::eax, 0);
2184 emitPutArg(X86::ecx, 4);
2185 emitCall(i, Machine::cti_op_mod);
2186 emitPutResult(instruction[i + 1].u.operand);
2190 CTI_COMPILE_BINARY_OP_SLOW_CASE(op_mul);
2193 case op_construct: {
2194 m_jit.link(iter->from, m_jit.label());
2195 m_jit.emitRestoreArgumentReference();
2197 // We jump to this slow case if the ctiCode for the codeBlock has not yet been generated; compile it now.
2198 emitCall(i, Machine::cti_vm_compile);
2199 m_jit.call_r(X86::eax);
2201 // Instead of checking for 0 we could initialize the CodeBlock::ctiCode to point to a trampoline that would trigger the translation.
2203 // In the interpreter the following actions are performed by op_ret:
2205 // Store the scope chain - returned by op_ret in %edx (see below) - to ExecState::m_scopeChain and CTI_ARGS_scopeChain on the stack.
2206 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
2207 emitPutCTIParam(X86::edx, CTI_ARGS_scopeChain);
2208 m_jit.movl_rm(X86::edx, OBJECT_OFFSET(ExecState, m_scopeChain), X86::ecx);
2209 // Restore ExecState::m_callFrame.
2210 m_jit.leal_mr(-(m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize) * sizeof(Register), X86::edi, X86::edx);
2211 m_jit.movl_rm(X86::edx, OBJECT_OFFSET(ExecState, m_callFrame), X86::ecx);
2212 // Restore CTI_ARGS_codeBlock.
2213 emitPutCTIParam(m_codeBlock, CTI_ARGS_codeBlock);
2215 emitPutResult(instruction[i + 1].u.operand);
2221 ASSERT_NOT_REACHED();
2225 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i]);
2228 ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
2231 void CTI::privateCompile()
2233 // Could use a popl_m, but would need to offset the following instruction if so.
2234 m_jit.popl_r(X86::ecx);
2235 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
2236 emitPutToCallFrameHeader(X86::ecx, RegisterFile::CTIReturnEIP);
2238 // Lazy copy of the scopeChain
2239 X86Assembler::JmpSrc callToUpdateScopeChain;
2240 if ((m_codeBlock->codeType == FunctionCode) && m_codeBlock->needsFullScopeChain) {
2241 m_jit.emitRestoreArgumentReference();
2242 callToUpdateScopeChain = m_jit.emitCall();
2245 privateCompileMainPass();
2246 privateCompileLinkPass();
2247 privateCompileSlowCases();
2249 ASSERT(m_jmpTable.isEmpty());
2251 void* code = m_jit.copy();
2254 // Translate vPC offsets into addresses in JIT generated code, for switch tables.
2255 for (unsigned i = 0; i < m_switches.size(); ++i) {
2256 SwitchRecord record = m_switches[i];
2257 unsigned opcodeIndex = record.m_opcodeIndex;
2259 if (record.m_type != SwitchRecord::String) {
2260 ASSERT(record.m_type == SwitchRecord::Immediate || record.m_type == SwitchRecord::Character);
2261 ASSERT(record.m_jumpTable.m_simpleJumpTable->branchOffsets.size() == record.m_jumpTable.m_simpleJumpTable->ctiOffsets.size());
2263 record.m_jumpTable.m_simpleJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
2265 for (unsigned j = 0; j < record.m_jumpTable.m_simpleJumpTable->branchOffsets.size(); ++j) {
2266 unsigned offset = record.m_jumpTable.m_simpleJumpTable->branchOffsets[j];
2267 record.m_jumpTable.m_simpleJumpTable->ctiOffsets[j] = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_simpleJumpTable->ctiDefault;
2270 ASSERT(record.m_type == SwitchRecord::String);
2272 record.m_jumpTable.m_stringJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
2274 StringJumpTable::StringOffsetTable::iterator end = record.m_jumpTable.m_stringJumpTable->offsetTable.end();
2275 for (StringJumpTable::StringOffsetTable::iterator it = record.m_jumpTable.m_stringJumpTable->offsetTable.begin(); it != end; ++it) {
2276 unsigned offset = it->second.branchOffset;
2277 it->second.ctiOffset = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_stringJumpTable->ctiDefault;
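// At runtime the cti_op_switch_* helpers presumably look the scrutinee up in these tables and return
// either the matching ctiOffset or ctiDefault as a native code address; the generated hot path (the
// jmp_r(X86::eax) emitted for op_switch_imm/char/string above) then jumps straight to it.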
2282 for (Vector<HandlerInfo>::iterator iter = m_codeBlock->exceptionHandlers.begin(); iter != m_codeBlock->exceptionHandlers.end(); ++iter)
2283 iter->nativeCode = m_jit.getRelocatedAddress(code, m_labels[iter->target]);
2285 // FIXME: There doesn't seem to be a way to hint to a hashmap that it should make a certain capacity available;
2286 // could be faster if we could do something like this:
2287 // m_codeBlock->ctiReturnAddressVPCMap.grow(m_calls.size());
2288 for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
2289 X86Assembler::link(code, iter->from, iter->to);
2290 m_codeBlock->ctiReturnAddressVPCMap.add(m_jit.getRelocatedAddress(code, iter->from), iter->opcodeIndex);
2293 if ((m_codeBlock->codeType == FunctionCode) && m_codeBlock->needsFullScopeChain)
2294 X86Assembler::link(code, callToUpdateScopeChain, (void*)Machine::cti_vm_updateScopeChain);
2296 // Link absolute addresses for jsr
2297 for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
2298 X86Assembler::linkAbsoluteAddress(code, iter->addrPosition, iter->target);
2300 for (unsigned i = 0; i < m_codeBlock->structureIDInstructions.size(); ++i) {
2301 StructureStubInfo& info = m_codeBlock->structureIDInstructions[i];
2302 info.callReturnLocation = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].callReturnLocation);
2303 info.hotPathBegin = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].hotPathBegin);
2306 m_codeBlock->ctiCode = code;
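// ctiReturnAddressVPCMap, populated in the loop above, maps each native call return address back to
// the bytecode index that emitted it; presumably this is what lets the cti_* helpers and the exception
// machinery recover the current vPC from a return address alone.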
2309 void CTI::privateCompileGetByIdSelf(StructureID* structureID, size_t cachedOffset, void* returnAddress)
2311 // Check eax is an object of the right StructureID.
2312 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2313 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2314 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2315 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2317 // Checks out okay! - getDirectOffset
2318 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2319 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
2322 void* code = m_jit.copy();
2325 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2326 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2328 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2330 ctiRepatchCallByReturnAddress(returnAddress, code);
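// ctiRepatchCallByReturnAddress is assumed here to rewrite the call whose return address is
// returnAddress, so that subsequent executions of this get_by_id site call the freshly generated stub
// at `code` rather than the generic C++ helper.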
2333 void CTI::privateCompileGetByIdProto(StructureID* structureID, StructureID* prototypeStructureID, size_t cachedOffset, void* returnAddress)
2335 #if USE(CTI_REPATCH_PIC)
2336 StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
2338 // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
2339 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2341 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
2342 // referencing the prototype object - let's speculatively load its table nice and early!)
2343 JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
2344 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2345 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2347 // check eax is an object of the right StructureID.
2348 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2349 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2350 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2351 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2353 // Check that the prototype object's StructureID has not changed.
2354 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2355 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
2356 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
2358 // Checks out okay! - getDirectOffset
2359 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::ecx);
2361 X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
2363 void* code = m_jit.copy();
2366 // Use the repatch information to link the failure cases back to the original slow case routine.
2367 void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
2368 X86Assembler::link(code, failureCases1, slowCaseBegin);
2369 X86Assembler::link(code, failureCases2, slowCaseBegin);
2370 X86Assembler::link(code, failureCases3, slowCaseBegin);
2372 // On success return back to the hot path code, at a point where it will perform the store to dest for us.
2373 intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
2374 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
2376 // Track the stub we have created so that it will be deleted later.
2377 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2379 // Finally repatch the jump to the slow case back in the hot path to jump here instead.
2380 // FIXME: should revert this repatching, on failure.
2381 intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
2382 X86Assembler::repatchBranchOffset(jmpLocation, code);
2384 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
2385 // referencing the prototype object - let's speculatively load its table nice and early!)
2386 JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
2387 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2388 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2390 // check eax is an object of the right StructureID.
2391 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2392 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2393 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2394 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2396 // Check that the prototype object's StructureID has not changed.
2397 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2398 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
2399 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
2401 // Checks out okay! - getDirectOffset
2402 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
2406 void* code = m_jit.copy();
2409 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2410 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2411 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2413 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2415 ctiRepatchCallByReturnAddress(returnAddress, code);
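// The two halves of this function reflect the two caching strategies: with CTI_REPATCH_PIC the stub is
// spliced into the existing hot path (success jumps back to the inline property load, failure falls
// back to the original slow case), whereas the non-PIC variant simply becomes the new call target for
// the site and routes every failure to cti_op_get_by_id_fail.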
2419 void CTI::privateCompileGetByIdChain(StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset, void* returnAddress)
2423 Vector<X86Assembler::JmpSrc> bucketsOfFail;
2425 // Check eax is an object of the right StructureID.
2426 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2427 bucketsOfFail.append(m_jit.emitUnlinkedJne());
2428 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2429 bucketsOfFail.append(m_jit.emitUnlinkedJne());
2431 StructureID* currStructureID = structureID;
2432 RefPtr<StructureID>* chainEntries = chain->head();
2433 JSObject* protoObject = 0;
2434 for (unsigned i = 0; i<count; ++i) {
2435 protoObject = static_cast<JSObject*>(currStructureID->prototypeForLookup(m_exec));
2436 currStructureID = chainEntries[i].get();
2438 // Check that the prototype object's StructureID has not changed.
2439 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2440 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(currStructureID), static_cast<void*>(protoStructureIDAddress));
2441 bucketsOfFail.append(m_jit.emitUnlinkedJne());
2443 ASSERT(protoObject);
2445 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2446 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2447 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
2450 bucketsOfFail.append(m_jit.emitUnlinkedJmp());
2452 void* code = m_jit.copy();
2455 for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
2456 X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2458 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2460 ctiRepatchCallByReturnAddress(returnAddress, code);
2463 void CTI::privateCompilePutByIdReplace(StructureID* structureID, size_t cachedOffset, void* returnAddress)
2465 // check eax is an object of the right StructureID.
2466 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2467 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2468 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2469 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2471 // checks out okay! - putDirectOffset
2472 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2473 m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
2476 void* code = m_jit.copy();
2479 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
2480 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
2482 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2484 ctiRepatchCallByReturnAddress(returnAddress, code);
2489 static JSValue* SFX_CALL transitionObject(StructureID* newStructureID, size_t cachedOffset, JSObject* baseObject, JSValue* value)
2491 StructureID* oldStructureID = newStructureID->previousID();
2493 baseObject->transitionTo(newStructureID);
2495 if (oldStructureID->propertyMap().storageSize() == JSObject::inlineStorageCapacity)
2496 baseObject->allocatePropertyStorage(oldStructureID->propertyMap().storageSize(), oldStructureID->propertyMap().size());
2498 baseObject->putDirectOffset(cachedOffset, value);
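// transitionObject is the out-of-line helper used by privateCompilePutByIdTransition (below) for the
// case where the transition needs real work - growing the out-of-line property storage - which would
// be too much code to emit inline, so the generated stub pushes its arguments and calls here instead.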
2504 static inline bool transitionWillNeedStorageRealloc(StructureID* oldStructureID, StructureID* newStructureID)
2506 if (oldStructureID->propertyMap().storageSize() == JSObject::inlineStorageCapacity)
2509 if (oldStructureID->propertyMap().storageSize() < JSObject::inlineStorageCapacity)
2512 if (oldStructureID->propertyMap().size() != newStructureID->propertyMap().size())
2518 void CTI::privateCompilePutByIdTransition(StructureID* oldStructureID, StructureID* newStructureID, size_t cachedOffset, StructureIDChain* sIDC, void* returnAddress)
2520 Vector<X86Assembler::JmpSrc, 16> failureCases;
2521 // check eax is an object of the right StructureID.
2522 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2523 failureCases.append(m_jit.emitUnlinkedJne());
2524 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(oldStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2525 failureCases.append(m_jit.emitUnlinkedJne());
2526 Vector<X86Assembler::JmpSrc> successCases;
2529 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
2530 // proto(ecx) = baseObject->structureID()->prototype()
2531 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
2532 failureCases.append(m_jit.emitUnlinkedJne());
2533 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
2535 // ecx = baseObject->m_structureID
2536 for (RefPtr<StructureID>* it = sIDC->head(); *it; ++it) {
2537 // null check the prototype
2538 m_jit.cmpl_i32r(reinterpret_cast<intptr_t> (jsNull()), X86::ecx);
2539 successCases.append(m_jit.emitUnlinkedJe());
2541 // Check the structure id
2542 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(it->get()), OBJECT_OFFSET(JSCell, m_structureID), X86::ecx);
2543 failureCases.append(m_jit.emitUnlinkedJne());
2545 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::ecx, X86::ecx);
2546 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
2547 failureCases.append(m_jit.emitUnlinkedJne());
2548 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
2551 failureCases.append(m_jit.emitUnlinkedJne());
2552 for (unsigned i = 0; i < successCases.size(); ++i)
2553 m_jit.link(successCases[i], m_jit.label());
2555 X86Assembler::JmpSrc callTarget;
2556 // Fast case, don't need to do any heavy lifting, so don't bother making a call.
2557 if (!transitionWillNeedStorageRealloc(oldStructureID, newStructureID)) {
2558 // Assumes m_refCount can be decremented directly; the decrement is safe because the
2559 // CodeBlock should ensure oldStructureID->m_refCount > 0.
2560 m_jit.subl_i8m(1, reinterpret_cast<void*>(oldStructureID));
2561 m_jit.addl_i8m(1, reinterpret_cast<void*>(newStructureID));
2562 m_jit.movl_i32m(reinterpret_cast<uint32_t>(newStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2565 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2566 m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
2568 // Slow case transition -- we're going to need to do quite a bit of work,
2569 // so just make a call
2570 m_jit.pushl_r(X86::edx);
2571 m_jit.pushl_r(X86::eax);
2572 m_jit.movl_i32r(cachedOffset, X86::eax);
2573 m_jit.pushl_r(X86::eax);
2574 m_jit.movl_i32r(reinterpret_cast<uint32_t>(newStructureID), X86::eax);
2575 m_jit.pushl_r(X86::eax);
2576 callTarget = m_jit.emitCall();
2577 m_jit.addl_i32r(4 * sizeof(void*), X86::esp);
2580 void* code = m_jit.copy();
2583 for (unsigned i = 0; i < failureCases.size(); ++i)
2584 X86Assembler::link(code, failureCases[i], reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
2586 if (transitionWillNeedStorageRealloc(oldStructureID, newStructureID))
2587 X86Assembler::link(code, callTarget, reinterpret_cast<void*>(transitionObject));
2589 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2591 ctiRepatchCallByReturnAddress(returnAddress, code);
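// The slow-case call above appears to pass its arguments on the stack, pushed right-to-left (value,
// baseObject, cachedOffset, newStructureID) to line up with transitionObject's parameter list, and the
// caller then discards all four with the single addl of 4 * sizeof(void*) to %esp.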
2594 void* CTI::privateCompileArrayLengthTrampoline()
2596 // Check eax is an array
2597 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2598 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2599 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
2600 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2602 // Checks out okay! - get the length from the storage
2603 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::eax);
2604 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::eax, X86::eax);
2606 m_jit.addl_rr(X86::eax, X86::eax);
2607 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
2608 m_jit.addl_i8r(1, X86::eax);
2612 void* code = m_jit.copy();
2615 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2616 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2617 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2622 void* CTI::privateCompileStringLengthTrampoline()
2624 // Check eax is a string
2625 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2626 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2627 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsStringVptr), X86::eax);
2628 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2630 // Checks out okay! - get the length from the UString.
2631 m_jit.movl_mr(OBJECT_OFFSET(JSString, m_value) + OBJECT_OFFSET(UString, m_rep), X86::eax, X86::eax);
2632 m_jit.movl_mr(OBJECT_OFFSET(UString::Rep, len), X86::eax, X86::eax);
2634 m_jit.addl_rr(X86::eax, X86::eax);
2635 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
2636 m_jit.addl_i8r(1, X86::eax);
2640 void* code = m_jit.copy();
2643 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2644 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2645 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2650 void CTI::patchGetByIdSelf(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
2652 StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
2654 // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
2655 // Should probably go to Machine::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
2656 ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_get_by_id_generic));
2658 // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
2659 X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
2660 X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdStructureID, reinterpret_cast<uint32_t>(structureID));
2663 void CTI::patchPutByIdReplace(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
2665 StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
2667 // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
2668 // Should probably go to Machine::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
2669 ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_put_by_id_generic));
2671 // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
2672 X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
2673 X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdStructureID, reinterpret_cast<uint32_t>(structureID));
2676 void CTI::privateCompilePatchGetArrayLength(void* returnAddress)
2678 StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
2680 // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
2681 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2683 // Check eax is an array
2684 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2685 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2686 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
2687 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2689 // Checks out okay! - get the length from the storage
2690 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
2691 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::ecx, X86::ecx);
2693 m_jit.addl_rr(X86::ecx, X86::ecx);
2694 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
2695 m_jit.addl_i8r(1, X86::ecx);
2697 X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
2699 void* code = m_jit.copy();
2702 // Use the repatch information to link the failure cases back to the original slow case routine.
2703 void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
2704 X86Assembler::link(code, failureCases1, slowCaseBegin);
2705 X86Assembler::link(code, failureCases2, slowCaseBegin);
2706 X86Assembler::link(code, failureCases3, slowCaseBegin);
2708 // On success return back to the hot path code, at a point where it will perform the store to dest for us.
2709 intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
2710 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
2712 // Track the stub we have created so that it will be deleted later.
2713 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2715 // Finally repatch the jump to the slow case back in the hot path to jump here instead.
2716 // FIXME: should revert this repatching, on failure.
2717 intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
2718 X86Assembler::repatchBranchOffset(jmpLocation, code);
2721 void CTI::emitGetVariableObjectRegister(X86Assembler::RegisterID variableObject, int index, X86Assembler::RegisterID dst)
2723 m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, dst);
2724 m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), dst, dst);
2725 m_jit.movl_mr(index * sizeof(Register), dst, dst);
2728 void CTI::emitPutVariableObjectRegister(X86Assembler::RegisterID src, X86Assembler::RegisterID variableObject, int index)
2730 m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, variableObject);
2731 m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), variableObject, variableObject);
2732 m_jit.movl_rm(src, index * sizeof(Register), variableObject);
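// Both helpers above follow the same two-level indirection - roughly the machine-code equivalent of
//     variableObject->d->registers[index]
// assuming offsetOf_d() and offsetOf_Data_registers() name the JSVariableObject data pointer and its
// register array; the only difference is whether the final Register slot is read or written.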
2737 void* CTI::compileRegExp(ExecState* exec, const UString& pattern, unsigned* numSubpatterns_ptr, const char** error_ptr, bool ignoreCase, bool multiline)
2739 // TODO: better error messages
2740 if (pattern.size() > MaxPatternSize) {
2741 *error_ptr = "regular expression too large";
2745 X86Assembler jit(exec->machine()->jitCodeBuffer());
2746 WRECParser parser(pattern, ignoreCase, multiline, jit);
2748 jit.emitConvertToFastCall();
2750 // Preserve regs & initialize outputRegister.
2751 jit.pushl_r(WRECGenerator::outputRegister);
2752 jit.pushl_r(WRECGenerator::currentValueRegister);
2753 // push pos onto the stack, both to preserve and as a parameter available to parseDisjunction
2754 jit.pushl_r(WRECGenerator::currentPositionRegister);
2755 // load output pointer
2760 , X86::esp, WRECGenerator::outputRegister);
2762 // restart point on match fail.
2763 WRECGenerator::JmpDst nextLabel = jit.label();
2765 // (1) Parse Disjunction:
2767 // Parsing the disjunction should fully consume the pattern.
2768 JmpSrcVector failures;
2769 parser.parseDisjunction(failures);
2770 if (!parser.isEndOfPattern()) {
2771 parser.m_err = WRECParser::Error_malformedPattern;
2774 // TODO: better error messages
2775 *error_ptr = "TODO: better error messages";
2780 // Set return value & pop registers from the stack.
2782 jit.testl_rr(WRECGenerator::outputRegister, WRECGenerator::outputRegister);
2783 WRECGenerator::JmpSrc noOutput = jit.emitUnlinkedJe();
2785 jit.movl_rm(WRECGenerator::currentPositionRegister, 4, WRECGenerator::outputRegister);
2786 jit.popl_r(X86::eax);
2787 jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
2788 jit.popl_r(WRECGenerator::currentValueRegister);
2789 jit.popl_r(WRECGenerator::outputRegister);
2792 jit.link(noOutput, jit.label());
2794 jit.popl_r(X86::eax);
2795 jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
2796 jit.popl_r(WRECGenerator::currentValueRegister);
2797 jit.popl_r(WRECGenerator::outputRegister);
2801 // All fails link to here. Advance the start point and, if it is still within scope, loop.
2802 // Otherwise, return the fail value.
2803 WRECGenerator::JmpDst here = jit.label();
2804 for (unsigned i = 0; i < failures.size(); ++i)
2805 jit.link(failures[i], here);
2808 jit.movl_mr(X86::esp, WRECGenerator::currentPositionRegister);
2809 jit.addl_i8r(1, WRECGenerator::currentPositionRegister);
2810 jit.movl_rm(WRECGenerator::currentPositionRegister, X86::esp);
2811 jit.cmpl_rr(WRECGenerator::lengthRegister, WRECGenerator::currentPositionRegister);
2812 jit.link(jit.emitUnlinkedJle(), nextLabel);
2814 jit.addl_i8r(4, X86::esp);
2816 jit.movl_i32r(-1, X86::eax);
2817 jit.popl_r(WRECGenerator::currentValueRegister);
2818 jit.popl_r(WRECGenerator::outputRegister);
2821 *numSubpatterns_ptr = parser.m_numSubpatterns;
2823 void* code = jit.copy();
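// As far as the code above shows, the generated matcher's result protocol is: on success, when an
// output vector was supplied, the saved start position is stored to output[0] and the final position
// to output[1], with the start position left in %eax; if the subject is exhausted without a match the
// failure path discards the saved position, pops the preserved registers and returns -1 in %eax.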
2828 #endif // ENABLE(WREC)
2832 #endif // ENABLE(CTI)