2 * Copyright (C) 2008 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "CodeBlock.h"
34 #include "wrec/WREC.h"
40 #if COMPILER(GCC) && PLATFORM(X86)
42 ".globl _ctiTrampoline" "\n"
43 "_ctiTrampoline:" "\n"
46 "subl $0x24, %esp" "\n"
47 "movl $512, %esi" "\n"
48 "call *0x30(%esp)" "\n" //Ox30 = 0x0C * 4, 0x0C = CTI_ARGS_code
49 "addl $0x24, %esp" "\n"
56 ".globl _ctiVMThrowTrampoline" "\n"
57 "_ctiVMThrowTrampoline:" "\n"
59 "movl 0x34(%esp), %ecx" "\n" //Ox34 = 0x0D * 4, 0x0D = CTI_ARGS_exec
60 "cmpl $0, 8(%ecx)" "\n"
65 "call __ZN3JSC7Machine12cti_vm_throwEPv" "\n"
66 "addl $0x24, %esp" "\n"
76 __declspec(naked) JSValue* ctiTrampoline(void* code, ExecState* exec, RegisterFile* registerFile, Register* r, ScopeChainNode* scopeChain, CodeBlock* codeBlock, JSValue** exception, Profiler**)
92 __declspec(naked) void ctiVMThrowTrampoline()
96 call JSC::Machine::cti_vm_throw;
109 // emitGetArg loads an arg from the SF register array into a h/w register
110 ALWAYS_INLINE void CTI::emitGetArg(unsigned src, X86Assembler::RegisterID dst)
112 // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
113 if (src < m_codeBlock->constantRegisters.size()) {
114 JSValue* js = m_codeBlock->constantRegisters[src].jsValue(m_exec);
115 m_jit.movl_i32r(reinterpret_cast<unsigned>(js), dst);
117 m_jit.movl_mr(src * sizeof(Register), X86::edi, dst);
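// Note: %edi holds the register file pointer 'r' in generated code (it is reloaded from
// CTI_ARGS_r after calls that may change it - see the "edi := r" loads below), so virtual
// register 'src' lives at [edi + src * sizeof(Register)]. Constant registers are instead
// baked into the instruction stream as immediate JSValue* pointers, as above.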
120 // emitGetPutArg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
121 ALWAYS_INLINE void CTI::emitGetPutArg(unsigned src, unsigned offset, X86Assembler::RegisterID scratch)
123 if (src < m_codeBlock->constantRegisters.size()) {
124 JSValue* js = m_codeBlock->constantRegisters[src].jsValue(m_exec);
125 m_jit.movl_i32m(reinterpret_cast<unsigned>(js), offset + sizeof(void*), X86::esp);
127 m_jit.movl_mr(src * sizeof(Register), X86::edi, scratch);
128 m_jit.movl_rm(scratch, offset + sizeof(void*), X86::esp);
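// Outgoing helper arguments are written at [esp + offset + sizeof(void*)]; the bottom stack
// slot appears to be reserved for the argument-block pointer stored by emitRestoreArgumentReference
// before each helper call, which is why every offset here is biased by one machine word.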
132 // puts an arg onto the stack, as an arg to a context threaded function.
133 ALWAYS_INLINE void CTI::emitPutArg(X86Assembler::RegisterID src, unsigned offset)
135 m_jit.movl_rm(src, offset + sizeof(void*), X86::esp);
138 ALWAYS_INLINE void CTI::emitPutArgConstant(unsigned value, unsigned offset)
140 m_jit.movl_i32m(value, offset + sizeof(void*), X86::esp);
143 ALWAYS_INLINE JSValue* CTI::getConstantImmediateNumericArg(unsigned src)
145 if (src < m_codeBlock->constantRegisters.size()) {
146 JSValue* js = m_codeBlock->constantRegisters[src].jsValue(m_exec);
147 return JSImmediate::isNumber(js) ? js : 0;
152 ALWAYS_INLINE void CTI::emitPutCTIParam(void* value, unsigned name)
154 m_jit.movl_i32m(reinterpret_cast<intptr_t>(value), name * sizeof(void*), X86::esp);
157 ALWAYS_INLINE void CTI::emitPutCTIParam(X86Assembler::RegisterID from, unsigned name)
159 m_jit.movl_rm(from, name * sizeof(void*), X86::esp);
162 ALWAYS_INLINE void CTI::emitGetCTIParam(unsigned name, X86Assembler::RegisterID to)
164 m_jit.movl_mr(name * sizeof(void*), X86::esp, to);
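// CTI_ARGS_* names index machine-word slots relative to %esp in generated code; the layout is
// established by ctiTrampoline (cf. its comments: CTI_ARGS_code at 0x30 = 0x0C * 4, CTI_ARGS_exec
// at 0x34 = 0x0D * 4), so the param 'name' lives at [esp + name * sizeof(void*)].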
167 ALWAYS_INLINE void CTI::emitPutToCallFrameHeader(X86Assembler::RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
169 m_jit.movl_rm(from, -((m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize) - entry) * sizeof(Register), X86::edi);
172 ALWAYS_INLINE void CTI::emitGetFromCallFrameHeader(RegisterFile::CallFrameHeaderEntry entry, X86Assembler::RegisterID to)
174 m_jit.movl_mr(-((m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize) - entry) * sizeof(Register), X86::edi, to);
177 ALWAYS_INLINE void CTI::emitPutResult(unsigned dst, X86Assembler::RegisterID from)
179 m_jit.movl_rm(from, dst * sizeof(Register), X86::edi);
180 // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
183 ALWAYS_INLINE void CTI::emitInitialiseRegister(unsigned dst)
185 m_jit.movl_i32m(reinterpret_cast<unsigned>(jsUndefined()), dst * sizeof(Register), X86::edi);
186 // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
189 #if ENABLE(SAMPLING_TOOL)
190 unsigned inCalledCode = 0;
193 void ctiSetReturnAddress(void** where, void* what)
198 void ctiRepatchCallByReturnAddress(void* where, void* what)
200 (static_cast<void**>(where))[-1] = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(what) - reinterpret_cast<uintptr_t>(where));
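// The rel32 operand of a call instruction occupies the four bytes immediately before the return
// address it pushes, so writing (target - returnAddress) into where[-1] redirects the original
// call site to 'what'.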
205 ALWAYS_INLINE void CTI::emitDebugExceptionCheck()
211 ALWAYS_INLINE void CTI::emitDebugExceptionCheck()
213 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
214 m_jit.cmpl_i32m(0, OBJECT_OFFSET(ExecState, m_exception), X86::ecx);
215 X86Assembler::JmpSrc noException = m_jit.emitUnlinkedJe();
217 m_jit.link(noException, m_jit.label());
220 void CTI::printOpcodeOperandTypes(unsigned src1, unsigned src2)
223 if (src1 < m_codeBlock->constantRegisters.size()) {
224 JSValue* js = m_codeBlock->constantRegisters[src1].jsValue(m_exec);
226 JSImmediate::isImmediate(js) ?
227 (JSImmediate::isNumber(js) ? 'i' :
228 JSImmediate::isBoolean(js) ? 'b' :
229 js->isUndefined() ? 'u' :
230 js->isNull() ? 'n' : '?')
232 (js->isString() ? 's' :
233 js->isObject() ? 'o' :
237 if (src2 < m_codeBlock->constantRegisters.size()) {
238 JSValue* js = m_codeBlock->constantRegisters[src2].jsValue(m_exec);
240 JSImmediate::isImmediate(js) ?
241 (JSImmediate::isNumber(js) ? 'i' :
242 JSImmediate::isBoolean(js) ? 'b' :
243 js->isUndefined() ? 'u' :
244 js->isNull() ? 'n' : '?')
246 (js->isString() ? 's' :
247 js->isObject() ? 'o' :
250 if ((which1 != '*') | (which2 != '*'))
251 fprintf(stderr, "Types %c %c\n", which1, which2);
256 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_j helper)
258 #if ENABLE(SAMPLING_TOOL)
259 m_jit.movl_i32m(1, &inCalledCode);
261 X86Assembler::JmpSrc call = m_jit.emitCall();
262 m_calls.append(CallRecord(call, helper, opcodeIndex));
263 emitDebugExceptionCheck();
264 #if ENABLE(SAMPLING_TOOL)
265 m_jit.movl_i32m(0, &inCalledCode);
271 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_p helper)
273 #if ENABLE(SAMPLING_TOOL)
274 m_jit.movl_i32m(1, &inCalledCode);
276 X86Assembler::JmpSrc call = m_jit.emitCall();
277 m_calls.append(CallRecord(call, helper, opcodeIndex));
278 emitDebugExceptionCheck();
279 #if ENABLE(SAMPLING_TOOL)
280 m_jit.movl_i32m(0, &inCalledCode);
286 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_b helper)
288 #if ENABLE(SAMPLING_TOOL)
289 m_jit.movl_i32m(1, &inCalledCode);
291 X86Assembler::JmpSrc call = m_jit.emitCall();
292 m_calls.append(CallRecord(call, helper, opcodeIndex));
293 emitDebugExceptionCheck();
294 #if ENABLE(SAMPLING_TOOL)
295 m_jit.movl_i32m(0, &inCalledCode);
301 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_v helper)
303 #if ENABLE(SAMPLING_TOOL)
304 m_jit.movl_i32m(1, &inCalledCode);
306 X86Assembler::JmpSrc call = m_jit.emitCall();
307 m_calls.append(CallRecord(call, helper, opcodeIndex));
308 emitDebugExceptionCheck();
309 #if ENABLE(SAMPLING_TOOL)
310 m_jit.movl_i32m(0, &inCalledCode);
316 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_s helper)
318 #if ENABLE(SAMPLING_TOOL)
319 m_jit.movl_i32m(1, &inCalledCode);
321 X86Assembler::JmpSrc call = m_jit.emitCall();
322 m_calls.append(CallRecord(call, helper, opcodeIndex));
323 emitDebugExceptionCheck();
324 #if ENABLE(SAMPLING_TOOL)
325 m_jit.movl_i32m(0, &inCalledCode);
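// Each emitCall overload (one per helper signature) emits an as-yet-unlinked call, records it in
// m_calls together with the helper address and the opcode index, and plants a debug-only exception
// check; the recorded calls are presumably linked to their cti_* helpers when the code is finalized.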
331 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotJSCell(X86Assembler::RegisterID reg, unsigned opcodeIndex)
333 m_jit.testl_i32r(JSImmediate::TagMask, reg);
334 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), opcodeIndex));
337 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImmNum(X86Assembler::RegisterID reg, unsigned opcodeIndex)
339 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, reg);
340 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), opcodeIndex));
343 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImmNums(X86Assembler::RegisterID reg1, X86Assembler::RegisterID reg2, unsigned opcodeIndex)
345 m_jit.movl_rr(reg1, X86::ecx);
346 m_jit.andl_rr(reg2, X86::ecx);
347 emitJumpSlowCaseIfNotImmNum(X86::ecx, opcodeIndex);
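// Tag checks for the fast paths: TagMask identifies immediates (any low tag bit set), so a non-zero
// test means "not a JSCell"; TagBitTypeInteger is the immediate-number bit, so a zero test means
// "not an immediate int". The two-register variant ANDs the operands first - both must carry the bit.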
350 ALWAYS_INLINE unsigned CTI::getDeTaggedConstantImmediate(JSValue* imm)
352 ASSERT(JSImmediate::isNumber(imm));
353 return reinterpret_cast<unsigned>(imm) & ~JSImmediate::TagBitTypeInteger;
356 ALWAYS_INLINE void CTI::emitFastArithDeTagImmediate(X86Assembler::RegisterID reg)
358 // op_mod relies on this being a sub - setting zf if result is 0.
359 m_jit.subl_i8r(JSImmediate::TagBitTypeInteger, reg);
362 ALWAYS_INLINE void CTI::emitFastArithReTagImmediate(X86Assembler::RegisterID reg)
364 m_jit.addl_i8r(JSImmediate::TagBitTypeInteger, reg);
367 ALWAYS_INLINE void CTI::emitFastArithPotentiallyReTagImmediate(X86Assembler::RegisterID reg)
369 m_jit.orl_i32r(JSImmediate::TagBitTypeInteger, reg);
372 ALWAYS_INLINE void CTI::emitFastArithImmToInt(X86Assembler::RegisterID reg)
374 m_jit.sarl_i8r(1, reg);
377 ALWAYS_INLINE void CTI::emitFastArithIntToImmOrSlowCase(X86Assembler::RegisterID reg, unsigned opcodeIndex)
379 m_jit.addl_rr(reg, reg);
380 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), opcodeIndex));
381 emitFastArithReTagImmediate(reg);
384 ALWAYS_INLINE void CTI::emitFastArithIntToImmNoCheck(X86Assembler::RegisterID reg)
386 m_jit.addl_rr(reg, reg);
387 emitFastArithReTagImmediate(reg);
390 ALWAYS_INLINE void CTI::emitTagAsBoolImmediate(X86Assembler::RegisterID reg)
392 m_jit.shl_i8r(JSImmediate::ExtendedPayloadShift, reg);
393 m_jit.orl_i32r(JSImmediate::FullTagTypeBool, reg);
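// Immediate integers are assumed here to be encoded as (value << 1) | TagBitTypeInteger, which is
// what the helpers above rely on: e.g. 3 encodes as 7, detagging gives 6, and adding a detagged
// constant to a still-tagged operand yields a correctly tagged sum; sarl by 1 recovers the machine
// int, and addl reg,reg plus a retag converts back. Booleans are built from a 0/1 byte shifted into
// the extended payload and tagged with FullTagTypeBool.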
396 CTI::CTI(Machine* machine, ExecState* exec, CodeBlock* codeBlock)
397 : m_jit(machine->jitCodeBuffer())
400 , m_codeBlock(codeBlock)
401 , m_labels(codeBlock ? codeBlock->instructions.size() : 0)
402 , m_structureStubCompilationInfo(codeBlock ? codeBlock->structureIDInstructions.size() : 0)
406 #define CTI_COMPILE_BINARY_OP(name) \
408 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
409 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
410 emitCall(i, Machine::cti_##name); \
411 emitPutResult(instruction[i + 1].u.operand); \
416 #define CTI_COMPILE_UNARY_OP(name) \
418 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
419 emitCall(i, Machine::cti_##name); \
420 emitPutResult(instruction[i + 1].u.operand); \
425 #if ENABLE(SAMPLING_TOOL)
426 OpcodeID currentOpcodeID = static_cast<OpcodeID>(-1);
429 void CTI::compileOpCall(Instruction* instruction, unsigned i, CompileOpCallType type)
431 int dst = instruction[i + 1].u.operand;
432 int firstArg = instruction[i + 4].u.operand;
433 int argCount = instruction[i + 5].u.operand;
435 if (type == OpConstruct) {
436 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 16);
437 emitPutArgConstant(argCount, 12);
438 emitPutArgConstant(firstArg, 8);
439 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
441 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 16);
442 emitPutArgConstant(argCount, 12);
443 emitPutArgConstant(firstArg, 8);
444 // FIXME: should this be loaded dynamically off m_exec?
445 int thisVal = instruction[i + 3].u.operand;
446 if (thisVal == missingThisObjectMarker()) {
447 emitPutArgConstant(reinterpret_cast<unsigned>(m_exec->globalThisValue()), 4);
449 emitGetPutArg(thisVal, 4, X86::ecx);
452 X86Assembler::JmpSrc wasEval;
453 if (type == OpCallEval) {
454 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
455 emitCall(i, Machine::cti_op_call_eval);
456 m_jit.emitRestoreArgumentReference();
458 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
460 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(JSImmediate::impossibleValue()), X86::eax);
461 wasEval = m_jit.emitUnlinkedJne();
463 // this reloads the first arg into ecx (checked just below).
464 emitGetArg(instruction[i + 2].u.operand, X86::ecx);
466 // this sets up the first arg, and explicitly leaves the value in ecx (checked just below).
467 emitGetArg(instruction[i + 2].u.operand, X86::ecx);
468 emitPutArg(X86::ecx, 0);
471 // initializeCallFrame!
472 m_jit.movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), (firstArg - RegisterFile::CallFrameHeaderSize + RegisterFile::CallerCodeBlock) * sizeof(Register), X86::edi);
473 m_jit.movl_i32m(reinterpret_cast<unsigned>(instruction + i), (firstArg - RegisterFile::CallFrameHeaderSize + RegisterFile::ReturnVPC) * sizeof(Register), X86::edi);
474 emitGetCTIParam(CTI_ARGS_scopeChain, X86::edx);
475 m_jit.movl_rm(X86::edx, (firstArg - RegisterFile::CallFrameHeaderSize + RegisterFile::CallerScopeChain) * sizeof(Register), X86::edi);
476 m_jit.movl_rm(X86::edi, (firstArg - RegisterFile::CallFrameHeaderSize + RegisterFile::CallerRegisters) * sizeof(Register), X86::edi);
477 m_jit.movl_i32m(dst, (firstArg - RegisterFile::CallFrameHeaderSize + RegisterFile::ReturnValueRegister) * sizeof(Register), X86::edi);
478 m_jit.movl_i32m(firstArg, (firstArg - RegisterFile::CallFrameHeaderSize + RegisterFile::ArgumentStartRegister) * sizeof(Register), X86::edi);
479 m_jit.movl_i32m(argCount, (firstArg - RegisterFile::CallFrameHeaderSize + RegisterFile::ArgumentCount) * sizeof(Register), X86::edi);
480 m_jit.movl_rm(X86::ecx, (firstArg - RegisterFile::CallFrameHeaderSize + RegisterFile::Callee) * sizeof(Register), X86::edi);
481 m_jit.movl_i32m(0, (firstArg - RegisterFile::CallFrameHeaderSize + RegisterFile::OptionalCalleeActivation) * sizeof(Register), X86::edi);
482 // CTIReturnEIP (set in callee)
484 // Fast check for JS function.
485 m_jit.testl_i32r(JSImmediate::TagMask, X86::ecx);
486 X86Assembler::JmpSrc isNotObject = m_jit.emitUnlinkedJne();
487 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsFunctionVptr), X86::ecx);
488 X86Assembler::JmpSrc isJSFunction = m_jit.emitUnlinkedJe();
489 m_jit.link(isNotObject, m_jit.label());
491 // This handles host functions
492 emitCall(i, ((type == OpConstruct) ? Machine::cti_op_construct_NotJSConstruct : Machine::cti_op_call_NotJSFunction));
493 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
495 X86Assembler::JmpSrc wasNotJSFunction = m_jit.emitUnlinkedJmp();
496 m_jit.link(isJSFunction, m_jit.label());
498 // This handles JSFunctions
499 emitCall(i, ((type == OpConstruct) ? Machine::cti_op_construct_JSConstruct : Machine::cti_op_call_JSFunction));
500 // Check that the ctiCode has been generated - if not, this is handled in a slow case.
501 m_jit.testl_rr(X86::eax, X86::eax);
502 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
503 m_jit.call_r(X86::eax);
505 // In the interpreter the following actions are performed by op_ret:
507 // Store the scope chain - returned by op_ret in %edx (see below) - to ExecState::m_scopeChain and CTI_ARGS_scopeChain on the stack.
508 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
509 emitPutCTIParam(X86::edx, CTI_ARGS_scopeChain);
510 m_jit.movl_rm(X86::edx, OBJECT_OFFSET(ExecState, m_scopeChain), X86::ecx);
511 // Restore ExecState::m_callFrame.
512 m_jit.leal_mr(-(m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize) * sizeof(Register), X86::edi, X86::edx);
513 m_jit.movl_rm(X86::edx, OBJECT_OFFSET(ExecState, m_callFrame), X86::ecx);
514 // Restore CTI_ARGS_codeBlock.
515 emitPutCTIParam(m_codeBlock, CTI_ARGS_codeBlock);
517 X86Assembler::JmpDst end = m_jit.label();
518 m_jit.link(wasNotJSFunction, end);
519 if (type == OpCallEval)
520 m_jit.link(wasEval, end);
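// Summary of the op_call/op_construct fast path above: the callee is vptr-checked against
// m_jsFunctionVptr; JSFunctions go through cti_op_call_JSFunction / cti_op_construct_JSConstruct,
// which return the callee's generated ctiCode (or 0, taking the slow case), and we call straight
// into it through %eax. Host functions and non-function callees take the cti_*_NotJSFunction path,
// and op_call_eval first tries cti_op_call_eval, falling through to a normal call if it returns
// the impossible-value marker.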
525 void CTI::compileOpStrictEq(Instruction* instruction, unsigned i, CompileOpStrictEqType type)
527 bool negated = (type == OpNStrictEq);
529 unsigned dst = instruction[i + 1].u.operand;
530 unsigned src1 = instruction[i + 2].u.operand;
531 unsigned src2 = instruction[i + 3].u.operand;
533 emitGetArg(src1, X86::eax);
534 emitGetArg(src2, X86::edx);
536 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
537 X86Assembler::JmpSrc firstNotImmediate = m_jit.emitUnlinkedJe();
538 m_jit.testl_i32r(JSImmediate::TagMask, X86::edx);
539 X86Assembler::JmpSrc secondNotImmediate = m_jit.emitUnlinkedJe();
541 m_jit.cmpl_rr(X86::edx, X86::eax);
543 m_jit.setne_r(X86::eax);
545 m_jit.sete_r(X86::eax);
546 m_jit.movzbl_rr(X86::eax, X86::eax);
547 emitTagAsBoolImmediate(X86::eax);
549 X86Assembler::JmpSrc bothWereImmediates = m_jit.emitUnlinkedJmp();
551 m_jit.link(firstNotImmediate, m_jit.label());
553 // check that edx is immediate but not the zero immediate
554 m_jit.testl_i32r(JSImmediate::TagMask, X86::edx);
555 m_jit.setz_r(X86::ecx);
556 m_jit.movzbl_rr(X86::ecx, X86::ecx); // ecx is now 1 if edx was nonimmediate
557 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::edx);
558 m_jit.sete_r(X86::edx);
559 m_jit.movzbl_rr(X86::edx, X86::edx); // edx is now 1 if edx was the 0 immediate
560 m_jit.orl_rr(X86::ecx, X86::edx);
562 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJnz(), i));
564 m_jit.movl_i32r(reinterpret_cast<uint32_t>(jsBoolean(negated)), X86::eax);
566 X86Assembler::JmpSrc firstWasNotImmediate = m_jit.emitUnlinkedJmp();
568 m_jit.link(secondNotImmediate, m_jit.label());
569 // check that eax is not the zero immediate (we know it must be immediate)
570 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
571 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
573 m_jit.movl_i32r(reinterpret_cast<uint32_t>(jsBoolean(negated)), X86::eax);
575 m_jit.link(bothWereImmediates, m_jit.label());
576 m_jit.link(firstWasNotImmediate, m_jit.label());
581 void CTI::emitSlowScriptCheck(unsigned opcodeIndex)
583 m_jit.subl_i8r(1, X86::esi);
584 X86Assembler::JmpSrc skipTimeout = m_jit.emitUnlinkedJne();
585 emitCall(opcodeIndex, Machine::cti_timeout_check);
587 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
588 m_jit.movl_mr(OBJECT_OFFSET(ExecState, m_globalData), X86::ecx, X86::ecx);
589 m_jit.movl_mr(OBJECT_OFFSET(JSGlobalData, machine), X86::ecx, X86::ecx);
590 m_jit.movl_mr(OBJECT_OFFSET(Machine, m_ticksUntilNextTimeoutCheck), X86::ecx, X86::esi);
591 m_jit.link(skipTimeout, m_jit.label());
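// %esi is the slow-script tick counter: ctiTrampoline preloads it with 512, each loop back edge
// decrements it here, and when it reaches zero cti_timeout_check runs and the counter is refreshed
// from Machine::m_ticksUntilNextTimeoutCheck via exec->globalData->machine.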
594 void CTI::privateCompileMainPass()
596 if (m_codeBlock->codeType == FunctionCode) {
597 for (int i = -m_codeBlock->numVars; i < 0; i++)
598 emitInitialiseRegister(i);
600 for (size_t i = 0; i < m_codeBlock->constantRegisters.size(); ++i)
601 emitInitialiseRegister(i);
603 Instruction* instruction = m_codeBlock->instructions.begin();
604 unsigned instructionCount = m_codeBlock->instructions.size();
606 unsigned structureIDInstructionIndex = 0;
608 for (unsigned i = 0; i < instructionCount; ) {
609 m_labels[i] = m_jit.label();
611 #if ENABLE(SAMPLING_TOOL)
612 m_jit.movl_i32m(m_machine->getOpcodeID(instruction[i].u.opcode), &currentOpcodeID);
615 ASSERT_WITH_MESSAGE(m_machine->isOpcode(instruction[i].u.opcode), "privateCompileMainPass gone bad @ %d", i);
616 m_jit.emitRestoreArgumentReference();
617 switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
619 unsigned src = instruction[i + 2].u.operand;
620 if (src < m_codeBlock->constantRegisters.size())
621 m_jit.movl_i32r(reinterpret_cast<unsigned>(m_codeBlock->constantRegisters[src].jsValue(m_exec)), X86::edx);
623 emitGetArg(src, X86::edx);
624 emitPutResult(instruction[i + 1].u.operand, X86::edx);
629 unsigned dst = instruction[i + 1].u.operand;
630 unsigned src1 = instruction[i + 2].u.operand;
631 unsigned src2 = instruction[i + 3].u.operand;
632 if (src2 < m_codeBlock->constantRegisters.size()) {
633 JSValue* value = m_codeBlock->constantRegisters[src2].jsValue(m_exec);
634 if (JSImmediate::isNumber(value)) {
635 emitGetArg(src1, X86::eax);
636 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
637 m_jit.addl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
638 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
643 } else if (!(src1 < m_codeBlock->constantRegisters.size())) {
644 emitGetArg(src1, X86::eax);
645 emitGetArg(src2, X86::edx);
646 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
647 emitFastArithDeTagImmediate(X86::eax);
648 m_jit.addl_rr(X86::edx, X86::eax);
649 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
654 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
655 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
656 emitCall(i, Machine::cti_op_add);
657 emitPutResult(instruction[i + 1].u.operand);
662 if (m_codeBlock->needsFullScopeChain)
663 emitCall(i, Machine::cti_op_end);
664 emitGetArg(instruction[i + 1].u.operand, X86::eax);
665 #if ENABLE(SAMPLING_TOOL)
666 m_jit.movl_i32m(-1, &currentOpcodeID);
668 m_jit.pushl_m(-((m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize) - RegisterFile::CTIReturnEIP) * sizeof(Register), X86::edi);
674 unsigned target = instruction[i + 1].u.operand;
675 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
680 int srcDst = instruction[i + 1].u.operand;
681 emitGetArg(srcDst, X86::eax);
682 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
683 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
684 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
685 emitPutResult(srcDst, X86::eax);
690 emitSlowScriptCheck(i);
692 unsigned target = instruction[i + 1].u.operand;
693 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
697 case op_loop_if_less: {
698 emitSlowScriptCheck(i);
700 unsigned target = instruction[i + 3].u.operand;
701 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
703 emitGetArg(instruction[i + 1].u.operand, X86::edx);
704 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
705 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
706 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
708 emitGetArg(instruction[i + 1].u.operand, X86::eax);
709 emitGetArg(instruction[i + 2].u.operand, X86::edx);
710 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
711 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
712 m_jit.cmpl_rr(X86::edx, X86::eax);
713 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
718 case op_loop_if_lesseq: {
719 emitSlowScriptCheck(i);
721 unsigned target = instruction[i + 3].u.operand;
722 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
724 emitGetArg(instruction[i + 1].u.operand, X86::edx);
725 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
726 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
727 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
729 emitGetArg(instruction[i + 1].u.operand, X86::eax);
730 emitGetArg(instruction[i + 2].u.operand, X86::edx);
731 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
732 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
733 m_jit.cmpl_rr(X86::edx, X86::eax);
734 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
739 case op_new_object: {
740 emitCall(i, Machine::cti_op_new_object);
741 emitPutResult(instruction[i + 1].u.operand);
746 // In order to be able to repatch both the StructureID and the object offset, we record a single pointer,
747 // 'hotPathBegin', to just after the point where the arguments have been loaded into registers, and we
748 // generate code such that the StructureID & offset are always at the same distance from it.
750 emitGetArg(instruction[i + 1].u.operand, X86::eax);
751 emitGetArg(instruction[i + 3].u.operand, X86::edx);
753 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
754 X86Assembler::JmpDst hotPathBegin = m_jit.label();
755 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
756 ++structureIDInstructionIndex;
758 // Jump to a slow case if either the base object is an immediate, or if the StructureID does not match.
759 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
760 // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
761 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
762 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdStructureID);
763 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
765 // Plant a store to a bogus offset in the object's property storage; we will patch this later, if it is to be used.
766 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
767 m_jit.movl_rm(X86::edx, repatchGetByIdDefaultOffset, X86::eax);
768 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdPropertyMapOffset);
774 // As for put_by_id, get_by_id requires the offset of the StructureID and the offset of the access to be repatched.
775 // Additionally, for get_by_id we need to repatch the offset of the branch to the slow case (we repatch this to jump
776 // to array-length / prototype access trampolines), and finally we also need the property-map access offset as a label
777 // to jump back to if one of these trampolines finds a match.
779 emitGetArg(instruction[i + 2].u.operand, X86::eax);
781 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
783 X86Assembler::JmpDst hotPathBegin = m_jit.label();
784 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
785 ++structureIDInstructionIndex;
787 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
788 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
789 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdStructureID);
790 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
791 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdBranchToSlowCase);
793 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
794 m_jit.movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::ecx);
795 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdPropertyMapOffset);
796 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
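// Both hot paths above deliberately plant a dummy StructureID immediate and a dummy property-storage
// offset at fixed distances from hotPathBegin (asserted via the repatchOffset* constants); the
// recorded StructureStubCompilationInfo is presumably used later to rewrite those fields in place
// once a real StructureID/offset is known, turning the generic path into a monomorphic cache.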
801 case op_instanceof: {
802 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
803 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
804 emitGetPutArg(instruction[i + 4].u.operand, 8, X86::ecx);
805 emitCall(i, Machine::cti_op_instanceof);
806 emitPutResult(instruction[i + 1].u.operand);
811 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
812 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
813 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
814 emitCall(i, Machine::cti_op_del_by_id);
815 emitPutResult(instruction[i + 1].u.operand);
820 unsigned dst = instruction[i + 1].u.operand;
821 unsigned src1 = instruction[i + 2].u.operand;
822 unsigned src2 = instruction[i + 3].u.operand;
823 if (src1 < m_codeBlock->constantRegisters.size() || src2 < m_codeBlock->constantRegisters.size()) {
824 unsigned constant = src1;
825 unsigned nonconstant = src2;
826 if (!(src1 < m_codeBlock->constantRegisters.size())) {
830 JSValue* value = m_codeBlock->constantRegisters[constant].jsValue(m_exec);
831 if (JSImmediate::isNumber(value)) {
832 emitGetArg(nonconstant, X86::eax);
833 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
834 emitFastArithImmToInt(X86::eax);
835 m_jit.imull_i32r(X86::eax, getDeTaggedConstantImmediate(value), X86::eax);
836 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
837 emitFastArithPotentiallyReTagImmediate(X86::eax);
844 emitGetArg(src1, X86::eax);
845 emitGetArg(src2, X86::edx);
846 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
847 emitFastArithDeTagImmediate(X86::eax);
848 emitFastArithImmToInt(X86::edx);
849 m_jit.imull_rr(X86::edx, X86::eax);
850 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
851 emitFastArithPotentiallyReTagImmediate(X86::eax);
857 FuncDeclNode* func = (m_codeBlock->functions[instruction[i + 2].u.operand]).get();
858 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
859 emitCall(i, Machine::cti_op_new_func);
860 emitPutResult(instruction[i + 1].u.operand);
865 compileOpCall(instruction, i);
869 case op_get_global_var: {
870 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 2].u.jsCell);
871 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
872 emitGetVariableObjectRegister(X86::eax, instruction[i + 3].u.operand, X86::eax);
873 emitPutResult(instruction[i + 1].u.operand, X86::eax);
877 case op_put_global_var: {
878 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 1].u.jsCell);
879 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
880 emitGetArg(instruction[i + 3].u.operand, X86::edx);
881 emitPutVariableObjectRegister(X86::edx, X86::eax, instruction[i + 2].u.operand);
885 case op_get_scoped_var: {
886 int skip = instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain;
888 emitGetCTIParam(CTI_ARGS_scopeChain, X86::eax);
890 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::eax, X86::eax);
892 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::eax, X86::eax);
893 emitGetVariableObjectRegister(X86::eax, instruction[i + 2].u.operand, X86::eax);
894 emitPutResult(instruction[i + 1].u.operand);
898 case op_put_scoped_var: {
899 int skip = instruction[i + 2].u.operand + m_codeBlock->needsFullScopeChain;
901 emitGetCTIParam(CTI_ARGS_scopeChain, X86::edx);
902 emitGetArg(instruction[i + 3].u.operand, X86::eax);
904 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::edx, X86::edx);
906 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::edx, X86::edx);
907 emitPutVariableObjectRegister(X86::eax, X86::edx, instruction[i + 1].u.operand);
912 // Check for an activation - if there is one, jump to the hook below.
913 m_jit.cmpl_i32m(0, -(m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize - RegisterFile::OptionalCalleeActivation) * sizeof(Register), X86::edi);
914 X86Assembler::JmpSrc activation = m_jit.emitUnlinkedJne();
915 X86Assembler::JmpDst activated = m_jit.label();
917 // Check for a profiler - if there is one, jump to the hook below.
918 emitGetCTIParam(CTI_ARGS_profilerReference, X86::eax);
919 m_jit.cmpl_i32m(0, X86::eax);
920 X86Assembler::JmpSrc profile = m_jit.emitUnlinkedJne();
921 X86Assembler::JmpDst profiled = m_jit.label();
923 // We could JIT generate the deref, only calling out to C when the refcount hits zero.
924 if (m_codeBlock->needsFullScopeChain)
925 emitCall(i, Machine::cti_op_ret_scopeChain);
927 // Return the result in %eax, and the caller scope chain in %edx (this is read from the callee call frame,
928 // but is only assigned to ExecState::m_scopeChain if returning to a JSFunction).
929 emitGetArg(instruction[i + 1].u.operand, X86::eax);
930 m_jit.movl_mr(-(m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize - RegisterFile::CallerScopeChain) * sizeof(Register), X86::edi, X86::edx);
931 // Restore the machine return address from the callframe, roll the callframe back to the caller callframe,
932 // and preserve a copy of r on the stack at CTI_ARGS_r.
933 m_jit.movl_mr(-(m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize - RegisterFile::CTIReturnEIP) * sizeof(Register), X86::edi, X86::ecx);
934 m_jit.movl_mr(-(m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize - RegisterFile::CallerRegisters) * sizeof(Register), X86::edi, X86::edi);
935 emitPutCTIParam(X86::edi, CTI_ARGS_r);
937 m_jit.pushl_r(X86::ecx);
941 m_jit.link(activation, m_jit.label());
942 emitCall(i, Machine::cti_op_ret_activation);
943 m_jit.link(m_jit.emitUnlinkedJmp(), activated);
946 m_jit.link(profile, m_jit.label());
947 emitCall(i, Machine::cti_op_ret_profiler);
948 m_jit.link(m_jit.emitUnlinkedJmp(), profiled);
954 m_jit.leal_mr(sizeof(Register) * instruction[i + 2].u.operand, X86::edi, X86::edx);
955 emitPutArg(X86::edx, 0);
956 emitPutArgConstant(instruction[i + 3].u.operand, 4);
957 emitCall(i, Machine::cti_op_new_array);
958 emitPutResult(instruction[i + 1].u.operand);
963 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
964 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
965 emitCall(i, Machine::cti_op_resolve);
966 emitPutResult(instruction[i + 1].u.operand);
971 compileOpCall(instruction, i, OpConstruct);
975 case op_construct_verify: {
976 emitGetArg(instruction[i + 1].u.operand, X86::eax);
978 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
979 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJne();
980 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
981 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
982 X86Assembler::JmpSrc isObject = m_jit.emitUnlinkedJe();
984 m_jit.link(isImmediate, m_jit.label());
985 emitGetArg(instruction[i + 2].u.operand, X86::ecx);
986 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
987 m_jit.link(isObject, m_jit.label());
992 case op_get_by_val: {
993 emitGetArg(instruction[i + 2].u.operand, X86::eax);
994 emitGetArg(instruction[i + 3].u.operand, X86::edx);
995 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
996 emitFastArithImmToInt(X86::edx);
997 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
998 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
999 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
1000 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1002 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
1003 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
1004 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
1005 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
1007 // Get the value from the vector
1008 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::eax);
1009 emitPutResult(instruction[i + 1].u.operand);
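// Fast get_by_val: the index must be an immediate int and the base's vptr must match m_jsArrayVptr;
// an index below m_fastAccessCutoff is taken to guarantee an initialized slot in m_storage's vector,
// so the element can be loaded directly, while everything else (holes, out-of-range, non-arrays)
// falls back to the slow case.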
1013 case op_resolve_func: {
1014 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1015 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1016 emitCall(i, Machine::cti_op_resolve_func);
1017 emitPutResult(instruction[i + 1].u.operand);
1018 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
1019 emitPutResult(instruction[i + 2].u.operand);
1024 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1025 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1026 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1027 m_jit.subl_rr(X86::edx, X86::eax);
1028 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1029 emitFastArithReTagImmediate(X86::eax);
1030 emitPutResult(instruction[i + 1].u.operand);
1034 case op_put_by_val: {
1035 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1036 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1037 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1038 emitFastArithImmToInt(X86::edx);
1039 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1040 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1041 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
1042 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1044 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
1045 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
1046 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
1047 X86Assembler::JmpSrc inFastVector = m_jit.emitUnlinkedJa();
1048 // No; oh well, check if the access is within the vector - if so, we may still be okay.
1049 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
1050 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
1052 // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
1053 // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff.
1054 m_jit.cmpl_i8m(0, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
1055 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
1057 // All good - put the value into the array.
1058 m_jit.link(inFastVector, m_jit.label());
1059 emitGetArg(instruction[i + 3].u.operand, X86::eax);
1060 m_jit.movl_rm(X86::eax, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
1064 CTI_COMPILE_BINARY_OP(op_lesseq)
1065 case op_loop_if_true: {
1066 emitSlowScriptCheck(i);
1068 unsigned target = instruction[i + 2].u.operand;
1069 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1071 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1072 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
1073 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1074 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
1076 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1077 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1078 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1079 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1081 m_jit.link(isZero, m_jit.label());
1085 case op_resolve_base: {
1086 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1087 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1088 emitCall(i, Machine::cti_op_resolve_base);
1089 emitPutResult(instruction[i + 1].u.operand);
1094 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1095 emitCall(i, Machine::cti_op_negate);
1096 emitPutResult(instruction[i + 1].u.operand);
1100 case op_resolve_skip: {
1101 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1102 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1103 emitPutArgConstant(instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain, 4);
1104 emitCall(i, Machine::cti_op_resolve_skip);
1105 emitPutResult(instruction[i + 1].u.operand);
1109 case op_resolve_global: {
1111 unsigned globalObject = reinterpret_cast<unsigned>(instruction[i + 2].u.jsCell);
1112 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1113 void* structureIDAddr = reinterpret_cast<void*>(instruction + i + 4);
1114 void* offsetAddr = reinterpret_cast<void*>(instruction + i + 5);
1116 // Check StructureID of global object
1117 m_jit.movl_i32r(globalObject, X86::eax);
1118 m_jit.movl_mr(structureIDAddr, X86::edx);
1119 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1120 X86Assembler::JmpSrc slowCase = m_jit.emitUnlinkedJne(); // StructureIDs don't match
1121 m_slowCases.append(SlowCaseEntry(slowCase, i));
1123 // Load cached property
1124 m_jit.movl_mr(OBJECT_OFFSET(JSGlobalObject, m_propertyStorage), X86::eax, X86::eax);
1125 m_jit.movl_mr(offsetAddr, X86::edx);
1126 m_jit.movl_mr(0, X86::eax, X86::edx, sizeof(JSValue*), X86::eax);
1127 emitPutResult(instruction[i + 1].u.operand);
1128 X86Assembler::JmpSrc end = m_jit.emitUnlinkedJmp();
1131 m_jit.link(slowCase, m_jit.label());
1132 emitPutArgConstant(globalObject, 0);
1133 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1134 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 8);
1135 emitCall(i, Machine::cti_op_resolve_global);
1136 emitPutResult(instruction[i + 1].u.operand);
1137 m_jit.link(end, m_jit.label());
1139 ++structureIDInstructionIndex;
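// op_resolve_global keeps its cache inline in the bytecode stream: instruction[i + 4] holds the
// global object's expected StructureID and instruction[i + 5] the property-storage offset, read
// indirectly through structureIDAddr/offsetAddr above; on a miss, cti_op_resolve_global is handed
// the instruction pointer, presumably so it can refill those two slots.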
1142 CTI_COMPILE_BINARY_OP(op_div)
1144 int srcDst = instruction[i + 1].u.operand;
1145 emitGetArg(srcDst, X86::eax);
1146 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1147 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
1148 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1149 emitPutResult(srcDst, X86::eax);
1154 unsigned target = instruction[i + 3].u.operand;
1155 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1157 emitGetArg(instruction[i + 1].u.operand, X86::edx);
1158 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1159 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
1160 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
1162 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1163 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1164 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1165 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1166 m_jit.cmpl_rr(X86::edx, X86::eax);
1167 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
1173 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1174 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
1175 m_jit.testl_i32r(JSImmediate::FullTagTypeMask, X86::eax); // i8?
1176 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1177 m_jit.xorl_i8r((JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue), X86::eax);
1178 emitPutResult(instruction[i + 1].u.operand);
1183 unsigned target = instruction[i + 2].u.operand;
1184 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1186 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1187 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1188 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1189 X86Assembler::JmpSrc isNonZero = m_jit.emitUnlinkedJne();
1191 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1192 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1193 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1194 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1196 m_jit.link(isNonZero, m_jit.label());
1201 int srcDst = instruction[i + 2].u.operand;
1202 emitGetArg(srcDst, X86::eax);
1203 m_jit.movl_rr(X86::eax, X86::edx);
1204 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1205 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1206 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1207 emitPutResult(srcDst, X86::edx);
1208 emitPutResult(instruction[i + 1].u.operand);
1212 case op_unexpected_load: {
1213 JSValue* v = m_codeBlock->unexpectedConstants[instruction[i + 2].u.operand];
1214 m_jit.movl_i32r(reinterpret_cast<unsigned>(v), X86::eax);
1215 emitPutResult(instruction[i + 1].u.operand);
1220 int retAddrDst = instruction[i + 1].u.operand;
1221 int target = instruction[i + 2].u.operand;
1222 m_jit.movl_i32m(0, sizeof(Register) * retAddrDst, X86::edi);
1223 X86Assembler::JmpDst addrPosition = m_jit.label();
1224 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1225 X86Assembler::JmpDst sretTarget = m_jit.label();
1226 m_jsrSites.append(JSRInfo(addrPosition, sretTarget));
1231 m_jit.jmp_m(sizeof(Register) * instruction[i + 1].u.operand, X86::edi);
1236 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1237 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1238 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1239 m_jit.cmpl_rr(X86::edx, X86::eax);
1240 m_jit.sete_r(X86::eax);
1241 m_jit.movzbl_rr(X86::eax, X86::eax);
1242 emitTagAsBoolImmediate(X86::eax);
1243 emitPutResult(instruction[i + 1].u.operand);
1248 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1249 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1250 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1251 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1252 emitFastArithImmToInt(X86::eax);
1253 emitFastArithImmToInt(X86::ecx);
1254 m_jit.shll_CLr(X86::eax);
1255 emitFastArithIntToImmOrSlowCase(X86::eax, i);
1256 emitPutResult(instruction[i + 1].u.operand);
1261 unsigned src1 = instruction[i + 2].u.operand;
1262 unsigned src2 = instruction[i + 3].u.operand;
1263 unsigned dst = instruction[i + 1].u.operand;
1264 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
1265 emitGetArg(src2, X86::eax);
1266 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1267 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax); // FIXME: make it more obvious this is relying on the format of JSImmediate
1269 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
1270 emitGetArg(src1, X86::eax);
1271 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1272 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax);
1275 emitGetArg(src1, X86::eax);
1276 emitGetArg(src2, X86::edx);
1277 m_jit.andl_rr(X86::edx, X86::eax);
1278 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1285 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1286 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1287 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1288 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1289 emitFastArithImmToInt(X86::ecx);
1290 m_jit.sarl_CLr(X86::eax);
1291 emitFastArithPotentiallyReTagImmediate(X86::eax);
1292 emitPutResult(instruction[i + 1].u.operand);
1297 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1298 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1299 m_jit.xorl_i8r(~JSImmediate::TagBitTypeInteger, X86::eax);
1300 emitPutResult(instruction[i + 1].u.operand);
1304 case op_resolve_with_base: {
1305 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1306 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1307 emitCall(i, Machine::cti_op_resolve_with_base);
1308 emitPutResult(instruction[i + 1].u.operand);
1309 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
1310 emitPutResult(instruction[i + 2].u.operand);
1314 case op_new_func_exp: {
1315 FuncExprNode* func = (m_codeBlock->functionExpressions[instruction[i + 2].u.operand]).get();
1316 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
1317 emitCall(i, Machine::cti_op_new_func_exp);
1318 emitPutResult(instruction[i + 1].u.operand);
1323 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1324 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1325 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1326 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1327 emitFastArithDeTagImmediate(X86::eax);
1328 emitFastArithDeTagImmediate(X86::ecx);
1329 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i)); // This is checking if the last detag resulted in a value 0.
1331 m_jit.idivl_r(X86::ecx);
1332 emitFastArithReTagImmediate(X86::edx);
1333 m_jit.movl_rr(X86::edx, X86::eax);
1334 emitPutResult(instruction[i + 1].u.operand);
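// op_mod fast path: both operands are detagged; a detagged divisor of zero sets ZF at the subl
// and is routed to the slow case by the unlinked Je above. idivl leaves the remainder in %edx,
// which is retagged and moved to %eax as the result.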
1339 unsigned target = instruction[i + 2].u.operand;
1340 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1342 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1343 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
1344 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1345 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
1347 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1348 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1349 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1350 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1352 m_jit.link(isZero, m_jit.label());
1356 CTI_COMPILE_BINARY_OP(op_less)
1357 CTI_COMPILE_BINARY_OP(op_neq)
1359 int srcDst = instruction[i + 2].u.operand;
1360 emitGetArg(srcDst, X86::eax);
1361 m_jit.movl_rr(X86::eax, X86::edx);
1362 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1363 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1364 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1365 emitPutResult(srcDst, X86::edx);
1366 emitPutResult(instruction[i + 1].u.operand);
1370 CTI_COMPILE_BINARY_OP(op_urshift)
1372 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1373 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1374 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1375 m_jit.xorl_rr(X86::edx, X86::eax);
1376 emitFastArithReTagImmediate(X86::eax);
1377 emitPutResult(instruction[i + 1].u.operand);
1381 case op_new_regexp: {
1382 RegExp* regExp = m_codeBlock->regexps[instruction[i + 2].u.operand].get();
1383 emitPutArgConstant(reinterpret_cast<unsigned>(regExp), 0);
1384 emitCall(i, Machine::cti_op_new_regexp);
1385 emitPutResult(instruction[i + 1].u.operand);
1390 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1391 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1392 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1393 m_jit.orl_rr(X86::edx, X86::eax);
1394 emitPutResult(instruction[i + 1].u.operand);
1398 case op_call_eval: {
1399 compileOpCall(instruction, i, OpCallEval);
1404 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1405 emitCall(i, Machine::cti_op_throw);
1406 m_jit.addl_i8r(0x24, X86::esp);
1407 m_jit.popl_r(X86::edi);
1408 m_jit.popl_r(X86::esi);
1413 case op_get_pnames: {
1414 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1415 emitCall(i, Machine::cti_op_get_pnames);
1416 emitPutResult(instruction[i + 1].u.operand);
1420 case op_next_pname: {
1421 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1422 unsigned target = instruction[i + 3].u.operand;
1423 emitCall(i, Machine::cti_op_next_pname);
1424 m_jit.testl_rr(X86::eax, X86::eax);
1425 X86Assembler::JmpSrc endOfIter = m_jit.emitUnlinkedJe();
1426 emitPutResult(instruction[i + 1].u.operand);
1427 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 3 + target));
1428 m_jit.link(endOfIter, m_jit.label());
1432 case op_push_scope: {
1433 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1434 emitCall(i, Machine::cti_op_push_scope);
1438 case op_pop_scope: {
1439 emitCall(i, Machine::cti_op_pop_scope);
1443 CTI_COMPILE_UNARY_OP(op_typeof)
1444 CTI_COMPILE_UNARY_OP(op_is_undefined)
1445 CTI_COMPILE_UNARY_OP(op_is_boolean)
1446 CTI_COMPILE_UNARY_OP(op_is_number)
1447 CTI_COMPILE_UNARY_OP(op_is_string)
1448 CTI_COMPILE_UNARY_OP(op_is_object)
1449 CTI_COMPILE_UNARY_OP(op_is_function)
1451 compileOpStrictEq(instruction, i, OpStrictEq);
1455 case op_nstricteq: {
1456 compileOpStrictEq(instruction, i, OpNStrictEq);
1460 case op_to_jsnumber: {
1461 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1462 emitCall(i, Machine::cti_op_to_jsnumber);
1463 emitPutResult(instruction[i + 1].u.operand);
1468 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1469 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1470 emitCall(i, Machine::cti_op_in);
1471 emitPutResult(instruction[i + 1].u.operand);
1475 case op_push_new_scope: {
1476 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1477 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1478 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1479 emitCall(i, Machine::cti_op_push_new_scope);
1480 emitPutResult(instruction[i + 1].u.operand);
1485 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
1486 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
1487 m_jit.movl_mr(OBJECT_OFFSET(ExecState, m_exception), X86::ecx, X86::eax);
1488 m_jit.movl_i32m(0, OBJECT_OFFSET(ExecState, m_exception), X86::ecx);
1489 emitPutResult(instruction[i + 1].u.operand);
1493 case op_jmp_scopes: {
1494 unsigned count = instruction[i + 1].u.operand;
1495 emitPutArgConstant(count, 0);
1496 emitCall(i, Machine::cti_op_jmp_scopes);
1497 unsigned target = instruction[i + 2].u.operand;
1498 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1502 case op_put_by_index: {
1503 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1504 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1505 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1506 emitCall(i, Machine::cti_op_put_by_index);
1510 case op_switch_imm: {
1511 unsigned tableIndex = instruction[i + 1].u.operand;
1512 unsigned defaultOffset = instruction[i + 2].u.operand;
1513 unsigned scrutinee = instruction[i + 3].u.operand;
1515 // create jump table for switch destinations, track this switch statement.
1516 SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTables[tableIndex];
1517 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Immediate));
1518 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
1520 emitGetPutArg(scrutinee, 0, X86::ecx);
1521 emitPutArgConstant(tableIndex, 4);
1522 emitCall(i, Machine::cti_op_switch_imm);
1523 m_jit.jmp_r(X86::eax);
1527 case op_switch_char: {
1528 unsigned tableIndex = instruction[i + 1].u.operand;
1529 unsigned defaultOffset = instruction[i + 2].u.operand;
1530 unsigned scrutinee = instruction[i + 3].u.operand;
1532 // create jump table for switch destinations, track this switch statement.
1533 SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTables[tableIndex];
1534 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Character));
1535 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
1537 emitGetPutArg(scrutinee, 0, X86::ecx);
1538 emitPutArgConstant(tableIndex, 4);
1539 emitCall(i, Machine::cti_op_switch_char);
1540 m_jit.jmp_r(X86::eax);
1544 case op_switch_string: {
1545 unsigned tableIndex = instruction[i + 1].u.operand;
1546 unsigned defaultOffset = instruction[i + 2].u.operand;
1547 unsigned scrutinee = instruction[i + 3].u.operand;
1549 // create jump table for switch destinations, track this switch statement.
1550 StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTables[tableIndex];
1551 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset));
1553 emitGetPutArg(scrutinee, 0, X86::ecx);
1554 emitPutArgConstant(tableIndex, 4);
1555 emitCall(i, Machine::cti_op_switch_string);
1556 m_jit.jmp_r(X86::eax);
1560 case op_del_by_val: {
1561 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1562 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1563 emitCall(i, Machine::cti_op_del_by_val);
1564 emitPutResult(instruction[i + 1].u.operand);
1568 case op_put_getter: {
1569 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1570 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1571 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1572 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1573 emitCall(i, Machine::cti_op_put_getter);
1577 case op_put_setter: {
1578 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1579 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1580 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1581 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1582 emitCall(i, Machine::cti_op_put_setter);
1586 case op_new_error: {
1587 JSValue* message = m_codeBlock->unexpectedConstants[instruction[i + 3].u.operand];
1588 emitPutArgConstant(instruction[i + 2].u.operand, 0);
1589 emitPutArgConstant(reinterpret_cast<unsigned>(message), 4);
1590 emitPutArgConstant(m_codeBlock->lineNumberForVPC(&instruction[i]), 8);
1591 emitCall(i, Machine::cti_op_new_error);
1592 emitPutResult(instruction[i + 1].u.operand);
1597 emitPutArgConstant(instruction[i + 1].u.operand, 0);
1598 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1599 emitPutArgConstant(instruction[i + 3].u.operand, 8);
1600 emitCall(i, Machine::cti_op_debug);
1605 unsigned dst = instruction[i + 1].u.operand;
1606 unsigned src1 = instruction[i + 2].u.operand;
1608 emitGetArg(src1, X86::eax);
1609 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1610 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJnz();
1612 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1613 m_jit.testl_i32m(MasqueradesAsUndefined, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::ecx);
1614 m_jit.setnz_r(X86::eax);
1616 X86Assembler::JmpSrc wasNotImmediate = m_jit.emitUnlinkedJmp();
1618 m_jit.link(isImmediate, m_jit.label());
1620 m_jit.movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
1621 m_jit.andl_rr(X86::eax, X86::ecx);
1622 m_jit.cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
1623 m_jit.sete_r(X86::eax);
1625 m_jit.link(wasNotImmediate, m_jit.label());
1627 m_jit.movzbl_rr(X86::eax, X86::eax);
1628 emitTagAsBoolImmediate(X86::eax);
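// Taken together, the two paths above compute roughly the following (a sketch, not literal emitted code;
// isCell/src are illustrative names):
//   isCell(src) ? (src->m_structureID->m_typeInfo.m_flags & MasqueradesAsUndefined)
//               : ((src & ~ExtendedTagBitUndefined) == FullTagTypeNull)
// i.e. cells answer according to the MasqueradesAsUndefined flag, while immediates answer true only for
// null/undefined; the byte result is then zero-extended and tagged as a boolean immediate. The nearly
// identical block below computes the inverse predicate (setz/setne in place of setnz/sete).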
1635 unsigned dst = instruction[i + 1].u.operand;
1636 unsigned src1 = instruction[i + 2].u.operand;
1638 emitGetArg(src1, X86::eax);
1639 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1640 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJnz();
1642 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1643 m_jit.testl_i32m(MasqueradesAsUndefined, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::ecx);
1644 m_jit.setz_r(X86::eax);
1646 X86Assembler::JmpSrc wasNotImmediate = m_jit.emitUnlinkedJmp();
1648 m_jit.link(isImmediate, m_jit.label());
1650 m_jit.movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
1651 m_jit.andl_rr(X86::eax, X86::ecx);
1652 m_jit.cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
1653 m_jit.setne_r(X86::eax);
1655 m_jit.link(wasNotImmediate, m_jit.label());
1657 m_jit.movzbl_rr(X86::eax, X86::eax);
1658 emitTagAsBoolImmediate(X86::eax);
1664 case op_initialise_locals: {
1668 case op_get_array_length:
1669 case op_get_by_id_chain:
1670 case op_get_by_id_generic:
1671 case op_get_by_id_proto:
1672 case op_get_by_id_self:
1673 case op_get_string_length:
1674 case op_put_by_id_generic:
1675 case op_put_by_id_replace:
1676 case op_put_by_id_transition:
1677 ASSERT_NOT_REACHED();
1681 ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
1685 void CTI::privateCompileLinkPass()
1687 unsigned jmpTableCount = m_jmpTable.size();
1688 for (unsigned i = 0; i < jmpTableCount; ++i)
1689 m_jit.link(m_jmpTable[i].from, m_labels[m_jmpTable[i].to]);
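// The link pass resolves the forward jumps recorded during the main pass: each m_jmpTable entry pairs an
// unlinked jump (from) with the bytecode index it targets (to), and m_labels holds the label emitted at
// the start of each opcode, so linking the two stitches the control flow together.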
1693 #define CTI_COMPILE_BINARY_OP_SLOW_CASE(name) \
1695 m_jit.link(iter->from, m_jit.label()); \
1696 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
1697 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
1698 emitCall(i, Machine::cti_##name); \
1699 emitPutResult(instruction[i + 1].u.operand); \
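// The macro above emits the generic slow path for a binary operator: link the slow-case jump taken from
// the fast path, put both operands back into the argument area, call the corresponding Machine::cti_
// helper, and store the returned value into the destination register.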
1704 void CTI::privateCompileSlowCases()
1706 unsigned structureIDInstructionIndex = 0;
1708 Instruction* instruction = m_codeBlock->instructions.begin();
1709 for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end(); ++iter) {
1710 unsigned i = iter->to;
1711 m_jit.emitRestoreArgumentReference();
1712 switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
1714 unsigned dst = instruction[i + 1].u.operand;
1715 unsigned src2 = instruction[i + 3].u.operand;
1716 if (src2 < m_codeBlock->constantRegisters.size()) {
1717 JSValue* value = m_codeBlock->constantRegisters[src2].jsValue(m_exec);
1718 if (JSImmediate::isNumber(value)) {
1719 X86Assembler::JmpSrc notImm = iter->from;
1720 m_jit.link((++iter)->from, m_jit.label());
1721 m_jit.subl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
1722 m_jit.link(notImm, m_jit.label());
1723 emitPutArg(X86::eax, 0);
1724 emitGetPutArg(src2, 4, X86::ecx);
1725 emitCall(i, Machine::cti_op_add);
1732 ASSERT(!(static_cast<unsigned>(instruction[i + 2].u.operand) < m_codeBlock->constantRegisters.size()));
1734 X86Assembler::JmpSrc notImm = iter->from;
1735 m_jit.link((++iter)->from, m_jit.label());
1736 m_jit.subl_rr(X86::edx, X86::eax);
1737 emitFastArithReTagImmediate(X86::eax);
1738 m_jit.link(notImm, m_jit.label());
1739 emitPutArg(X86::eax, 0);
1740 emitPutArg(X86::edx, 4);
1741 emitCall(i, Machine::cti_op_add);
1746 case op_get_by_val: {
1747 // The slow case that handles accesses to arrays (below) may jump back up to here.
1748 X86Assembler::JmpDst beginGetByValSlow = m_jit.label();
1750 X86Assembler::JmpSrc notImm = iter->from;
1751 m_jit.link((++iter)->from, m_jit.label());
1752 m_jit.link((++iter)->from, m_jit.label());
1753 emitFastArithIntToImmNoCheck(X86::edx);
1754 m_jit.link(notImm, m_jit.label());
1755 emitPutArg(X86::eax, 0);
1756 emitPutArg(X86::edx, 4);
1757 emitCall(i, Machine::cti_op_get_by_val);
1758 emitPutResult(instruction[i + 1].u.operand);
1759 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
1761 // This is the slow case that handles accesses to arrays above the fast cut-off.
1762 // First, check if this is an access to the vector
1763 m_jit.link((++iter)->from, m_jit.label());
1764 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
1765 m_jit.link(m_jit.emitUnlinkedJbe(), beginGetByValSlow);
1767 // Okay, we missed the fast region, but the access is still within the vector - get the value.
1768 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::ecx);
1769 // Check whether the value loaded is zero; if so we need to return undefined.
1770 m_jit.testl_rr(X86::ecx, X86::ecx);
1771 m_jit.link(m_jit.emitUnlinkedJe(), beginGetByValSlow);
1772 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
1778 X86Assembler::JmpSrc notImm = iter->from;
1779 m_jit.link((++iter)->from, m_jit.label());
1780 m_jit.addl_rr(X86::edx, X86::eax);
1781 m_jit.link(notImm, m_jit.label());
1782 emitPutArg(X86::eax, 0);
1783 emitPutArg(X86::edx, 4);
1784 emitCall(i, Machine::cti_op_sub);
1785 emitPutResult(instruction[i + 1].u.operand);
1790 m_jit.link(iter->from, m_jit.label());
1791 m_jit.link((++iter)->from, m_jit.label());
1792 emitPutArg(X86::eax, 0);
1793 emitPutArg(X86::ecx, 4);
1794 emitCall(i, Machine::cti_op_rshift);
1795 emitPutResult(instruction[i + 1].u.operand);
1800 X86Assembler::JmpSrc notImm1 = iter->from;
1801 X86Assembler::JmpSrc notImm2 = (++iter)->from;
1802 m_jit.link((++iter)->from, m_jit.label());
1803 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1804 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1805 m_jit.link(notImm1, m_jit.label());
1806 m_jit.link(notImm2, m_jit.label());
1807 emitPutArg(X86::eax, 0);
1808 emitPutArg(X86::ecx, 4);
1809 emitCall(i, Machine::cti_op_lshift);
1810 emitPutResult(instruction[i + 1].u.operand);
1814 case op_loop_if_less: {
1815 emitSlowScriptCheck(i);
1817 unsigned target = instruction[i + 3].u.operand;
1818 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1820 m_jit.link(iter->from, m_jit.label());
1821 emitPutArg(X86::edx, 0);
1822 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
1823 emitCall(i, Machine::cti_op_loop_if_less);
1824 m_jit.testl_rr(X86::eax, X86::eax);
1825 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
1827 m_jit.link(iter->from, m_jit.label());
1828 m_jit.link((++iter)->from, m_jit.label());
1829 emitPutArg(X86::eax, 0);
1830 emitPutArg(X86::edx, 4);
1831 emitCall(i, Machine::cti_op_loop_if_less);
1832 m_jit.testl_rr(X86::eax, X86::eax);
1833 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
1838 case op_put_by_id: {
1839 m_jit.link(iter->from, m_jit.label());
1840 m_jit.link((++iter)->from, m_jit.label());
1842 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1843 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1844 emitPutArg(X86::eax, 0);
1845 emitPutArg(X86::edx, 8);
1846 X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_put_by_id);
1848 // Track the location of the call; this will be used to recover repatch information.
1849 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
1850 m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
1851 ++structureIDInstructionIndex;
1856 case op_get_by_id: {
1857 // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
1858 // so that we only need to track one pointer into the slow case code - we track a pointer to the location
1859 // of the call (which we can use to look up the repatch information), but should an array-length or
1860 // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
1861 // the distance from the call to the head of the slow case.
1863 m_jit.link(iter->from, m_jit.label());
1864 m_jit.link((++iter)->from, m_jit.label());
1867 X86Assembler::JmpDst coldPathBegin = m_jit.label();
1869 emitPutArg(X86::eax, 0);
1870 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1871 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1872 X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_get_by_id);
1873 ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
1874 emitPutResult(instruction[i + 1].u.operand);
1876 // Track the location of the call; this will be used to recover repatch information.
1877 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
1878 m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
1879 ++structureIDInstructionIndex;
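// The assertion above pins the distance from coldPathBegin to the call at repatchOffsetGetByIdSlowCaseCall.
// privateCompileGetByIdProto() and privateCompilePatchGetArrayLength() rely on that fixed distance to
// recover the head of this slow case from the recorded call return address when one of their stubs needs
// to bail out.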
1884 case op_resolve_global: {
1885 ++structureIDInstructionIndex;
1889 case op_loop_if_lesseq: {
1890 emitSlowScriptCheck(i);
1892 unsigned target = instruction[i + 3].u.operand;
1893 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1895 m_jit.link(iter->from, m_jit.label());
1896 emitPutArg(X86::edx, 0);
1897 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
1898 emitCall(i, Machine::cti_op_loop_if_lesseq);
1899 m_jit.testl_rr(X86::eax, X86::eax);
1900 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
1902 m_jit.link(iter->from, m_jit.label());
1903 m_jit.link((++iter)->from, m_jit.label());
1904 emitPutArg(X86::eax, 0);
1905 emitPutArg(X86::edx, 4);
1906 emitCall(i, Machine::cti_op_loop_if_lesseq);
1907 m_jit.testl_rr(X86::eax, X86::eax);
1908 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
1914 unsigned srcDst = instruction[i + 1].u.operand;
1915 X86Assembler::JmpSrc notImm = iter->from;
1916 m_jit.link((++iter)->from, m_jit.label());
1917 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
1918 m_jit.link(notImm, m_jit.label());
1919 emitPutArg(X86::eax, 0);
1920 emitCall(i, Machine::cti_op_pre_inc);
1921 emitPutResult(srcDst);
1925 case op_put_by_val: {
1926 // Normal slow cases - either the subscript is not an immediate int, or the base is not an array.
1927 X86Assembler::JmpSrc notImm = iter->from;
1928 m_jit.link((++iter)->from, m_jit.label());
1929 m_jit.link((++iter)->from, m_jit.label());
1930 emitFastArithIntToImmNoCheck(X86::edx);
1931 m_jit.link(notImm, m_jit.label());
1932 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1933 emitPutArg(X86::eax, 0);
1934 emitPutArg(X86::edx, 4);
1935 emitPutArg(X86::ecx, 8);
1936 emitCall(i, Machine::cti_op_put_by_val);
1937 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
1939 // slow cases for immediate int accesses to arrays
1940 m_jit.link((++iter)->from, m_jit.label());
1941 m_jit.link((++iter)->from, m_jit.label());
1942 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1943 emitPutArg(X86::eax, 0);
1944 emitPutArg(X86::edx, 4);
1945 emitPutArg(X86::ecx, 8);
1946 emitCall(i, Machine::cti_op_put_by_val_array);
1951 case op_loop_if_true: {
1952 emitSlowScriptCheck(i);
1954 m_jit.link(iter->from, m_jit.label());
1955 emitPutArg(X86::eax, 0);
1956 emitCall(i, Machine::cti_op_jtrue);
1957 m_jit.testl_rr(X86::eax, X86::eax);
1958 unsigned target = instruction[i + 2].u.operand;
1959 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
1964 unsigned srcDst = instruction[i + 1].u.operand;
1965 X86Assembler::JmpSrc notImm = iter->from;
1966 m_jit.link((++iter)->from, m_jit.label());
1967 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
1968 m_jit.link(notImm, m_jit.label());
1969 emitPutArg(X86::eax, 0);
1970 emitCall(i, Machine::cti_op_pre_dec);
1971 emitPutResult(srcDst);
1976 unsigned target = instruction[i + 3].u.operand;
1977 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1979 m_jit.link(iter->from, m_jit.label());
1980 emitPutArg(X86::edx, 0);
1981 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
1982 emitCall(i, Machine::cti_op_jless);
1983 m_jit.testl_rr(X86::eax, X86::eax);
1984 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
1986 m_jit.link(iter->from, m_jit.label());
1987 m_jit.link((++iter)->from, m_jit.label());
1988 emitPutArg(X86::eax, 0);
1989 emitPutArg(X86::edx, 4);
1990 emitCall(i, Machine::cti_op_jless);
1991 m_jit.testl_rr(X86::eax, X86::eax);
1992 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
1998 m_jit.link(iter->from, m_jit.label());
1999 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
2000 emitPutArg(X86::eax, 0);
2001 emitCall(i, Machine::cti_op_not);
2002 emitPutResult(instruction[i + 1].u.operand);
2007 m_jit.link(iter->from, m_jit.label());
2008 emitPutArg(X86::eax, 0);
2009 emitCall(i, Machine::cti_op_jtrue);
2010 m_jit.testl_rr(X86::eax, X86::eax);
2011 unsigned target = instruction[i + 2].u.operand;
2012 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 2 + target]); // inverted!
2017 unsigned srcDst = instruction[i + 2].u.operand;
2018 m_jit.link(iter->from, m_jit.label());
2019 m_jit.link((++iter)->from, m_jit.label());
2020 emitPutArg(X86::eax, 0);
2021 emitCall(i, Machine::cti_op_post_inc);
2022 emitPutResult(instruction[i + 1].u.operand);
2023 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
2024 emitPutResult(srcDst);
2029 m_jit.link(iter->from, m_jit.label());
2030 emitPutArg(X86::eax, 0);
2031 emitCall(i, Machine::cti_op_bitnot);
2032 emitPutResult(instruction[i + 1].u.operand);
2037 unsigned src1 = instruction[i + 2].u.operand;
2038 unsigned src2 = instruction[i + 3].u.operand;
2039 unsigned dst = instruction[i + 1].u.operand;
2040 if (getConstantImmediateNumericArg(src1)) {
2041 m_jit.link(iter->from, m_jit.label());
2042 emitGetPutArg(src1, 0, X86::ecx);
2043 emitPutArg(X86::eax, 4);
2044 emitCall(i, Machine::cti_op_bitand);
2046 } else if (getConstantImmediateNumericArg(src2)) {
2047 m_jit.link(iter->from, m_jit.label());
2048 emitPutArg(X86::eax, 0);
2049 emitGetPutArg(src2, 4, X86::ecx);
2050 emitCall(i, Machine::cti_op_bitand);
2053 m_jit.link(iter->from, m_jit.label());
2054 emitGetPutArg(src1, 0, X86::ecx);
2055 emitPutArg(X86::edx, 4);
2056 emitCall(i, Machine::cti_op_bitand);
2063 m_jit.link(iter->from, m_jit.label());
2064 emitPutArg(X86::eax, 0);
2065 emitCall(i, Machine::cti_op_jtrue);
2066 m_jit.testl_rr(X86::eax, X86::eax);
2067 unsigned target = instruction[i + 2].u.operand;
2068 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
2073 unsigned srcDst = instruction[i + 2].u.operand;
2074 m_jit.link(iter->from, m_jit.label());
2075 m_jit.link((++iter)->from, m_jit.label());
2076 emitPutArg(X86::eax, 0);
2077 emitCall(i, Machine::cti_op_post_dec);
2078 emitPutResult(instruction[i + 1].u.operand);
2079 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
2080 emitPutResult(srcDst);
2085 m_jit.link(iter->from, m_jit.label());
2086 emitPutArg(X86::eax, 0);
2087 emitPutArg(X86::edx, 4);
2088 emitCall(i, Machine::cti_op_bitxor);
2089 emitPutResult(instruction[i + 1].u.operand);
2094 m_jit.link(iter->from, m_jit.label());
2095 emitPutArg(X86::eax, 0);
2096 emitPutArg(X86::edx, 4);
2097 emitCall(i, Machine::cti_op_bitor);
2098 emitPutResult(instruction[i + 1].u.operand);
2103 m_jit.link(iter->from, m_jit.label());
2104 emitPutArg(X86::eax, 0);
2105 emitPutArg(X86::edx, 4);
2106 emitCall(i, Machine::cti_op_eq);
2107 emitPutResult(instruction[i + 1].u.operand);
2110 CTI_COMPILE_BINARY_OP_SLOW_CASE(op_stricteq);
2111 CTI_COMPILE_BINARY_OP_SLOW_CASE(op_nstricteq);
2114 X86Assembler::JmpSrc notImm1 = iter->from;
2115 X86Assembler::JmpSrc notImm2 = (++iter)->from;
2116 m_jit.link((++iter)->from, m_jit.label());
2117 emitFastArithReTagImmediate(X86::eax);
2118 emitFastArithReTagImmediate(X86::ecx);
2119 m_jit.link(notImm1, m_jit.label());
2120 m_jit.link(notImm2, m_jit.label());
2121 emitPutArg(X86::eax, 0);
2122 emitPutArg(X86::ecx, 4);
2123 emitCall(i, Machine::cti_op_mod);
2124 emitPutResult(instruction[i + 1].u.operand);
2128 CTI_COMPILE_BINARY_OP_SLOW_CASE(op_mul);
2132 case op_construct: {
2133 m_jit.link(iter->from, m_jit.label());
2134 m_jit.emitRestoreArgumentReference();
2136 // We jump to this slow case if the ctiCode for the codeBlock has not yet been generated; compile it now.
2137 emitCall(i, Machine::cti_vm_compile);
2138 m_jit.call_r(X86::eax);
2140 // Instead of checking for 0 we could initialize the CodeBlock::ctiCode to point to a trampoline that would trigger the translation.
2142 // In the interpreter the following actions are performed by op_ret:
2144 // Store the scope chain - returned by op_ret in %edx (see below) - to ExecState::m_scopeChain and CTI_ARGS_scopeChain on the stack.
2145 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
2146 emitPutCTIParam(X86::edx, CTI_ARGS_scopeChain);
2147 m_jit.movl_rm(X86::edx, OBJECT_OFFSET(ExecState, m_scopeChain), X86::ecx);
2148 // Restore ExecState::m_callFrame.
2149 m_jit.leal_mr(-(m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize) * sizeof(Register), X86::edi, X86::edx);
2150 m_jit.movl_rm(X86::edx, OBJECT_OFFSET(ExecState, m_callFrame), X86::ecx);
2151 // Restore CTI_ARGS_codeBlock.
2152 emitPutCTIParam(m_codeBlock, CTI_ARGS_codeBlock);
2154 emitPutResult(instruction[i + 1].u.operand);
2160 ASSERT_NOT_REACHED();
2164 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i]);
2167 ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
2170 void CTI::privateCompile()
2172 // Could use a popl_m, but would need to offset the following instruction if so.
2173 m_jit.popl_r(X86::ecx);
2174 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
2175 emitPutToCallFrameHeader(X86::ecx, RegisterFile::CTIReturnEIP);
2177 // Lazy copy of the scopeChain
2178 X86Assembler::JmpSrc callToUpdateScopeChain;
2179 if ((m_codeBlock->codeType == FunctionCode) && m_codeBlock->needsFullScopeChain) {
2180 m_jit.emitRestoreArgumentReference();
2181 callToUpdateScopeChain = m_jit.emitCall();
2184 privateCompileMainPass();
2185 privateCompileLinkPass();
2186 privateCompileSlowCases();
2188 ASSERT(m_jmpTable.isEmpty());
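// All code has now been emitted (hot paths, then jump linking, then slow cases). What follows is
// relocation: copy the buffer to its final home and fix up switch tables, exception handlers, call
// targets and structure-stub bookkeeping against that copy.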
2190 void* code = m_jit.copy();
2193 // Translate vPC offsets into addresses in JIT generated code, for switch tables.
2194 for (unsigned i = 0; i < m_switches.size(); ++i) {
2195 SwitchRecord record = m_switches[i];
2196 unsigned opcodeIndex = record.m_opcodeIndex;
2198 if (record.m_type != SwitchRecord::String) {
2199 ASSERT(record.m_type == SwitchRecord::Immediate || record.m_type == SwitchRecord::Character);
2200 ASSERT(record.m_jumpTable.m_simpleJumpTable->branchOffsets.size() == record.m_jumpTable.m_simpleJumpTable->ctiOffsets.size());
2202 record.m_jumpTable.m_simpleJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
2204 for (unsigned j = 0; j < record.m_jumpTable.m_simpleJumpTable->branchOffsets.size(); ++j) {
2205 unsigned offset = record.m_jumpTable.m_simpleJumpTable->branchOffsets[j];
2206 record.m_jumpTable.m_simpleJumpTable->ctiOffsets[j] = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_simpleJumpTable->ctiDefault;
2209 ASSERT(record.m_type == SwitchRecord::String);
2211 record.m_jumpTable.m_stringJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
2213 StringJumpTable::StringOffsetTable::iterator end = record.m_jumpTable.m_stringJumpTable->offsetTable.end();
2214 for (StringJumpTable::StringOffsetTable::iterator it = record.m_jumpTable.m_stringJumpTable->offsetTable.begin(); it != end; ++it) {
2215 unsigned offset = it->second.branchOffset;
2216 it->second.ctiOffset = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_stringJumpTable->ctiDefault;
2221 for (Vector<HandlerInfo>::iterator iter = m_codeBlock->exceptionHandlers.begin(); iter != m_codeBlock->exceptionHandlers.end(); ++iter)
2222 iter->nativeCode = m_jit.getRelocatedAddress(code, m_labels[iter->target]);
2224 // FIXME: There doesn't seem to be a way to hint to a hashmap that it should make a certain capacity available;
2225 // could be faster if we could do something like this:
2226 // m_codeBlock->ctiReturnAddressVPCMap.grow(m_calls.size());
2227 for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
2228 X86Assembler::link(code, iter->from, iter->to);
2229 m_codeBlock->ctiReturnAddressVPCMap.add(m_jit.getRelocatedAddress(code, iter->from), iter->opcodeIndex);
2232 if ((m_codeBlock->codeType == FunctionCode) && m_codeBlock->needsFullScopeChain)
2233 X86Assembler::link(code, callToUpdateScopeChain, (void*)Machine::cti_vm_updateScopeChain);
2235 // Link absolute addresses for jsr
2236 for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
2237 X86Assembler::linkAbsoluteAddress(code, iter->addrPosition, iter->target);
2239 for (unsigned i = 0; i < m_codeBlock->structureIDInstructions.size(); ++i) {
2240 StructureStubInfo& info = m_codeBlock->structureIDInstructions[i];
2241 info.callReturnLocation = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].callReturnLocation);
2242 info.hotPathBegin = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].hotPathBegin);
2245 m_codeBlock->ctiCode = code;
2248 void CTI::privateCompileGetByIdSelf(StructureID* structureID, size_t cachedOffset, void* returnAddress)
2250 // Check eax is an object of the right StructureID.
2251 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2252 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2253 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2254 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2256 // Checks out okay! - getDirectOffset
2257 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2258 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
2261 void* code = m_jit.copy();
2264 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2265 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2267 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2269 ctiRepatchCallByReturnAddress(returnAddress, code);
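// In rough C terms the stub assembled above behaves like the following sketch (isImmediate/fail are
// illustrative names; the real routine is the machine code emitted through m_jit, with the failure
// branches linked to cti_op_get_by_id_fail):
//   if (isImmediate(base) || base->m_structureID != structureID)
//       fail();
//   result = base->m_propertyStorage[cachedOffset];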
2272 void CTI::privateCompileGetByIdProto(StructureID* structureID, StructureID* prototypeStructureID, size_t cachedOffset, void* returnAddress)
2274 #if USE(CTI_REPATCH_PIC)
2275 StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
2277 // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
2278 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2280 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
2281 // referencing the prototype object - let's speculatively load its property storage nice and early!)
2282 JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
2283 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2284 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2286 // check eax is an object of the right StructureID.
2287 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2288 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2289 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2290 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2292 // Check that the prototype object's StructureID has not changed.
2293 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2294 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
2295 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
2297 // Checks out okay! - getDirectOffset
2298 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::ecx);
2300 X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
2302 void* code = m_jit.copy();
2305 // Use the repatch information to link the failure cases back to the original slow case routine.
2306 void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
2307 X86Assembler::link(code, failureCases1, slowCaseBegin);
2308 X86Assembler::link(code, failureCases2, slowCaseBegin);
2309 X86Assembler::link(code, failureCases3, slowCaseBegin);
2311 // On success, return back to the hot path code, at a point where it will perform the store to dest for us.
2312 intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
2313 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
2315 // Track the stub we have created so that it will be deleted later.
2316 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2318 // Finally, repatch the jump to the slow case in the hot path to jump here instead.
2319 // FIXME: should revert this repatching, on failure.
2320 intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
2321 X86Assembler::repatchBranchOffset(jmpLocation, code);
2323 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
2324 // referencing the prototype object - let's speculatively load its property storage nice and early!)
2325 JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
2326 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2327 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2329 // check eax is an object of the right StructureID.
2330 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2331 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2332 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2333 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2335 // Check that the prototype object's StructureID has not changed.
2336 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2337 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
2338 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
2340 // Checks out okay! - getDirectOffset
2341 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
2345 void* code = m_jit.copy();
2348 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2349 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2350 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2352 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2354 ctiRepatchCallByReturnAddress(returnAddress, code);
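// Two strategies are compiled here. With CTI_REPATCH_PIC the stub is wired into the existing inline
// cache: failures jump back into the original slow case, and success jumps back into the hot path at the
// point that stores the result to the destination. The non-PIC variant is a standalone routine whose
// failure cases simply call Machine::cti_op_get_by_id_fail.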
2358 void CTI::privateCompileGetByIdChain(StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset, void* returnAddress)
2362 Vector<X86Assembler::JmpSrc> bucketsOfFail;
2364 // Check eax is an object of the right StructureID.
2365 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2366 bucketsOfFail.append(m_jit.emitUnlinkedJne());
2367 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2368 bucketsOfFail.append(m_jit.emitUnlinkedJne());
2370 StructureID* currStructureID = structureID;
2371 RefPtr<StructureID>* chainEntries = chain->head();
2372 JSObject* protoObject = 0;
2373 for (unsigned i = 0; i < count; ++i) {
2374 protoObject = static_cast<JSObject*>(currStructureID->prototypeForLookup(m_exec));
2375 currStructureID = chainEntries[i].get();
2377 // Check that the prototype object's StructureID has not changed.
2378 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2379 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(currStructureID), static_cast<void*>(protoStructureIDAddress));
2380 bucketsOfFail.append(m_jit.emitUnlinkedJne());
2382 ASSERT(protoObject);
2384 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2385 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2386 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
2389 bucketsOfFail.append(m_jit.emitUnlinkedJmp());
2391 void* code = m_jit.copy();
2394 for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
2395 X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2397 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2399 ctiRepatchCallByReturnAddress(returnAddress, code);
2402 void CTI::privateCompilePutByIdReplace(StructureID* structureID, size_t cachedOffset, void* returnAddress)
2404 // check eax is an object of the right StructureID.
2405 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2406 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2407 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2408 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2410 // checks out okay! - putDirectOffset
2411 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2412 m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
2415 void* code = m_jit.copy();
2418 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
2419 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
2421 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2423 ctiRepatchCallByReturnAddress(returnAddress, code);
2428 static JSValue* SFX_CALL transitionObject(StructureID* newStructureID, size_t cachedOffset, JSObject* baseObject, JSValue* value)
2430 StructureID* oldStructureID = newStructureID->previousID();
2432 baseObject->transitionTo(newStructureID);
2434 if (oldStructureID->propertyMap().storageSize() == JSObject::inlineStorageCapacity)
2435 baseObject->allocatePropertyStorage(oldStructureID->propertyMap().storageSize(), oldStructureID->propertyMap().size());
2437 baseObject->putDirectOffset(cachedOffset, value);
2443 static inline bool transitionWillNeedStorageRealloc(StructureID* oldStructureID, StructureID* newStructureID)
2445 if (oldStructureID->propertyMap().storageSize() == JSObject::inlineStorageCapacity)
2448 if (oldStructureID->propertyMap().storageSize() < JSObject::inlineStorageCapacity)
2451 if (oldStructureID->propertyMap().size() != newStructureID->propertyMap().size())
2457 void CTI::privateCompilePutByIdTransition(StructureID* oldStructureID, StructureID* newStructureID, size_t cachedOffset, StructureIDChain* sIDC, void* returnAddress)
2459 Vector<X86Assembler::JmpSrc, 16> failureCases;
2460 // check eax is an object of the right StructureID.
2461 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2462 failureCases.append(m_jit.emitUnlinkedJne());
2463 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(oldStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2464 failureCases.append(m_jit.emitUnlinkedJne());
2465 Vector<X86Assembler::JmpSrc> successCases;
2468 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
2469 // proto(ecx) = baseObject->structureID()->prototype()
2470 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
2471 failureCases.append(m_jit.emitUnlinkedJne());
2472 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
2474 // ecx = baseObject->m_structureID
2475 for (RefPtr<StructureID>* it = sIDC->head(); *it; ++it) {
2476 // null check the prototype
2477 m_jit.cmpl_i32r(reinterpret_cast<intptr_t>(jsNull()), X86::ecx);
2478 successCases.append(m_jit.emitUnlinkedJe());
2480 // Check the structure id
2481 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(it->get()), OBJECT_OFFSET(JSCell, m_structureID), X86::ecx);
2482 failureCases.append(m_jit.emitUnlinkedJne());
2484 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::ecx, X86::ecx);
2485 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
2486 failureCases.append(m_jit.emitUnlinkedJne());
2487 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
2490 failureCases.append(m_jit.emitUnlinkedJne());
2491 for (unsigned i = 0; i < successCases.size(); ++i)
2492 m_jit.link(successCases[i], m_jit.label());
2494 X86Assembler::JmpSrc callTarget;
2495 // Fast case, don't need to do any heavy lifting, so don't bother making a call.
2496 if (!transitionWillNeedStorageRealloc(oldStructureID, newStructureID)) {
2497 // Assumes m_refCount can be decremented directly; the refcount decrement is safe because
2498 // the CodeBlock should ensure oldStructureID->m_refCount > 0.
2499 m_jit.subl_i8m(1, reinterpret_cast<void*>(oldStructureID));
2500 m_jit.addl_i8m(1, reinterpret_cast<void*>(newStructureID));
2501 m_jit.movl_i32m(reinterpret_cast<uint32_t>(newStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2504 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2505 m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
2507 // Slow case transition - we're going to need to do quite a bit of work,
2508 // so just make a call
2509 m_jit.pushl_r(X86::edx);
2510 m_jit.pushl_r(X86::eax);
2511 m_jit.movl_i32r(cachedOffset, X86::eax);
2512 m_jit.pushl_r(X86::eax);
2513 m_jit.movl_i32r(reinterpret_cast<uint32_t>(newStructureID), X86::eax);
2514 m_jit.pushl_r(X86::eax);
2515 callTarget = m_jit.emitCall();
2516 m_jit.addl_i32r(4 * sizeof(void*), X86::esp);
2519 void* code = m_jit.copy();
2522 for (unsigned i = 0; i < failureCases.size(); ++i)
2523 X86Assembler::link(code, failureCases[i], reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
2525 if (transitionWillNeedStorageRealloc(oldStructureID, newStructureID))
2526 X86Assembler::link(code, callTarget, reinterpret_cast<void*>(transitionObject));
2528 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2530 ctiRepatchCallByReturnAddress(returnAddress, code);
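// When the transition needs storage reallocation, the stub pushes value, base object, cachedOffset and
// the new StructureID - the reverse of transitionObject's parameter order - makes the call (linked above
// to transitionObject), and then pops the four words back off the stack.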
2533 void* CTI::privateCompileArrayLengthTrampoline()
2535 // Check eax is an array
2536 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2537 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2538 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
2539 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2541 // Checks out okay! - get the length from the storage
2542 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::eax);
2543 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::eax, X86::eax);
2545 m_jit.addl_rr(X86::eax, X86::eax);
2546 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
2547 m_jit.addl_i8r(1, X86::eax);
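// Doubling the length and then adding one re-tags it as an immediate integer (assuming the usual
// JSImmediate encoding of (value << 1) | 1); the jo emitted after the doubling diverts lengths too large
// to fit in an immediate to the generic fail path.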
2551 void* code = m_jit.copy();
2554 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2555 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2556 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2561 void* CTI::privateCompileStringLengthTrampoline()
2563 // Check eax is a string
2564 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2565 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2566 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsStringVptr), X86::eax);
2567 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2569 // Checks out okay! - get the length from the UString.
2570 m_jit.movl_mr(OBJECT_OFFSET(JSString, m_value) + OBJECT_OFFSET(UString, m_rep), X86::eax, X86::eax);
2571 m_jit.movl_mr(OBJECT_OFFSET(UString::Rep, len), X86::eax, X86::eax);
2573 m_jit.addl_rr(X86::eax, X86::eax);
2574 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
2575 m_jit.addl_i8r(1, X86::eax);
2579 void* code = m_jit.copy();
2582 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2583 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2584 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2589 void CTI::patchGetByIdSelf(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
2591 StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
2593 // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
2594 // Should probably go to Machine::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
2595 ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_get_by_id_generic));
2597 // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
2598 X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
2599 X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdStructureID, reinterpret_cast<uint32_t>(structureID));
2602 void CTI::patchPutByIdReplace(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
2604 StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
2606 // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
2607 // Should probably go to Machine::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
2608 ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_put_by_id_generic));
2610 // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
2611 X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
2612 X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdStructureID, reinterpret_cast<uint32_t>(structureID));
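// Both patch routines above rewrite the inline cache in place: repatchImmediate swaps the StructureID
// that the hot path compares against, and repatchDisplacement swaps the 32-bit offset used by the
// property-storage access, each at a fixed offset from hotPathBegin.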
2615 void CTI::privateCompilePatchGetArrayLength(void* returnAddress)
2617 StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
2619 // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
2620 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2622 // Check eax is an array
2623 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2624 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2625 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
2626 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2628 // Checks out okay! - get the length from the storage
2629 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
2630 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::ecx, X86::ecx);
2632 m_jit.addl_rr(X86::ecx, X86::ecx);
2633 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
2634 m_jit.addl_i8r(1, X86::ecx);
2636 X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
2638 void* code = m_jit.copy();
2641 // Use the repatch information to link the failure cases back to the original slow case routine.
2642 void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
2643 X86Assembler::link(code, failureCases1, slowCaseBegin);
2644 X86Assembler::link(code, failureCases2, slowCaseBegin);
2645 X86Assembler::link(code, failureCases3, slowCaseBegin);
2647 // On success, return back to the hot path code, at a point where it will perform the store to dest for us.
2648 intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
2649 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
2651 // Track the stub we have created so that it will be deleted later.
2652 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2654 // Finally, repatch the jump to the slow case in the hot path to jump here instead.
2655 // FIXME: should revert this repatching, on failure.
2656 intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
2657 X86Assembler::repatchBranchOffset(jmpLocation, code);
2660 void CTI::emitGetVariableObjectRegister(X86Assembler::RegisterID variableObject, int index, X86Assembler::RegisterID dst)
2662 m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, dst);
2663 m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), dst, dst);
2664 m_jit.movl_mr(index * sizeof(Register), dst, dst);
2667 void CTI::emitPutVariableObjectRegister(X86Assembler::RegisterID src, X86Assembler::RegisterID variableObject, int index)
2669 m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, variableObject);
2670 m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), variableObject, variableObject);
2671 m_jit.movl_rm(src, index * sizeof(Register), variableObject);
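// Both helpers reach a variable object's registers indirectly: load the JSVariableObject's d pointer,
// then its registers array, then index that array by index * sizeof(Register).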
2676 void* CTI::compileRegExp(ExecState* exec, const UString& pattern, unsigned* numSubpatterns_ptr, const char** error_ptr, bool ignoreCase, bool multiline)
2678 // TODO: better error messages
2679 if (pattern.size() > MaxPatternSize) {
2680 *error_ptr = "regular expression too large";
2684 X86Assembler jit(exec->machine()->jitCodeBuffer());
2685 WRECParser parser(pattern, ignoreCase, multiline, jit);
2687 jit.emitConvertToFastCall();
2689 // Preserve regs & initialize outputRegister.
2690 jit.pushl_r(WRECGenerator::outputRegister);
2691 jit.pushl_r(WRECGenerator::currentValueRegister);
2692 // Push pos onto the stack, both to preserve it and to make it available as a parameter to parseDisjunction.
2693 jit.pushl_r(WRECGenerator::currentPositionRegister);
2694 // load output pointer
2699 , X86::esp, WRECGenerator::outputRegister);
2701 // restart point on match fail.
2702 WRECGenerator::JmpDst nextLabel = jit.label();
2704 // (1) Parse Disjunction:
2706 // Parsing the disjunction should fully consume the pattern.
2707 JmpSrcVector failures;
2708 parser.parseDisjunction(failures);
2709 if (!parser.isEndOfPattern()) {
2710 parser.m_err = WRECParser::Error_malformedPattern;
2713 // TODO: better error messages
2714 *error_ptr = "TODO: better error messages";
2719 // Set return value & pop registers from the stack.
2721 jit.testl_rr(WRECGenerator::outputRegister, WRECGenerator::outputRegister);
2722 WRECGenerator::JmpSrc noOutput = jit.emitUnlinkedJe();
2724 jit.movl_rm(WRECGenerator::currentPositionRegister, 4, WRECGenerator::outputRegister);
2725 jit.popl_r(X86::eax);
2726 jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
2727 jit.popl_r(WRECGenerator::currentValueRegister);
2728 jit.popl_r(WRECGenerator::outputRegister);
2731 jit.link(noOutput, jit.label());
2733 jit.popl_r(X86::eax);
2734 jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
2735 jit.popl_r(WRECGenerator::currentValueRegister);
2736 jit.popl_r(WRECGenerator::outputRegister);
2740 // All failures link to here. Advance the start point and, if it is still in range, loop.
2741 // Otherwise, return the failure value.
2742 WRECGenerator::JmpDst here = jit.label();
2743 for (unsigned i = 0; i < failures.size(); ++i)
2744 jit.link(failures[i], here);
2747 jit.movl_mr(X86::esp, WRECGenerator::currentPositionRegister);
2748 jit.addl_i8r(1, WRECGenerator::currentPositionRegister);
2749 jit.movl_rm(WRECGenerator::currentPositionRegister, X86::esp);
2750 jit.cmpl_rr(WRECGenerator::lengthRegister, WRECGenerator::currentPositionRegister);
2751 jit.link(jit.emitUnlinkedJle(), nextLabel);
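// Every failed parse path links to `here`: the match position saved on the stack is advanced by one and,
// while it is still <= the subject length, the matcher loops back to nextLabel to retry from the next
// character. Once it runs off the end, execution falls through to the code below, which pops the saved
// position and returns -1 to signal no match.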
2753 jit.addl_i8r(4, X86::esp);
2755 jit.movl_i32r(-1, X86::eax);
2756 jit.popl_r(WRECGenerator::currentValueRegister);
2757 jit.popl_r(WRECGenerator::outputRegister);
2760 *numSubpatterns_ptr = parser.m_numSubpatterns;
2762 void* code = jit.copy();
2767 #endif // ENABLE(WREC)
2771 #endif // ENABLE(CTI)