2 * Copyright (C) 2008 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "CodeBlock.h"
34 #include "wrec/WREC.h"
// Hand-written trampolines used to enter and leave CTI (JIT) generated code,
// GCC / x86 flavour, written as inline-assembly string fragments.
// NOTE(review): this excerpt is elided — the asm( openers, register
// saves/restores and ret instructions are not visible here.
40 #if COMPILER(GCC) && PLATFORM(X86)
42 ".globl _ctiTrampoline" "\n"
43 "_ctiTrampoline:" "\n"
// Reserve the CTI argument area on the stack.
46 "subl $0x24, %esp" "\n"
47 "movl $512, %esi" "\n" // esi := tick count — presumably the timeout counter reloaded in emitSlowScriptCheck; confirm
48 "call *0x30(%esp)" "\n" //0x30 = 0x0C * 4, 0x0C = CTI_ARGS_code
49 "addl $0x24, %esp" "\n"
// Trampoline taken when JIT code throws: checks for a pending exception on
// the ExecState fetched from the CTI argument area, then calls the C++ throw
// handler.
56 ".globl _ctiVMThrowTrampoline" "\n"
57 "_ctiVMThrowTrampoline:" "\n"
59 "movl 0x34(%esp), %ecx" "\n" //0x34 = 0x0D * 4, 0x0D = CTI_ARGS_exec
60 "cmpl $0, 8(%ecx)" "\n" // NOTE(review): 8(%ecx) is presumably ExecState::m_exception — confirm offset
65 "call __ZN3JSC7Machine12cti_vm_throwEPv" "\n"
66 "addl $0x24, %esp" "\n"
// MSVC counterpart of the GCC trampoline above: a naked function whose
// hand-written __asm body (not visible in this excerpt) enters JIT code.
// The parameter list mirrors the CTI argument area laid out on the stack.
76 __declspec(naked) JSValue* ctiTrampoline(void* code, ExecState* exec, RegisterFile* registerFile, Register* r, ScopeChainNode* scopeChain, CodeBlock* codeBlock, JSValue** exception, Profiler**)
// MSVC counterpart of _ctiVMThrowTrampoline: forwards to the C++ throw
// handler. NOTE(review): the surrounding __asm { ... } lines are missing
// from this excerpt.
92 __declspec(naked) void ctiVMThrowTrampoline()
96 call JSC::Machine::cti_vm_throw;
109 // get arg puts an arg from the SF register array into a h/w register
// If 'src' indexes a constant register, materialise the JSValue* as a 32-bit
// immediate load; otherwise load it from the register file (based at edi).
110 ALWAYS_INLINE void CTI::emitGetArg(unsigned src, X86Assembler::RegisterID dst)
112 // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
113 if (src < m_codeBlock->constantRegisters.size()) {
114 JSValue* js = m_codeBlock->constantRegisters[src].jsValue(m_exec);
115 m_jit.movl_i32r(reinterpret_cast<unsigned>(js), dst);
// (else branch — non-constant register; the 'else' line is missing from this excerpt)
117 m_jit.movl_mr(src * sizeof(Register), X86::edi, dst);
120 // get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
// Constant registers are stored as an immediate; others go via 'scratch'.
// The '+ sizeof(void*)' presumably skips the return-address slot at esp — confirm.
121 ALWAYS_INLINE void CTI::emitGetPutArg(unsigned src, unsigned offset, X86Assembler::RegisterID scratch)
123 if (src < m_codeBlock->constantRegisters.size()) {
124 JSValue* js = m_codeBlock->constantRegisters[src].jsValue(m_exec);
125 m_jit.movl_i32m(reinterpret_cast<unsigned>(js), offset + sizeof(void*), X86::esp);
// (else branch — load from the register file, then store to the stack slot)
127 m_jit.movl_mr(src * sizeof(Register), X86::edi, scratch);
128 m_jit.movl_rm(scratch, offset + sizeof(void*), X86::esp);
132 // puts an arg onto the stack, as an arg to a context threaded function.
133 ALWAYS_INLINE void CTI::emitPutArg(X86Assembler::RegisterID src, unsigned offset)
135 m_jit.movl_rm(src, offset + sizeof(void*), X86::esp)
// Stores a 32-bit constant into an outgoing-argument stack slot (same slot
// layout as emitPutArg above).
138 ALWAYS_INLINE void CTI::emitPutArgConstant(unsigned value, unsigned offset)
140 m_jit.movl_i32m(value, offset + sizeof(void*), X86::esp);
// Returns the JSValue* for operand 'src' if it is a constant register holding
// an immediate number, otherwise 0. (The trailing 'return 0' for the
// non-constant case is missing from this excerpt.)
143 ALWAYS_INLINE JSValue* CTI::getConstantImmediateNumericArg(unsigned src)
145 if (src < m_codeBlock->constantRegisters.size()) {
146 JSValue* js = m_codeBlock->constantRegisters[src].jsValue(m_exec);
147 return JSImmediate::isNumber(js) ? js : 0;
// Writes a register back into one of the CTI parameter slots on the stack;
// 'name' is a CTI_ARGS_* index (pointer-sized slots from esp).
152 ALWAYS_INLINE void CTI::emitPutCTIParam(X86Assembler::RegisterID from, unsigned name)
154 m_jit.movl_rm(from, name * sizeof(void*), X86::esp);
// Loads one of the CTI parameter slots (CTI_ARGS_* index) into a register.
157 ALWAYS_INLINE void CTI::emitGetCTIParam(unsigned name, X86Assembler::RegisterID to)
159 m_jit.movl_mr(name * sizeof(void*), X86::esp, to);
// Stores a register into a call-frame-header entry. The header lives below
// the locals, so the offset is negative relative to edi (the register file
// base), computed from numLocals + CallFrameHeaderSize.
162 ALWAYS_INLINE void CTI::emitPutToCallFrameHeader(X86Assembler::RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
164 m_jit.movl_rm(from, -((m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize) - entry) * sizeof(Register), X86::edi);
// Loads a call-frame-header entry into a register; mirror of
// emitPutToCallFrameHeader (same negative-offset addressing off edi).
167 ALWAYS_INLINE void CTI::emitGetFromCallFrameHeader(RegisterFile::CallFrameHeaderEntry entry, X86Assembler::RegisterID to)
169 m_jit.movl_mr(-((m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize) - entry) * sizeof(Register), X86::edi, to);
// Stores a hardware register into virtual register 'dst' in the register
// file (based at edi).
172 ALWAYS_INLINE void CTI::emitPutResult(unsigned dst, X86Assembler::RegisterID from)
174 m_jit.movl_rm(from, dst * sizeof(Register), X86::edi);
175 // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
// Sampling-tool flag: non-zero while execution is inside a C++ helper called
// from JIT code (set/cleared around every emitCall below).
178 #if ENABLE(SAMPLING_TOOL)
179 unsigned inCalledCode = 0;
// Overwrites a return address slot. Body is missing from this excerpt —
// presumably '*where = what'; confirm against the full file.
182 void ctiSetReturnAddress(void** where, void* what)
// Repatches the call instruction that returns to 'where': writes the
// displacement (what - where) into the 4 bytes immediately before the return
// address, i.e. the rel32 operand of the preceding x86 call.
187 void ctiRepatchCallByReturnAddress(void* where, void* what)
189 (static_cast<void**>(where))[-1] = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(what) - reinterpret_cast<uintptr_t>(where));
// Release-build variant — presumably an empty no-op (the #ifndef NDEBUG
// guard lines and body are missing from this excerpt).
194 ALWAYS_INLINE void CTI::emitDebugExceptionCheck()
// Debug-build variant: after each helper call, emits code that loads the
// ExecState and checks m_exception; if none is pending it jumps over the
// failure path (the failure-path instruction itself is missing from this
// excerpt, between lines 204 and 206).
200 ALWAYS_INLINE void CTI::emitDebugExceptionCheck()
202 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
203 m_jit.cmpl_i32m(0, OBJECT_OFFSET(ExecState, m_exception), X86::ecx);
204 X86Assembler::JmpSrc noException = m_jit.emitUnlinkedJe();
206 m_jit.link(noException, m_jit.label());
// Debug aid: classifies the two source operands of the current opcode into
// single-character type codes ('i' int, 'b' bool, 'u' undefined, 'n' null,
// 's' string, 'o' object, '*' unknown/non-constant) and prints any that are
// known. NOTE(review): several lines (the 'char which1/which2' declarations
// and parts of the ternaries) are missing from this excerpt.
209 void CTI::printOpcodeOperandTypes(unsigned src1, unsigned src2)
212 if (src1 < m_codeBlock->constantRegisters.size()) {
213 JSValue* js = m_codeBlock->constantRegisters[src1].jsValue(m_exec);
215 JSImmediate::isImmediate(js) ?
216 (JSImmediate::isNumber(js) ? 'i' :
217 JSImmediate::isBoolean(js) ? 'b' :
218 js->isUndefined() ? 'u' :
219 js->isNull() ? 'n' : '?')
221 (js->isString() ? 's' :
222 js->isObject() ? 'o' :
226 if (src2 < m_codeBlock->constantRegisters.size()) {
227 JSValue* js = m_codeBlock->constantRegisters[src2].jsValue(m_exec);
229 JSImmediate::isImmediate(js) ?
230 (JSImmediate::isNumber(js) ? 'i' :
231 JSImmediate::isBoolean(js) ? 'b' :
232 js->isUndefined() ? 'u' :
233 js->isNull() ? 'n' : '?')
235 (js->isString() ? 's' :
236 js->isObject() ? 'o' :
// NOTE(review): bitwise '|' below (rather than '||') evaluates both sides;
// harmless here but worth confirming it is intentional.
239 if ((which1 != '*') | (which2 != '*'))
240 fprintf(stderr, "Types %c %c\n", which1, which2);
// Emits a call to a C++ helper returning a JSValue* (CTIHelper_j), records
// it in m_calls for later linking to 'helper', and emits the debug exception
// check. Sampling-tool builds bracket the call by toggling inCalledCode.
// (#endif lines for the sampling guards are missing from this excerpt.)
245 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_j helper)
247 #if ENABLE(SAMPLING_TOOL)
248 m_jit.movl_i32m(1, &inCalledCode);
250 X86Assembler::JmpSrc call = m_jit.emitCall();
251 m_calls.append(CallRecord(call, helper, opcodeIndex));
252 emitDebugExceptionCheck();
253 #if ENABLE(SAMPLING_TOOL)
254 m_jit.movl_i32m(0, &inCalledCode);
// Same as the CTIHelper_j overload above, for helpers of type CTIHelper_p.
260 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_p helper)
262 #if ENABLE(SAMPLING_TOOL)
263 m_jit.movl_i32m(1, &inCalledCode);
265 X86Assembler::JmpSrc call = m_jit.emitCall();
266 m_calls.append(CallRecord(call, helper, opcodeIndex));
267 emitDebugExceptionCheck();
268 #if ENABLE(SAMPLING_TOOL)
269 m_jit.movl_i32m(0, &inCalledCode);
// Same as the CTIHelper_j overload above, for helpers of type CTIHelper_b.
275 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_b helper)
277 #if ENABLE(SAMPLING_TOOL)
278 m_jit.movl_i32m(1, &inCalledCode);
280 X86Assembler::JmpSrc call = m_jit.emitCall();
281 m_calls.append(CallRecord(call, helper, opcodeIndex));
282 emitDebugExceptionCheck();
283 #if ENABLE(SAMPLING_TOOL)
284 m_jit.movl_i32m(0, &inCalledCode);
// Same as the CTIHelper_j overload above, for helpers of type CTIHelper_v.
290 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_v helper)
292 #if ENABLE(SAMPLING_TOOL)
293 m_jit.movl_i32m(1, &inCalledCode);
295 X86Assembler::JmpSrc call = m_jit.emitCall();
296 m_calls.append(CallRecord(call, helper, opcodeIndex));
297 emitDebugExceptionCheck();
298 #if ENABLE(SAMPLING_TOOL)
299 m_jit.movl_i32m(0, &inCalledCode);
// Same as the CTIHelper_j overload above, for helpers of type CTIHelper_s.
// NOTE(review): the five emitCall overloads are textually identical — a
// template would remove the duplication once the file compiles again.
305 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_s helper)
307 #if ENABLE(SAMPLING_TOOL)
308 m_jit.movl_i32m(1, &inCalledCode);
310 X86Assembler::JmpSrc call = m_jit.emitCall();
311 m_calls.append(CallRecord(call, helper, opcodeIndex));
312 emitDebugExceptionCheck();
313 #if ENABLE(SAMPLING_TOOL)
314 m_jit.movl_i32m(0, &inCalledCode);
// Branches to a slow case if 'reg' holds an immediate (tag bits set) rather
// than a JSCell pointer; the unlinked jump is queued in m_slowCases.
320 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotJSCell(X86Assembler::RegisterID reg, unsigned opcodeIndex)
322 m_jit.testl_i32r(JSImmediate::TagMask, reg);
323 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), opcodeIndex));
// Branches to a slow case if 'reg' does not carry the immediate-integer tag
// bit (i.e. is not an immediate number).
326 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImm(X86Assembler::RegisterID reg, unsigned opcodeIndex)
328 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, reg);
329 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), opcodeIndex));
// Branches to a slow case unless BOTH registers are immediate integers:
// AND-ing the two values into ecx leaves the tag bit set only if both had
// it, then reuses the single-register check. Clobbers ecx.
332 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImms(X86Assembler::RegisterID reg1, X86Assembler::RegisterID reg2, unsigned opcodeIndex)
334 m_jit.movl_rr(reg1, X86::ecx);
335 m_jit.andl_rr(reg2, X86::ecx);
336 emitJumpSlowCaseIfNotImm(X86::ecx, opcodeIndex);
// Compile-time version of de-tagging: strips the integer tag bit from a
// constant immediate-number JSValue*, yielding the raw bits to bake into
// generated arithmetic.
339 ALWAYS_INLINE unsigned CTI::getDeTaggedConstantImmediate(JSValue* imm)
341 ASSERT(JSImmediate::isNumber(imm));
342 return reinterpret_cast<unsigned>(imm) & ~JSImmediate::TagBitTypeInteger;
// Emits code to strip the integer tag bit from 'reg' at run time.
345 ALWAYS_INLINE void CTI::emitFastArithDeTagImmediate(X86Assembler::RegisterID reg)
347 // op_mod relies on this being a sub - setting zf if result is 0.
348 m_jit.subl_i8r(JSImmediate::TagBitTypeInteger, reg);
// Emits code to restore the integer tag bit on 'reg'. Uses add (not or) —
// correct only when the tag bit is currently clear.
351 ALWAYS_INLINE void CTI::emitFastArithReTagImmediate(X86Assembler::RegisterID reg)
353 m_jit.addl_i8r(JSImmediate::TagBitTypeInteger, reg);
// Emits code to set the integer tag bit whether or not it is already set
// (or is idempotent, unlike the add in emitFastArithReTagImmediate).
356 ALWAYS_INLINE void CTI::emitFastArithPotentiallyReTagImmediate(X86Assembler::RegisterID reg)
358 m_jit.orl_i32r(JSImmediate::TagBitTypeInteger, reg);
// Emits code converting an immediate-number payload to a machine int via
// arithmetic right shift by 1 (drops the tag bit, preserves sign).
361 ALWAYS_INLINE void CTI::emitFastArithImmToInt(X86Assembler::RegisterID reg)
363 m_jit.sarl_i8r(1, reg);
// Emits code converting a machine int back to an immediate: shift left by 1
// (as reg+reg so overflow sets OF), branch to the slow case on overflow,
// then re-tag.
366 ALWAYS_INLINE void CTI::emitFastArithIntToImmOrSlowCase(X86Assembler::RegisterID reg, unsigned opcodeIndex)
368 m_jit.addl_rr(reg, reg);
369 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), opcodeIndex));
370 emitFastArithReTagImmediate(reg);
// As emitFastArithIntToImmOrSlowCase, but without the overflow check — for
// callers that already know the value fits in an immediate.
373 ALWAYS_INLINE void CTI::emitFastArithIntToImmNoCheck(X86Assembler::RegisterID reg)
375 m_jit.addl_rr(reg, reg);
376 emitFastArithReTagImmediate(reg);
// Constructor: binds the assembler to the machine's shared JIT code buffer
// and sizes the per-instruction label / structure-stub bookkeeping tables
// from the code block. 'codeBlock' may be null (tables sized 0).
// NOTE(review): initializers for m_machine/m_exec (original lines 381-382)
// are missing from this excerpt.
379 CTI::CTI(Machine* machine, ExecState* exec, CodeBlock* codeBlock)
380 : m_jit(machine->jitCodeBuffer())
383 , m_codeBlock(codeBlock)
384 , m_labels(codeBlock ? codeBlock->instructions.size() : 0)
385 , m_structureStubCompilationInfo(codeBlock ? codeBlock->structureIDInstructions.size() : 0)
// Helpers for the main compile loop: CTI_COMPILE_BINARY_OP stages two
// operands as stack args, calls the cti_<name> helper and stores its result;
// CTI_COMPILE_UNARY_OP does the same for one operand. (No comments are
// inserted between the '\'-continued lines below, as that would break the
// macro continuations; note the bodies are partially elided in this excerpt.)
389 #define CTI_COMPILE_BINARY_OP(name) \
391 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
392 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
393 emitCall(i, Machine::cti_##name); \
394 emitPutResult(instruction[i + 1].u.operand); \
399 #define CTI_COMPILE_UNARY_OP(name) \
401 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
402 emitCall(i, Machine::cti_##name); \
403 emitPutResult(instruction[i + 1].u.operand); \
// Sampling-tool global: the opcode currently being executed by JIT code
// (written by the generated code; -1 means "none").
408 #if ENABLE(SAMPLING_TOOL)
409 OpcodeID currentOpcodeID = static_cast<OpcodeID>(-1);
// Compiles op_call / op_call_eval / op_construct. Stages the helper
// arguments (instruction pointer, arg count, register offset, this/ctor
// value), then emits a fast path that vptr-checks for a JSFunction and a
// fallback that calls the host-function helper. For eval, first tries
// cti_op_call_eval and skips the rest unless it returned impossibleValue().
// NOTE(review): several structural lines ('} else {', closing braces, the
// OpCallEval sub-branches) are missing from this excerpt.
412 void CTI::compileOpCall(Instruction* instruction, unsigned i, CompileOpCallType type)
414 if (type == OpConstruct) {
415 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 16);
416 emitPutArgConstant(instruction[i + 5].u.operand, 12);
417 emitPutArgConstant(instruction[i + 4].u.operand, 8);
// For construct, arg slot 4 is the constructor's 'this'/proto operand.
418 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
// (else branch — plain call / eval)
420 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 16);
421 emitPutArgConstant(instruction[i + 5].u.operand, 12);
422 emitPutArgConstant(instruction[i + 4].u.operand, 8);
423 // FIXME: should this be loaded dynamically off m_exec?
424 int thisVal = instruction[i + 3].u.operand;
425 if (thisVal == missingThisObjectMarker()) {
426 emitPutArgConstant(reinterpret_cast<unsigned>(m_exec->globalThisValue()), 4);
428 emitGetPutArg(thisVal, 4, X86::ecx);
431 X86Assembler::JmpSrc wasEval;
432 if (type == OpCallEval) {
433 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
434 emitCall(i, Machine::cti_op_call_eval);
435 m_jit.emitRestoreArgumentReference();
437 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
// If cti_op_call_eval did NOT return impossibleValue(), it performed the
// eval; skip the generic call path.
439 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(JSImmediate::impossibleValue()), X86::eax);
440 wasEval = m_jit.emitUnlinkedJne();
442 // this reloads the first arg into ecx (checked just below).
443 emitGetArg(instruction[i + 2].u.operand, X86::ecx);
445 // this sets up the first arg, and explicitly leaves the value in ecx (checked just below).
446 emitGetArg(instruction[i + 2].u.operand, X86::ecx);
447 emitPutArg(X86::ecx, 0);
450 // Fast check for JS function.
451 m_jit.testl_i32r(JSImmediate::TagMask, X86::ecx);
452 X86Assembler::JmpSrc isNotObject = m_jit.emitUnlinkedJne();
453 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsFunctionVptr), X86::ecx);
454 X86Assembler::JmpSrc isJSFunction = m_jit.emitUnlinkedJe();
455 m_jit.link(isNotObject, m_jit.label());
457 // This handles host functions
458 emitCall(i, ((type == OpConstruct) ? Machine::cti_op_construct_NotJSConstruct : Machine::cti_op_call_NotJSFunction));
459 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
461 X86Assembler::JmpSrc wasNotJSFunction = m_jit.emitUnlinkedJmp();
462 m_jit.link(isJSFunction, m_jit.label());
464 // This handles JSFunctions
465 emitCall(i, ((type == OpConstruct) ? Machine::cti_op_construct_JSConstruct : Machine::cti_op_call_JSFunction));
// The helper returned (in eax) the address of the compiled callee; call it.
466 m_jit.call_r(X86::eax);
467 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
// Join point for the fast path, host-function path and (for eval) the
// already-evaluated path; the result in eax is stored to the dst operand.
469 X86Assembler::JmpDst end = m_jit.label();
470 m_jit.link(wasNotJSFunction, end);
471 if (type == OpCallEval)
472 m_jit.link(wasEval, end);
474 emitPutResult(instruction[i + 1].u.operand);
// Emits the loop/back-branch watchdog: decrements the tick counter in esi
// and, when it reaches zero, calls cti_timeout_check and reloads the
// counter from Machine::m_ticksUntilNextTimeoutCheck (via exec ->
// m_globalData -> machine). Clobbers ecx.
477 void CTI::emitSlowScriptCheck(unsigned opcodeIndex)
479 m_jit.subl_i8r(1, X86::esi);
480 X86Assembler::JmpSrc skipTimeout = m_jit.emitUnlinkedJne();
481 emitCall(opcodeIndex, Machine::cti_timeout_check);
483 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
484 m_jit.movl_mr(OBJECT_OFFSET(ExecState, m_globalData), X86::ecx, X86::ecx);
485 m_jit.movl_mr(OBJECT_OFFSET(JSGlobalData, machine), X86::ecx, X86::ecx);
486 m_jit.movl_mr(OBJECT_OFFSET(Machine, m_ticksUntilNextTimeoutCheck), X86::ecx, X86::esi);
487 m_jit.link(skipTimeout, m_jit.label());
490 void CTI::privateCompileMainPass()
492 Instruction* instruction = m_codeBlock->instructions.begin();
493 unsigned instructionCount = m_codeBlock->instructions.size();
495 unsigned structureIDInstructionIndex = 0;
497 for (unsigned i = 0; i < instructionCount; ) {
498 m_labels[i] = m_jit.label();
500 #if ENABLE(SAMPLING_TOOL)
501 m_jit.movl_i32m(m_machine->getOpcodeID(instruction[i].u.opcode), ¤tOpcodeID);
504 ASSERT_WITH_MESSAGE(m_machine->isOpcode(instruction[i].u.opcode), "privateCompileMainPass gone bad @ %d", i);
505 m_jit.emitRestoreArgumentReference();
506 switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
508 unsigned src = instruction[i + 2].u.operand;
509 if (src < m_codeBlock->constantRegisters.size())
510 m_jit.movl_i32r(reinterpret_cast<unsigned>(m_codeBlock->constantRegisters[src].jsValue(m_exec)), X86::edx);
512 emitGetArg(src, X86::edx);
513 emitPutResult(instruction[i + 1].u.operand, X86::edx);
518 unsigned dst = instruction[i + 1].u.operand;
519 unsigned src1 = instruction[i + 2].u.operand;
520 unsigned src2 = instruction[i + 3].u.operand;
521 if (src2 < m_codeBlock->constantRegisters.size()) {
522 JSValue* value = m_codeBlock->constantRegisters[src2].jsValue(m_exec);
523 if (JSImmediate::isNumber(value)) {
524 emitGetArg(src1, X86::eax);
525 emitJumpSlowCaseIfNotImm(X86::eax, i);
526 m_jit.addl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
527 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
532 } else if (!(src1 < m_codeBlock->constantRegisters.size())) {
533 emitGetArg(src1, X86::eax);
534 emitGetArg(src2, X86::edx);
535 emitJumpSlowCaseIfNotImms(X86::eax, X86::edx, i);
536 emitFastArithDeTagImmediate(X86::eax);
537 m_jit.addl_rr(X86::edx, X86::eax);
538 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
543 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
544 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
545 emitCall(i, Machine::cti_op_add);
546 emitPutResult(instruction[i + 1].u.operand);
551 if (m_codeBlock->needsFullScopeChain)
552 emitCall(i, Machine::cti_op_end);
553 emitGetArg(instruction[i + 1].u.operand, X86::eax);
554 #if ENABLE(SAMPLING_TOOL)
555 m_jit.movl_i32m(-1, ¤tOpcodeID);
557 m_jit.pushl_m(-((m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize) - RegisterFile::CTIReturnEIP) * sizeof(Register), X86::edi);
563 unsigned target = instruction[i + 1].u.operand;
564 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
569 int srcDst = instruction[i + 1].u.operand;
570 emitGetArg(srcDst, X86::eax);
571 emitJumpSlowCaseIfNotImm(X86::eax, i);
572 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
573 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
574 emitPutResult(srcDst, X86::eax);
579 emitSlowScriptCheck(i);
581 unsigned target = instruction[i + 1].u.operand;
582 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
586 case op_loop_if_less: {
587 emitSlowScriptCheck(i);
589 unsigned target = instruction[i + 3].u.operand;
590 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
592 emitGetArg(instruction[i + 1].u.operand, X86::edx);
593 emitJumpSlowCaseIfNotImm(X86::edx, i);
594 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
595 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
597 emitGetArg(instruction[i + 1].u.operand, X86::eax);
598 emitGetArg(instruction[i + 2].u.operand, X86::edx);
599 emitJumpSlowCaseIfNotImm(X86::eax, i);
600 emitJumpSlowCaseIfNotImm(X86::edx, i);
601 m_jit.cmpl_rr(X86::edx, X86::eax);
602 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
607 case op_loop_if_lesseq: {
608 emitSlowScriptCheck(i);
610 unsigned target = instruction[i + 3].u.operand;
611 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
613 emitGetArg(instruction[i + 1].u.operand, X86::edx);
614 emitJumpSlowCaseIfNotImm(X86::edx, i);
615 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
616 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
618 emitGetArg(instruction[i + 1].u.operand, X86::eax);
619 emitGetArg(instruction[i + 2].u.operand, X86::edx);
620 emitJumpSlowCaseIfNotImm(X86::eax, i);
621 emitJumpSlowCaseIfNotImm(X86::edx, i);
622 m_jit.cmpl_rr(X86::edx, X86::eax);
623 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
628 case op_new_object: {
629 emitCall(i, Machine::cti_op_new_object);
630 emitPutResult(instruction[i + 1].u.operand);
635 // In order to be able to repatch both the StructureID, and the object offset, we store one pointer,
636 // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
637 // such that the StructureID & offset are always at the same distance from this.
639 emitGetArg(instruction[i + 1].u.operand, X86::eax);
640 emitGetArg(instruction[i + 3].u.operand, X86::edx);
642 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
643 X86Assembler::JmpDst hotPathBegin = m_jit.label();
644 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
645 ++structureIDInstructionIndex;
647 // Jump to a slow case if either the base object is an immediate, or if the StructureID does not match.
648 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
649 // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
650 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
651 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdStructureID);
652 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
654 // Plant a load from a bogus ofset in the object's property map; we will patch this later, if it is to be used.
655 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
656 m_jit.movl_rm(X86::edx, repatchGetByIdDefaultOffset, X86::eax);
657 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdPropertyMapOffset);
663 // As for put_by_id, get_by_id requires the offset of the StructureID and the offset of the access to be repatched.
664 // Additionally, for get_by_id we need repatch the offset of the branch to the slow case (we repatch this to jump
665 // to array-length / prototype access tranpolines, and finally we also the the property-map access offset as a label
666 // to jump back to if one of these trampolies finds a match.
668 emitGetArg(instruction[i + 2].u.operand, X86::eax);
670 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
672 X86Assembler::JmpDst hotPathBegin = m_jit.label();
673 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
674 ++structureIDInstructionIndex;
676 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
677 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
678 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdStructureID);
679 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
680 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdBranchToSlowCase);
682 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
683 m_jit.movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::ecx);
684 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdPropertyMapOffset);
685 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
690 case op_instanceof: {
691 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
692 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
693 emitGetPutArg(instruction[i + 4].u.operand, 8, X86::ecx);
694 emitCall(i, Machine::cti_op_instanceof);
695 emitPutResult(instruction[i + 1].u.operand);
700 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
701 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
702 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
703 emitCall(i, Machine::cti_op_del_by_id);
704 emitPutResult(instruction[i + 1].u.operand);
709 unsigned dst = instruction[i + 1].u.operand;
710 unsigned src1 = instruction[i + 2].u.operand;
711 unsigned src2 = instruction[i + 3].u.operand;
712 if (src1 < m_codeBlock->constantRegisters.size() || src2 < m_codeBlock->constantRegisters.size()) {
713 unsigned constant = src1;
714 unsigned nonconstant = src2;
715 if (!(src1 < m_codeBlock->constantRegisters.size())) {
719 JSValue* value = m_codeBlock->constantRegisters[constant].jsValue(m_exec);
720 if (JSImmediate::isNumber(value)) {
721 emitGetArg(nonconstant, X86::eax);
722 emitJumpSlowCaseIfNotImm(X86::eax, i);
723 emitFastArithImmToInt(X86::eax);
724 m_jit.imull_i32r( X86::eax, getDeTaggedConstantImmediate(value), X86::eax);
725 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
726 emitFastArithPotentiallyReTagImmediate(X86::eax);
733 emitGetArg(src1, X86::eax);
734 emitGetArg(src2, X86::edx);
735 emitJumpSlowCaseIfNotImms(X86::eax, X86::edx, i);
736 emitFastArithDeTagImmediate(X86::eax);
737 emitFastArithImmToInt(X86::edx);
738 m_jit.imull_rr(X86::edx, X86::eax);
739 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
740 emitFastArithPotentiallyReTagImmediate(X86::eax);
746 FuncDeclNode* func = (m_codeBlock->functions[instruction[i + 2].u.operand]).get();
747 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
748 emitCall(i, Machine::cti_op_new_func);
749 emitPutResult(instruction[i + 1].u.operand);
754 compileOpCall(instruction, i);
758 case op_get_global_var: {
759 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 2].u.jsCell);
760 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
761 emitGetVariableObjectRegister(X86::eax, instruction[i + 3].u.operand, X86::eax);
762 emitPutResult(instruction[i + 1].u.operand, X86::eax);
766 case op_put_global_var: {
767 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 1].u.jsCell);
768 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
769 emitGetArg(instruction[i + 3].u.operand, X86::edx);
770 emitPutVariableObjectRegister(X86::edx, X86::eax, instruction[i + 2].u.operand);
774 case op_get_scoped_var: {
775 int skip = instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain;
777 emitGetCTIParam(CTI_ARGS_scopeChain, X86::eax);
779 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::eax, X86::eax);
781 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::eax, X86::eax);
782 emitGetVariableObjectRegister(X86::eax, instruction[i + 2].u.operand, X86::eax);
783 emitPutResult(instruction[i + 1].u.operand);
787 case op_put_scoped_var: {
788 int skip = instruction[i + 2].u.operand + m_codeBlock->needsFullScopeChain;
790 emitGetCTIParam(CTI_ARGS_scopeChain, X86::edx);
791 emitGetArg(instruction[i + 3].u.operand, X86::eax);
793 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::edx, X86::edx);
795 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::edx, X86::edx);
796 emitPutVariableObjectRegister(X86::eax, X86::edx, instruction[i + 1].u.operand);
801 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
802 emitCall(i, Machine::cti_op_ret);
804 m_jit.pushl_m(-((m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize) - RegisterFile::CTIReturnEIP) * sizeof(Register), X86::edi);
810 m_jit.leal_mr(sizeof(Register) * instruction[i + 2].u.operand, X86::edi, X86::edx);
811 emitPutArg(X86::edx, 0);
812 emitPutArgConstant(instruction[i + 3].u.operand, 4);
813 emitCall(i, Machine::cti_op_new_array);
814 emitPutResult(instruction[i + 1].u.operand);
819 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
820 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
821 emitCall(i, Machine::cti_op_resolve);
822 emitPutResult(instruction[i + 1].u.operand);
827 compileOpCall(instruction, i, OpConstruct);
831 case op_construct_verify: {
832 emitPutArgConstant(instruction[i + 1].u.operand, 0);
833 emitPutArgConstant(instruction[i + 2].u.operand, 4);
834 emitCall(i, Machine::cti_op_construct_verify);
838 case op_get_by_val: {
839 emitGetArg(instruction[i + 2].u.operand, X86::eax);
840 emitGetArg(instruction[i + 3].u.operand, X86::edx);
841 emitJumpSlowCaseIfNotImm(X86::edx, i);
842 emitFastArithImmToInt(X86::edx);
843 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
844 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
845 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
846 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
848 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
849 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
850 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
851 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
853 // Get the value from the vector
854 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::eax);
855 emitPutResult(instruction[i + 1].u.operand);
859 case op_resolve_func: {
860 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
861 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
862 emitCall(i, Machine::cti_op_resolve_func);
863 emitPutResult(instruction[i + 1].u.operand);
864 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
865 emitPutResult(instruction[i + 2].u.operand);
870 emitGetArg(instruction[i + 2].u.operand, X86::eax);
871 emitGetArg(instruction[i + 3].u.operand, X86::edx);
872 emitJumpSlowCaseIfNotImms(X86::eax, X86::edx, i);
873 m_jit.subl_rr(X86::edx, X86::eax);
874 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
875 emitFastArithReTagImmediate(X86::eax);
876 emitPutResult(instruction[i + 1].u.operand);
880 case op_put_by_val: {
881 emitGetArg(instruction[i + 1].u.operand, X86::eax);
882 emitGetArg(instruction[i + 2].u.operand, X86::edx);
883 emitJumpSlowCaseIfNotImm(X86::edx, i);
884 emitFastArithImmToInt(X86::edx);
885 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
886 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
887 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
888 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
890 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
891 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
892 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
893 X86Assembler::JmpSrc inFastVector = m_jit.emitUnlinkedJa();
894 // No; oh well, check if the access if within the vector - if so, we may still be okay.
895 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
896 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
898 // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
899 // FIXME: should be able to handle initial write to array; increment the the number of items in the array, and potentially update fast access cutoff.
900 m_jit.cmpl_i8m(0, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
901 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
903 // All good - put the value into the array.
904 m_jit.link(inFastVector, m_jit.label());
905 emitGetArg(instruction[i + 3].u.operand, X86::eax);
906 m_jit.movl_rm(X86::eax, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
910 CTI_COMPILE_BINARY_OP(op_lesseq)
911 case op_loop_if_true: {
912 emitSlowScriptCheck(i);
914 unsigned target = instruction[i + 2].u.operand;
915 emitGetArg(instruction[i + 1].u.operand, X86::eax);
917 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
918 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
919 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
920 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
922 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
923 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
924 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
925 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
927 m_jit.link(isZero, m_jit.label());
931 case op_resolve_base: {
932 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
933 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
934 emitCall(i, Machine::cti_op_resolve_base);
935 emitPutResult(instruction[i + 1].u.operand);
940 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
941 emitCall(i, Machine::cti_op_negate);
942 emitPutResult(instruction[i + 1].u.operand);
946 case op_resolve_skip: {
947 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
948 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
949 emitPutArgConstant(instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain, 4);
950 emitCall(i, Machine::cti_op_resolve_skip);
951 emitPutResult(instruction[i + 1].u.operand);
955 case op_resolve_global: {
957 unsigned globalObject = reinterpret_cast<unsigned>(instruction[i + 2].u.jsCell);
958 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
959 void* structureIDAddr = reinterpret_cast<void*>(instruction + i + 4);
960 void* offsetAddr = reinterpret_cast<void*>(instruction + i + 5);
962 // Check StructureID of global object
963 m_jit.movl_i32r(globalObject, X86::eax);
964 m_jit.movl_mr(structureIDAddr, X86::edx);
965 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
966 X86Assembler::JmpSrc slowCase = m_jit.emitUnlinkedJne(); // StructureIDs don't match
967 m_slowCases.append(SlowCaseEntry(slowCase, i));
969 // Load cached property
970 m_jit.movl_mr(OBJECT_OFFSET(JSGlobalObject, m_propertyStorage), X86::eax, X86::eax);
971 m_jit.movl_mr(offsetAddr, X86::edx);
972 m_jit.movl_mr(0, X86::eax, X86::edx, sizeof(JSValue*), X86::eax);
973 emitPutResult(instruction[i + 1].u.operand);
974 X86Assembler::JmpSrc end = m_jit.emitUnlinkedJmp();
977 m_jit.link(slowCase, m_jit.label());
978 emitPutArgConstant(globalObject, 0);
979 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
980 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 8);
981 emitCall(i, Machine::cti_op_resolve_global);
982 emitPutResult(instruction[i + 1].u.operand);
983 m_jit.link(end, m_jit.label());
985 ++structureIDInstructionIndex;
988 CTI_COMPILE_BINARY_OP(op_div)
990 int srcDst = instruction[i + 1].u.operand;
991 emitGetArg(srcDst, X86::eax);
992 emitJumpSlowCaseIfNotImm(X86::eax, i);
993 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
994 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
995 emitPutResult(srcDst, X86::eax);
1000 unsigned target = instruction[i + 3].u.operand;
1001 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1003 emitGetArg(instruction[i + 1].u.operand, X86::edx);
1004 emitJumpSlowCaseIfNotImm(X86::edx, i);
1005 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
1006 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
1008 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1009 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1010 emitJumpSlowCaseIfNotImm(X86::eax, i);
1011 emitJumpSlowCaseIfNotImm(X86::edx, i);
1012 m_jit.cmpl_rr(X86::edx, X86::eax);
1013 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
1019 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1020 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
1021 m_jit.testl_i32r(JSImmediate::FullTagTypeMask, X86::eax); // i8?
1022 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1023 m_jit.xorl_i8r((JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue), X86::eax);
1024 emitPutResult(instruction[i + 1].u.operand);
1029 unsigned target = instruction[i + 2].u.operand;
1030 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1032 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1033 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1034 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1035 X86Assembler::JmpSrc isNonZero = m_jit.emitUnlinkedJne();
1037 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1038 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1039 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1040 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1042 m_jit.link(isNonZero, m_jit.label());
1047 int srcDst = instruction[i + 2].u.operand;
1048 emitGetArg(srcDst, X86::eax);
1049 m_jit.movl_rr(X86::eax, X86::edx);
1050 emitJumpSlowCaseIfNotImm(X86::eax, i);
1051 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1052 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1053 emitPutResult(srcDst, X86::edx);
1054 emitPutResult(instruction[i + 1].u.operand);
1058 case op_unexpected_load: {
1059 JSValue* v = m_codeBlock->unexpectedConstants[instruction[i + 2].u.operand];
1060 m_jit.movl_i32r(reinterpret_cast<unsigned>(v), X86::eax);
1061 emitPutResult(instruction[i + 1].u.operand);
1066 int retAddrDst = instruction[i + 1].u.operand;
1067 int target = instruction[i + 2].u.operand;
1068 m_jit.movl_i32m(0, sizeof(Register) * retAddrDst, X86::edi);
1069 X86Assembler::JmpDst addrPosition = m_jit.label();
1070 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1071 X86Assembler::JmpDst sretTarget = m_jit.label();
1072 m_jsrSites.append(JSRInfo(addrPosition, sretTarget));
1077 m_jit.jmp_m(sizeof(Register) * instruction[i + 1].u.operand, X86::edi);
1081 CTI_COMPILE_BINARY_OP(op_eq)
1083 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1084 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1085 emitJumpSlowCaseIfNotImm(X86::eax, i);
1086 emitJumpSlowCaseIfNotImm(X86::ecx, i);
1087 emitFastArithImmToInt(X86::eax);
1088 emitFastArithImmToInt(X86::ecx);
1089 m_jit.shll_CLr(X86::eax);
1090 emitFastArithIntToImmOrSlowCase(X86::eax, i);
1091 emitPutResult(instruction[i + 1].u.operand);
1096 unsigned src1 = instruction[i + 2].u.operand;
1097 unsigned src2 = instruction[i + 3].u.operand;
1098 unsigned dst = instruction[i + 1].u.operand;
1099 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
1100 emitGetArg(src2, X86::eax);
1101 emitJumpSlowCaseIfNotImm(X86::eax, i);
1102 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax); // FIXME: make it more obvious this is relying on the format of JSImmediate
1104 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
1105 emitGetArg(src1, X86::eax);
1106 emitJumpSlowCaseIfNotImm(X86::eax, i);
1107 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax);
1110 emitGetArg(src1, X86::eax);
1111 emitGetArg(src2, X86::edx);
1112 m_jit.andl_rr(X86::edx, X86::eax);
1113 emitJumpSlowCaseIfNotImm(X86::eax, i);
1120 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1121 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1122 emitJumpSlowCaseIfNotImm(X86::eax, i);
1123 emitJumpSlowCaseIfNotImm(X86::ecx, i);
1124 emitFastArithImmToInt(X86::ecx);
1125 m_jit.sarl_CLr(X86::eax);
1126 emitFastArithPotentiallyReTagImmediate(X86::eax);
1127 emitPutResult(instruction[i + 1].u.operand);
1132 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1133 emitJumpSlowCaseIfNotImm(X86::eax, i);
1134 m_jit.xorl_i8r(~JSImmediate::TagBitTypeInteger, X86::eax);
1135 emitPutResult(instruction[i + 1].u.operand);
1139 case op_resolve_with_base: {
1140 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1141 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1142 emitCall(i, Machine::cti_op_resolve_with_base);
1143 emitPutResult(instruction[i + 1].u.operand);
1144 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
1145 emitPutResult(instruction[i + 2].u.operand);
1149 case op_new_func_exp: {
1150 FuncExprNode* func = (m_codeBlock->functionExpressions[instruction[i + 2].u.operand]).get();
1151 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
1152 emitCall(i, Machine::cti_op_new_func_exp);
1153 emitPutResult(instruction[i + 1].u.operand);
1158 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1159 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1160 emitJumpSlowCaseIfNotImm(X86::eax, i);
1161 emitJumpSlowCaseIfNotImm(X86::ecx, i);
1162 emitFastArithDeTagImmediate(X86::eax);
1163 emitFastArithDeTagImmediate(X86::ecx);
1164 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i)); // This is checking if the last detag resulted in a value 0.
1166 m_jit.idivl_r(X86::ecx);
1167 emitFastArithReTagImmediate(X86::edx);
1168 m_jit.movl_rr(X86::edx, X86::eax);
1169 emitPutResult(instruction[i + 1].u.operand);
1174 unsigned target = instruction[i + 2].u.operand;
1175 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1177 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1178 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
1179 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1180 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
1182 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1183 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1184 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1185 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1187 m_jit.link(isZero, m_jit.label());
1191 CTI_COMPILE_BINARY_OP(op_less)
1192 CTI_COMPILE_BINARY_OP(op_neq)
1194 int srcDst = instruction[i + 2].u.operand;
1195 emitGetArg(srcDst, X86::eax);
1196 m_jit.movl_rr(X86::eax, X86::edx);
1197 emitJumpSlowCaseIfNotImm(X86::eax, i);
1198 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1199 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1200 emitPutResult(srcDst, X86::edx);
1201 emitPutResult(instruction[i + 1].u.operand);
1205 CTI_COMPILE_BINARY_OP(op_urshift)
1207 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1208 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1209 emitJumpSlowCaseIfNotImms(X86::eax, X86::edx, i);
1210 m_jit.xorl_rr(X86::edx, X86::eax);
1211 emitFastArithReTagImmediate(X86::eax);
1212 emitPutResult(instruction[i + 1].u.operand);
1216 case op_new_regexp: {
1217 RegExp* regExp = m_codeBlock->regexps[instruction[i + 2].u.operand].get();
1218 emitPutArgConstant(reinterpret_cast<unsigned>(regExp), 0);
1219 emitCall(i, Machine::cti_op_new_regexp);
1220 emitPutResult(instruction[i + 1].u.operand);
1225 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1226 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1227 emitJumpSlowCaseIfNotImms(X86::eax, X86::edx, i);
1228 m_jit.orl_rr(X86::edx, X86::eax);
1229 emitPutResult(instruction[i + 1].u.operand);
1233 case op_call_eval: {
1234 compileOpCall(instruction, i, OpCallEval);
1239 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1240 emitCall(i, Machine::cti_op_throw);
1241 m_jit.addl_i8r(0x24, X86::esp);
1242 m_jit.popl_r(X86::edi);
1243 m_jit.popl_r(X86::esi);
1248 case op_get_pnames: {
1249 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1250 emitCall(i, Machine::cti_op_get_pnames);
1251 emitPutResult(instruction[i + 1].u.operand);
1255 case op_next_pname: {
1256 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1257 unsigned target = instruction[i + 3].u.operand;
1258 emitCall(i, Machine::cti_op_next_pname);
1259 m_jit.testl_rr(X86::eax, X86::eax);
1260 X86Assembler::JmpSrc endOfIter = m_jit.emitUnlinkedJe();
1261 emitPutResult(instruction[i + 1].u.operand);
1262 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 3 + target));
1263 m_jit.link(endOfIter, m_jit.label());
1267 case op_push_scope: {
1268 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1269 emitCall(i, Machine::cti_op_push_scope);
1273 case op_pop_scope: {
1274 emitCall(i, Machine::cti_op_pop_scope);
1278 CTI_COMPILE_UNARY_OP(op_typeof)
1279 CTI_COMPILE_UNARY_OP(op_is_undefined)
1280 CTI_COMPILE_UNARY_OP(op_is_boolean)
1281 CTI_COMPILE_UNARY_OP(op_is_number)
1282 CTI_COMPILE_UNARY_OP(op_is_string)
1283 CTI_COMPILE_UNARY_OP(op_is_object)
1284 CTI_COMPILE_UNARY_OP(op_is_function)
1285 CTI_COMPILE_BINARY_OP(op_stricteq)
1286 CTI_COMPILE_BINARY_OP(op_nstricteq)
1287 case op_to_jsnumber: {
1288 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1289 emitCall(i, Machine::cti_op_to_jsnumber);
1290 emitPutResult(instruction[i + 1].u.operand);
1295 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1296 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1297 emitCall(i, Machine::cti_op_in);
1298 emitPutResult(instruction[i + 1].u.operand);
1302 case op_push_new_scope: {
1303 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1304 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1305 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1306 emitCall(i, Machine::cti_op_push_new_scope);
1307 emitPutResult(instruction[i + 1].u.operand);
1312 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
1313 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
1314 m_jit.movl_mr(OBJECT_OFFSET(ExecState, m_exception), X86::ecx, X86::eax);
1315 m_jit.movl_i32m(0, OBJECT_OFFSET(ExecState, m_exception), X86::ecx);
1316 emitPutResult(instruction[i + 1].u.operand);
1320 case op_jmp_scopes: {
1321 unsigned count = instruction[i + 1].u.operand;
1322 emitPutArgConstant(count, 0);
1323 emitCall(i, Machine::cti_op_jmp_scopes);
1324 unsigned target = instruction[i + 2].u.operand;
1325 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1329 case op_put_by_index: {
1330 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1331 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1332 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1333 emitCall(i, Machine::cti_op_put_by_index);
1337 case op_switch_imm: {
1338 unsigned tableIndex = instruction[i + 1].u.operand;
1339 unsigned defaultOffset = instruction[i + 2].u.operand;
1340 unsigned scrutinee = instruction[i + 3].u.operand;
1342 // create jump table for switch destinations, track this switch statement.
1343 SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTables[tableIndex];
1344 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Immediate));
1345 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
1347 emitGetPutArg(scrutinee, 0, X86::ecx);
1348 emitPutArgConstant(tableIndex, 4);
1349 emitCall(i, Machine::cti_op_switch_imm);
1350 m_jit.jmp_r(X86::eax);
1354 case op_switch_char: {
1355 unsigned tableIndex = instruction[i + 1].u.operand;
1356 unsigned defaultOffset = instruction[i + 2].u.operand;
1357 unsigned scrutinee = instruction[i + 3].u.operand;
1359 // create jump table for switch destinations, track this switch statement.
1360 SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTables[tableIndex];
1361 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Character));
1362 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
1364 emitGetPutArg(scrutinee, 0, X86::ecx);
1365 emitPutArgConstant(tableIndex, 4);
1366 emitCall(i, Machine::cti_op_switch_char);
1367 m_jit.jmp_r(X86::eax);
1371 case op_switch_string: {
1372 unsigned tableIndex = instruction[i + 1].u.operand;
1373 unsigned defaultOffset = instruction[i + 2].u.operand;
1374 unsigned scrutinee = instruction[i + 3].u.operand;
1376 // create jump table for switch destinations, track this switch statement.
1377 StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTables[tableIndex];
1378 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset));
1380 emitGetPutArg(scrutinee, 0, X86::ecx);
1381 emitPutArgConstant(tableIndex, 4);
1382 emitCall(i, Machine::cti_op_switch_string);
1383 m_jit.jmp_r(X86::eax);
1387 case op_del_by_val: {
1388 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1389 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1390 emitCall(i, Machine::cti_op_del_by_val);
1391 emitPutResult(instruction[i + 1].u.operand);
1395 case op_put_getter: {
1396 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1397 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1398 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1399 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1400 emitCall(i, Machine::cti_op_put_getter);
1404 case op_put_setter: {
1405 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1406 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1407 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1408 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1409 emitCall(i, Machine::cti_op_put_setter);
1413 case op_new_error: {
1414 JSValue* message = m_codeBlock->unexpectedConstants[instruction[i + 3].u.operand];
1415 emitPutArgConstant(instruction[i + 2].u.operand, 0);
1416 emitPutArgConstant(reinterpret_cast<unsigned>(message), 4);
1417 emitPutArgConstant(m_codeBlock->lineNumberForVPC(&instruction[i]), 8);
1418 emitCall(i, Machine::cti_op_new_error);
1419 emitPutResult(instruction[i + 1].u.operand);
1424 emitPutArgConstant(instruction[i + 1].u.operand, 0);
1425 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1426 emitPutArgConstant(instruction[i + 3].u.operand, 8);
1427 emitCall(i, Machine::cti_op_debug);
1432 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1433 emitCall(i, Machine::cti_op_eq_null);
1434 emitPutResult(instruction[i + 1].u.operand);
1439 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1440 emitCall(i, Machine::cti_op_neq_null);
1441 emitPutResult(instruction[i + 1].u.operand);
1445 case op_get_array_length:
1446 case op_get_by_id_chain:
1447 case op_get_by_id_generic:
1448 case op_get_by_id_proto:
1449 case op_get_by_id_self:
1450 case op_get_string_length:
1451 case op_put_by_id_generic:
1452 case op_put_by_id_replace:
1453 case op_put_by_id_transition:
1454 ASSERT_NOT_REACHED();
1458 ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
// Link pass: resolves intra-CodeBlock jumps recorded during the main pass.
// Each JmpTable entry pairs an unlinked jump source (`from`) with the bytecode
// index (`to`) it targets; here each source is bound to the machine-code label
// that the main pass recorded for that bytecode index in m_labels.
// NOTE(review): the count is hoisted into a local before the loop; presumably
// m_jmpTable is not mutated while linking — confirm against m_jit.link().
1462 void CTI::privateCompileLinkPass()
1464 unsigned jmpTableCount = m_jmpTable.size();
1465 for (unsigned i = 0; i < jmpTableCount; ++i)
1466 m_jit.link(m_jmpTable[i].from, m_labels[m_jmpTable[i].to]);
// Generic slow case for a binary operator, used from privateCompileSlowCases
// (e.g. op_mul below): link the slow-case jump source to the current label,
// marshal the two source operands into the CTI argument slots at offsets 0 and
// 4, call the C++ implementation Machine::cti_<name>, and store the returned
// value into the destination register. (Comments cannot be placed inside the
// macro body itself without breaking the '\' line continuations.)
1470 #define CTI_COMPILE_BINARY_OP_SLOW_CASE(name) \
1472 m_jit.link(iter->from, m_jit.label()); \
1473 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
1474 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
1475 emitCall(i, Machine::cti_##name); \
1476 emitPutResult(instruction[i + 1].u.operand); \
// Slow-case pass: for every SlowCaseEntry appended during the main pass, emit
// the out-of-line code that handles inputs the inline fast path rejected
// (non-immediate operands, arithmetic overflow, cache misses, ...). Each entry
// records the unlinked jump source (`from`) and the bytecode index (`to`) of
// the opcode it belongs to; entries for one opcode appear consecutively, so
// cases that registered several slow jumps consume them with (++iter)->from.
// The emitted code typically re-marshals the operands into the CTI argument
// area and calls the interpreter's C++ helper (Machine::cti_op_*).
// NOTE(review): this extraction has dropped structural lines (braces, several
// `case` labels, and the per-case `i += N; break;` statements), so some
// clusters below are identified by the cti_op_* helper they call rather than
// by a visible case label — confirm against the full file.
1481 void CTI::privateCompileSlowCases()
// Tracks position in the StructureID-bearing instruction list so that
// call-return locations can be recorded for repatching (put_by_id/get_by_id).
1483 unsigned structureIDInstructionIndex = 0;
1485 Instruction* instruction = m_codeBlock->instructions.begin();
1486 for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end(); ++iter) {
1487 unsigned i = iter->to;
// Restore the CTI argument-area pointer expected by the cti_* helpers.
1488 m_jit.emitRestoreArgumentReference();
1489 switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
// --- op_add (case label dropped by extraction) ---
// When src2 is a constant immediate number the fast path added the de-tagged
// constant directly; on overflow, undo that speculative add before calling out.
1491 unsigned dst = instruction[i + 1].u.operand;
1492 unsigned src2 = instruction[i + 3].u.operand;
1493 if (src2 < m_codeBlock->constantRegisters.size()) {
1494 JSValue* value = m_codeBlock->constantRegisters[src2].jsValue(m_exec);
1495 if (JSImmediate::isNumber(value)) {
// First slow jump: operand was not an immediate — skip the arithmetic undo.
1496 X86Assembler::JmpSrc notImm = iter->from;
// Second slow jump: overflow — subtract the constant back out of eax.
1497 m_jit.link((++iter)->from, m_jit.label());
1498 m_jit.subl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
1499 m_jit.link(notImm, m_jit.label());
1500 emitPutArg(X86::eax, 0);
1501 emitGetPutArg(src2, 4, X86::ecx);
1502 emitCall(i, Machine::cti_op_add);
// Register/register add variant: undo the speculative add (subtract edx back
// out and re-tag) before falling through to the generic helper call.
1509 ASSERT(!(static_cast<unsigned>(instruction[i + 2].u.operand) < m_codeBlock->constantRegisters.size()));
1511 X86Assembler::JmpSrc notImm = iter->from;
1512 m_jit.link((++iter)->from, m_jit.label());
1513 m_jit.subl_rr(X86::edx, X86::eax);
1514 emitFastArithReTagImmediate(X86::eax);
1515 m_jit.link(notImm, m_jit.label());
1516 emitPutArg(X86::eax, 0);
1517 emitPutArg(X86::edx, 4);
1518 emitCall(i, Machine::cti_op_add);
1523 case op_get_by_val: {
1524 // The slow case that handles accesses to arrays (below) may jump back up to here.
1525 X86Assembler::JmpDst beginGetByValSlow = m_jit.label();
// Three slow jumps from the fast path converge here; the subscript in edx is
// converted back to an immediate (only needed on the paths that de-tagged it).
1527 X86Assembler::JmpSrc notImm = iter->from;
1528 m_jit.link((++iter)->from, m_jit.label());
1529 m_jit.link((++iter)->from, m_jit.label());
1530 emitFastArithIntToImmNoCheck(X86::edx);
1531 m_jit.link(notImm, m_jit.label());
1532 emitPutArg(X86::eax, 0);
1533 emitPutArg(X86::edx, 4);
1534 emitCall(i, Machine::cti_op_get_by_val);
1535 emitPutResult(instruction[i + 1].u.operand);
// Rejoin the fast path at the next opcode (op_get_by_val is 4 words long).
1536 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
1538 // This is the slow case that handles accesses to arrays above the fast cut-off.
1539 // First, check if this is an access to the vector
1540 m_jit.link((++iter)->from, m_jit.label());
1541 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
// Out of vector bounds: fall back to the fully generic slow case above.
1542 m_jit.link(m_jit.emitUnlinkedJbe(), beginGetByValSlow);
1544 // okay, missed the fast region, but it is still in the vector. Get the value.
1545 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::ecx);
1546 // Check whether the value loaded is zero; if so we need to return undefined.
1547 m_jit.testl_rr(X86::ecx, X86::ecx);
1548 m_jit.link(m_jit.emitUnlinkedJe(), beginGetByValSlow);
1549 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
// --- op_sub (case label dropped): on overflow, undo the speculative subtract
// by adding edx back, then call the generic helper. ---
1555 X86Assembler::JmpSrc notImm = iter->from;
1556 m_jit.link((++iter)->from, m_jit.label());
1557 m_jit.addl_rr(X86::edx, X86::eax);
1558 m_jit.link(notImm, m_jit.label());
1559 emitPutArg(X86::eax, 0);
1560 emitPutArg(X86::edx, 4);
1561 emitCall(i, Machine::cti_op_sub);
1562 emitPutResult(instruction[i + 1].u.operand);
// --- op_rshift (case label dropped): both slow jumps go straight to the
// generic helper; value in eax, shift count in ecx. ---
1567 m_jit.link(iter->from, m_jit.label());
1568 m_jit.link((++iter)->from, m_jit.label());
1569 emitPutArg(X86::eax, 0);
1570 emitPutArg(X86::ecx, 4);
1571 emitCall(i, Machine::cti_op_rshift);
1572 emitPutResult(instruction[i + 1].u.operand);
// --- op_lshift (case label dropped): the third slow jump fires after the fast
// path clobbered the registers, so reload both operands before calling out. ---
1577 X86Assembler::JmpSrc notImm1 = iter->from;
1578 X86Assembler::JmpSrc notImm2 = (++iter)->from;
1579 m_jit.link((++iter)->from, m_jit.label());
1580 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1581 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1582 m_jit.link(notImm1, m_jit.label());
1583 m_jit.link(notImm2, m_jit.label());
1584 emitPutArg(X86::eax, 0);
1585 emitPutArg(X86::ecx, 4);
1586 emitCall(i, Machine::cti_op_lshift);
1587 emitPutResult(instruction[i + 1].u.operand);
1591 case op_loop_if_less: {
// Loop back-edges also get the slow-script (timeout) check on the slow path.
1592 emitSlowScriptCheck(i);
1594 unsigned target = instruction[i + 3].u.operand;
1595 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
// Constant-comparand variant (if/else around these clusters was dropped):
// pass edx plus the constant operand, branch on the helper's boolean result.
1597 m_jit.link(iter->from, m_jit.label());
1598 emitPutArg(X86::edx, 0);
1599 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
1600 emitCall(i, Machine::cti_op_loop_if_less);
1601 m_jit.testl_rr(X86::eax, X86::eax);
1602 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
// Register/register variant: two slow jumps, both operands passed.
1604 m_jit.link(iter->from, m_jit.label());
1605 m_jit.link((++iter)->from, m_jit.label());
1606 emitPutArg(X86::eax, 0);
1607 emitPutArg(X86::edx, 4);
1608 emitCall(i, Machine::cti_op_loop_if_less);
1609 m_jit.testl_rr(X86::eax, X86::eax);
1610 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
1615 case op_put_by_id: {
1616 m_jit.link(iter->from, m_jit.label());
1617 m_jit.link((++iter)->from, m_jit.label());
1619 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1620 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1621 emitPutArg(X86::eax, 0);
1622 emitPutArg(X86::edx, 8);
1623 X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_put_by_id);
1625 // Track the location of the call; this will be used to recover repatch information.
1626 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
1627 m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
1628 ++structureIDInstructionIndex;
1633 case op_get_by_id: {
1634 // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
1635 // so that we only need track one pointer into the slow case code - we track a pointer to the location
1636 // of the call (which we can use to look up the repatch information), but should an array-length or
1637 // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
1638 // the distance from the call to the head of the slow case.
1640 m_jit.link(iter->from, m_jit.label());
1641 m_jit.link((++iter)->from, m_jit.label());
// Fixed-offset anchor: the distance from here to the call site must equal
// repatchOffsetGetByIdSlowCaseCall (asserted below) so repatching can find it.
1644 X86Assembler::JmpDst coldPathBegin = m_jit.label();
1646 emitPutArg(X86::eax, 0);
1647 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1648 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1649 X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_get_by_id);
1650 ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
1651 emitPutResult(instruction[i + 1].u.operand);
1653 // Track the location of the call; this will be used to recover repatch information.
1654 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
1655 m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
1656 ++structureIDInstructionIndex;
1661 case op_resolve_global: {
// No slow code is emitted here; the slow path was linked in the main pass.
// Only keep the StructureID instruction index in step.
1662 ++structureIDInstructionIndex;
1666 case op_loop_if_lesseq: {
1667 emitSlowScriptCheck(i);
1669 unsigned target = instruction[i + 3].u.operand;
1670 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
// Constant-comparand variant (see op_loop_if_less above for the same shape).
1672 m_jit.link(iter->from, m_jit.label());
1673 emitPutArg(X86::edx, 0);
1674 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
1675 emitCall(i, Machine::cti_op_loop_if_lesseq);
1676 m_jit.testl_rr(X86::eax, X86::eax);
1677 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
// Register/register variant.
1679 m_jit.link(iter->from, m_jit.label());
1680 m_jit.link((++iter)->from, m_jit.label());
1681 emitPutArg(X86::eax, 0);
1682 emitPutArg(X86::edx, 4);
1683 emitCall(i, Machine::cti_op_loop_if_lesseq);
1684 m_jit.testl_rr(X86::eax, X86::eax);
1685 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
// --- op_pre_inc (case label dropped): on overflow, subtract the de-tagged 1
// back out of eax before calling the helper; notImm skips the undo. ---
1691 unsigned srcDst = instruction[i + 1].u.operand;
1692 X86Assembler::JmpSrc notImm = iter->from;
1693 m_jit.link((++iter)->from, m_jit.label());
1694 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
1695 m_jit.link(notImm, m_jit.label());
1696 emitPutArg(X86::eax, 0);
1697 emitCall(i, Machine::cti_op_pre_inc);
1698 emitPutResult(srcDst);
1702 case op_put_by_val: {
1703 // Normal slow cases - either is not an immediate imm, or is an array.
1704 X86Assembler::JmpSrc notImm = iter->from;
1705 m_jit.link((++iter)->from, m_jit.label());
1706 m_jit.link((++iter)->from, m_jit.label());
// Re-tag the subscript in edx (only the paths that de-tagged it need this).
1707 emitFastArithIntToImmNoCheck(X86::edx);
1708 m_jit.link(notImm, m_jit.label());
1709 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1710 emitPutArg(X86::eax, 0);
1711 emitPutArg(X86::edx, 4);
1712 emitPutArg(X86::ecx, 8);
1713 emitCall(i, Machine::cti_op_put_by_val);
// Rejoin the fast path at the next opcode (op_put_by_val is 4 words long).
1714 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
1716 // slow cases for immediate int accesses to arrays
1717 m_jit.link((++iter)->from, m_jit.label());
1718 m_jit.link((++iter)->from, m_jit.label());
1719 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1720 emitPutArg(X86::eax, 0);
1721 emitPutArg(X86::edx, 4);
1722 emitPutArg(X86::ecx, 8);
// Base is known to be a JSArray here, so the array-specific helper is used.
1723 emitCall(i, Machine::cti_op_put_by_val_array);
1728 case op_loop_if_true: {
1729 emitSlowScriptCheck(i);
1731 m_jit.link(iter->from, m_jit.label());
1732 emitPutArg(X86::eax, 0);
// Shares the generic truthiness helper with op_jtrue.
1733 emitCall(i, Machine::cti_op_jtrue);
1734 m_jit.testl_rr(X86::eax, X86::eax);
1735 unsigned target = instruction[i + 2].u.operand;
1736 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
// --- op_pre_dec (case label dropped): on overflow, add the de-tagged 1 back
// into eax before calling the helper; notImm skips the undo. ---
1741 unsigned srcDst = instruction[i + 1].u.operand;
1742 X86Assembler::JmpSrc notImm = iter->from;
1743 m_jit.link((++iter)->from, m_jit.label());
1744 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
1745 m_jit.link(notImm, m_jit.label());
1746 emitPutArg(X86::eax, 0);
1747 emitCall(i, Machine::cti_op_pre_dec);
1748 emitPutResult(srcDst);
// --- op_jnless (case label dropped): calls cti_op_jless and branches on a
// FALSE result (jump-if-not-less). ---
1753 unsigned target = instruction[i + 3].u.operand;
1754 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
// Constant-comparand variant.
1756 m_jit.link(iter->from, m_jit.label());
1757 emitPutArg(X86::edx, 0);
1758 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
1759 emitCall(i, Machine::cti_op_jless);
1760 m_jit.testl_rr(X86::eax, X86::eax);
1761 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
// Register/register variant.
1763 m_jit.link(iter->from, m_jit.label());
1764 m_jit.link((++iter)->from, m_jit.label());
1765 emitPutArg(X86::eax, 0);
1766 emitPutArg(X86::edx, 4);
1767 emitCall(i, Machine::cti_op_jless);
1768 m_jit.testl_rr(X86::eax, X86::eax);
1769 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
// --- op_not (case label dropped): undo the fast path's bool-tag flip on eax
// before handing the original value to the helper. ---
1775 m_jit.link(iter->from, m_jit.label());
1776 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
1777 emitPutArg(X86::eax, 0);
1778 emitCall(i, Machine::cti_op_not);
1779 emitPutResult(instruction[i + 1].u.operand);
// --- op_jfalse (case label dropped): uses cti_op_jtrue and branches on a
// FALSE result, i.e. the inverted sense. ---
1784 m_jit.link(iter->from, m_jit.label());
1785 emitPutArg(X86::eax, 0);
1786 emitCall(i, Machine::cti_op_jtrue);
1787 m_jit.testl_rr(X86::eax, X86::eax);
1788 unsigned target = instruction[i + 2].u.operand;
1789 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 2 + target]); // inverted!
// --- op_post_inc (case label dropped): the helper returns the old value in
// eax and the incremented value via the CTI 2ndResult slot. ---
1794 unsigned srcDst = instruction[i + 2].u.operand;
1795 m_jit.link(iter->from, m_jit.label());
1796 m_jit.link((++iter)->from, m_jit.label());
1797 emitPutArg(X86::eax, 0);
1798 emitCall(i, Machine::cti_op_post_inc);
1799 emitPutResult(instruction[i + 1].u.operand);
1800 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
1801 emitPutResult(srcDst);
// --- op_bitnot (case label dropped). ---
1806 m_jit.link(iter->from, m_jit.label());
1807 emitPutArg(X86::eax, 0);
1808 emitCall(i, Machine::cti_op_bitnot);
1809 emitPutResult(instruction[i + 1].u.operand);
// --- op_bitand (case label dropped): three variants mirroring the fast path —
// constant src1, constant src2, or both in registers. ---
1814 unsigned src1 = instruction[i + 2].u.operand;
1815 unsigned src2 = instruction[i + 3].u.operand;
1816 unsigned dst = instruction[i + 1].u.operand;
1817 if (getConstantImmediateNumericArg(src1)) {
1818 m_jit.link(iter->from, m_jit.label());
1819 emitGetPutArg(src1, 0, X86::ecx);
1820 emitPutArg(X86::eax, 4);
1821 emitCall(i, Machine::cti_op_bitand);
1823 } else if (getConstantImmediateNumericArg(src2)) {
1824 m_jit.link(iter->from, m_jit.label());
1825 emitPutArg(X86::eax, 0);
1826 emitGetPutArg(src2, 4, X86::ecx);
1827 emitCall(i, Machine::cti_op_bitand);
1830 m_jit.link(iter->from, m_jit.label());
1831 emitGetPutArg(src1, 0, X86::ecx);
1832 emitPutArg(X86::edx, 4);
1833 emitCall(i, Machine::cti_op_bitand);
// --- op_jtrue (case label dropped): branch on a TRUE helper result. ---
1840 m_jit.link(iter->from, m_jit.label());
1841 emitPutArg(X86::eax, 0);
1842 emitCall(i, Machine::cti_op_jtrue);
1843 m_jit.testl_rr(X86::eax, X86::eax);
1844 unsigned target = instruction[i + 2].u.operand;
1845 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
// --- op_post_dec (case label dropped): old value via eax, decremented value
// via the CTI 2ndResult slot, as for op_post_inc above. ---
1850 unsigned srcDst = instruction[i + 2].u.operand;
1851 m_jit.link(iter->from, m_jit.label());
1852 m_jit.link((++iter)->from, m_jit.label());
1853 emitPutArg(X86::eax, 0);
1854 emitCall(i, Machine::cti_op_post_dec);
1855 emitPutResult(instruction[i + 1].u.operand);
1856 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
1857 emitPutResult(srcDst);
// --- op_bitxor (case label dropped). ---
1862 m_jit.link(iter->from, m_jit.label());
1863 emitPutArg(X86::eax, 0);
1864 emitPutArg(X86::edx, 4);
1865 emitCall(i, Machine::cti_op_bitxor);
1866 emitPutResult(instruction[i + 1].u.operand);
// --- op_bitor (case label dropped). ---
1871 m_jit.link(iter->from, m_jit.label());
1872 emitPutArg(X86::eax, 0);
1873 emitPutArg(X86::edx, 4);
1874 emitCall(i, Machine::cti_op_bitor);
1875 emitPutResult(instruction[i + 1].u.operand);
// --- op_mod (case label dropped): the fast path de-tagged both operands, so
// re-tag eax and ecx before the helper call; the notImm paths skip the re-tag.
1880 X86Assembler::JmpSrc notImm1 = iter->from;
1881 X86Assembler::JmpSrc notImm2 = (++iter)->from;
1882 m_jit.link((++iter)->from, m_jit.label());
1883 emitFastArithReTagImmediate(X86::eax);
1884 emitFastArithReTagImmediate(X86::ecx);
1885 m_jit.link(notImm1, m_jit.label());
1886 m_jit.link(notImm2, m_jit.label());
1887 emitPutArg(X86::eax, 0);
1888 emitPutArg(X86::ecx, 4);
1889 emitCall(i, Machine::cti_op_mod);
1890 emitPutResult(instruction[i + 1].u.operand);
// op_mul uses the generic binary-op slow case macro.
1894 CTI_COMPILE_BINARY_OP_SLOW_CASE(op_mul);
// Any opcode without a registered slow case must never land here.
1896 ASSERT_NOT_REACHED();
// End of each slow case: jump back into the main-pass code at m_labels[i].
// NOTE(review): the per-case `i += N; break;` lines were dropped by the
// extraction; presumably `i` has been advanced past this opcode by the time
// this link executes — confirm against the full file.
1900 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i]);
// Every StructureID-bearing instruction must have been visited exactly once.
1903 ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
// Top-level driver for compiling one CodeBlock to machine code: emits the
// function prologue, runs the three code-generation passes, copies the code
// into executable memory, then resolves every recorded fix-up (switch jump
// tables, exception handlers, outgoing calls, jsr targets, and property-access
// stub locations) against the final relocated code address.
1906 void CTI::privateCompile()
1908 // Could use a popl_m, but would need to offset the following instruction if so.
1909 m_jit.popl_r(X86::ecx);
1910 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
// Stash the native return address in the call frame so cti_* helpers can
// identify the return site.
1911 emitPutToCallFrameHeader(X86::ecx, RegisterFile::CTIReturnEIP);
// Pass 1: main (fast-path) code; pass 2: link intra-block jumps; pass 3: the
// out-of-line slow cases.
1913 privateCompileMainPass();
1914 privateCompileLinkPass();
1915 privateCompileSlowCases();
// Every unlinked jump should have been consumed by the link pass.
1917 ASSERT(m_jmpTable.isEmpty());
// Copy the assembled buffer into its final executable location; all
// relocations below are computed relative to `code`.
1919 void* code = m_jit.copy();
1922 // Translate vPC offsets into addresses in JIT generated code, for switch tables.
1923 for (unsigned i = 0; i < m_switches.size(); ++i) {
1924 SwitchRecord record = m_switches[i];
1925 unsigned opcodeIndex = record.m_opcodeIndex;
1927 if (record.m_type != SwitchRecord::String) {
1928 ASSERT(record.m_type == SwitchRecord::Immediate || record.m_type == SwitchRecord::Character);
1929 ASSERT(record.m_jumpTable.m_simpleJumpTable->branchOffsets.size() == record.m_jumpTable.m_simpleJumpTable->ctiOffsets.size());
// `opcodeIndex + 3` skips the switch opcode's own operands to reach the
// bytecode offset the branch offsets are relative to.
1931 record.m_jumpTable.m_simpleJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
1933 for (unsigned j = 0; j < record.m_jumpTable.m_simpleJumpTable->branchOffsets.size(); ++j) {
1934 unsigned offset = record.m_jumpTable.m_simpleJumpTable->branchOffsets[j];
// A zero branch offset means "no case here": fall back to the default target.
1935 record.m_jumpTable.m_simpleJumpTable->ctiOffsets[j] = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_simpleJumpTable->ctiDefault;
1938 ASSERT(record.m_type == SwitchRecord::String);
1940 record.m_jumpTable.m_stringJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
1942 StringJumpTable::StringOffsetTable::iterator end = record.m_jumpTable.m_stringJumpTable->offsetTable.end();
1943 for (StringJumpTable::StringOffsetTable::iterator it = record.m_jumpTable.m_stringJumpTable->offsetTable.begin(); it != end; ++it) {
1944 unsigned offset = it->second.branchOffset;
1945 it->second.ctiOffset = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_stringJumpTable->ctiDefault;
// Resolve exception handler entry points to native addresses.
1950 for (Vector<HandlerInfo>::iterator iter = m_codeBlock->exceptionHandlers.begin(); iter != m_codeBlock->exceptionHandlers.end(); ++iter)
1951 iter->nativeCode = m_jit.getRelocatedAddress(code, m_labels[iter->target]);
1953 // FIXME: There doesn't seem to be a way to hint to a hashmap that it should make a certain capacity available;
1954 // could be faster if we could do something like this:
1955 // m_codeBlock->ctiReturnAddressVPCMap.grow(m_calls.size());
// Link each emitted call to its cti_* target, and record the return
// address -> bytecode index mapping used for exception handling/repatching.
1956 for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
1957 X86Assembler::link(code, iter->from, iter->to);
1958 m_codeBlock->ctiReturnAddressVPCMap.add(m_jit.getRelocatedAddress(code, iter->from), iter->opcodeIndex);
1961 // Link absolute addresses for jsr
1962 for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
1963 X86Assembler::linkAbsoluteAddress(code, iter->addrPosition, iter->target);
// Record, per property-access instruction, where its call return and hot
// path live in the final code, so stubs can repatch them later.
1965 for (unsigned i = 0; i < m_codeBlock->structureIDInstructions.size(); ++i) {
1966 StructureStubInfo& info = m_codeBlock->structureIDInstructions[i];
1967 info.callReturnLocation = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].callReturnLocation);
1968 info.hotPathBegin = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].hotPathBegin);
1972 m_codeBlock->ctiCode = code;
// Generates a stub for get_by_id when the property lives directly on the base
// object: verify the base is a cell with the expected StructureID, then load
// the value from the cached slot in the property storage.  Failure cases jump
// to the generic C fallback.
1975 void CTI::privateCompileGetByIdSelf(StructureID* structureID, size_t cachedOffset, void* returnAddress)
1977 // Check eax is an object of the right StructureID.
// Any immediate-tag bits set means eax is not a cell -> fail.
1978 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1979 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
1980 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1981 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
1983 // Checks out okay! - getDirectOffset
// eax := base->m_propertyStorage; eax := storage[cachedOffset].
1984 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
1985 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
1988 void* code = m_jit.copy();
// Both guard failures fall back to the generic C implementation.
1991 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
1992 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
// Track the stub for later deletion, then point the original call site at it.
1994 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
1996 ctiRepatchCallByReturnAddress(returnAddress, code);
// Generates a stub for get_by_id when the property lives on the direct
// prototype: guard the base's StructureID and the prototype's StructureID,
// then load from the prototype's property storage.  Two variants: a
// PIC-repatching version (jumps back into the hot path on success) and a
// standalone version.
1999 void CTI::privateCompileGetByIdProto(StructureID* structureID, StructureID* prototypeStructureID, size_t cachedOffset, void* returnAddress)
2001 #if USE(CTI_REPATCH_PIC)
2002 StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
2004 // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
2005 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2007 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
2008 // referencing the prototype object - let's speculatively load its table nice and early!)
2009 JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
2010 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
// edx := proto's property storage (loaded from an absolute address; valid
// because the StructureID keeps the prototype alive).
2011 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2013 // check eax is an object of the right StructureID.
2014 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2015 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2016 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2017 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2019 // Check the prototype object's StructureID had not changed.
2020 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2021 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
2022 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
2024 // Checks out okay! - getDirectOffset
// Result goes in ecx: the hot-path code we jump back to stores ecx to dest.
2025 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::ecx);
2027 X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
2029 void* code = m_jit.copy();
2032 // Use the repatch information to link the failure cases back to the original slow case routine.
2033 void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
2034 X86Assembler::link(code, failureCases1, slowCaseBegin);
2035 X86Assembler::link(code, failureCases2, slowCaseBegin);
2036 X86Assembler::link(code, failureCases3, slowCaseBegin);
2038 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
2039 intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
2040 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
2042 // Track the stub we have created so that it will be deleted later.
2043 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2045 // Finally repatch the jump to slow case back in the hot path to jump here instead.
2046 // FIXME: should revert this repatching, on failure.
2047 intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
2048 X86Assembler::repatchBranchOffset(jmpLocation, code);
// NOTE(review): the lines below appear to be the non-CTI_REPATCH_PIC variant;
// the #else separating the two branches is not visible here — confirm.
2050 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
2051 // referencing the prototype object - let's speculatively load its table nice and early!)
2052 JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
2053 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2054 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2056 // check eax is an object of the right StructureID.
2057 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2058 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2059 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2060 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2062 // Check the prototype object's StructureID had not changed.
2063 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2064 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
2065 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
2067 // Checks out okay! - getDirectOffset
// Standalone variant returns the result in eax directly.
2068 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
2072 void* code = m_jit.copy();
2075 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2076 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2077 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2079 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2081 ctiRepatchCallByReturnAddress(returnAddress, code);
// Generates a stub for get_by_id when the property lives `count` hops up the
// prototype chain: guard the base's StructureID and the StructureID of every
// prototype along the chain, then load from the final prototype's storage.
2085 void CTI::privateCompileGetByIdChain(StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset, void* returnAddress)
// All guard failures collect here and are linked to the generic fallback.
2089 Vector<X86Assembler::JmpSrc> bucketsOfFail;
2091 // Check eax is an object of the right StructureID.
2092 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2093 bucketsOfFail.append(m_jit.emitUnlinkedJne());
2094 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2095 bucketsOfFail.append(m_jit.emitUnlinkedJne());
// Walk the chain: chainEntries[i] is the expected StructureID of the i-th
// prototype; each is checked via an absolute-address compare.
2097 StructureID* currStructureID = structureID;
2098 RefPtr<StructureID>* chainEntries = chain->head();
2099 JSObject* protoObject = 0;
2100 for (unsigned i = 0; i<count; ++i) {
2101 protoObject = static_cast<JSObject*>(currStructureID->prototypeForLookup(m_exec));
2102 currStructureID = chainEntries[i].get();
2104 // Check the prototype object's StructureID had not changed.
2105 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2106 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(currStructureID), static_cast<void*>(protoStructureIDAddress));
2107 bucketsOfFail.append(m_jit.emitUnlinkedJne());
2109 ASSERT(protoObject);
// Load from the last prototype's property storage into eax.
2111 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2112 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2113 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
2116 bucketsOfFail.append(m_jit.emitUnlinkedJmp());
2118 void* code = m_jit.copy();
2121 for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
2122 X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
// Track the stub for deletion, then repatch the call site to use it.
2124 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2126 ctiRepatchCallByReturnAddress(returnAddress, code);
// Generates a stub for put_by_id when the property already exists on the base
// object (no structure transition): guard the StructureID, then store the
// value (in edx) into the cached property slot.
2129 void CTI::privateCompilePutByIdReplace(StructureID* structureID, size_t cachedOffset, void* returnAddress)
2131 // check eax is an object of the right StructureID.
2132 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2133 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2134 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2135 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2137 // checks out okay! - putDirectOffset
// eax := base->m_propertyStorage; storage[cachedOffset] := edx (the value).
2138 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2139 m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
2142 void* code = m_jit.copy();
2145 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
2146 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
// Track the stub for deletion, then repatch the call site to use it.
2148 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2150 ctiRepatchCallByReturnAddress(returnAddress, code);
// Out-of-line helper called from the put_by_id transition stub when the
// transition requires reallocating property storage: performs the structure
// transition, grows the storage if the old structure had exhausted its inline
// capacity, and writes the value into the cached slot.
// NOTE(review): the trailing return statement is not visible in this view.
2155 static JSValue* SFX_CALL transitionObject(StructureID* newStructureID, size_t cachedOffset, JSObject* baseObject, JSValue* value)
2157 StructureID* oldStructureID = newStructureID->previousID();
2159 baseObject->transitionTo(newStructureID);
// If the old structure exactly filled the inline storage, this put spills
// into out-of-line storage, so allocate it now.
2161 if (oldStructureID->propertyMap().storageSize() == JSObject::inlineStorageCapacity)
2162 baseObject->allocatePropertyStorage(oldStructureID->propertyMap().storageSize(), oldStructureID->propertyMap().size());
2164 baseObject->putDirectOffset(cachedOffset, value);
// Decides whether a structure transition can be compiled inline or must call
// transitionObject(): a realloc is needed when the put spills out of inline
// storage, or when the out-of-line property map changes size.
// NOTE(review): the return statements between these conditions are not
// visible in this view.
2170 static inline bool transitionWillNeedStorageRealloc(StructureID* oldStructureID, StructureID* newStructureID)
// Old structure exactly fills inline storage: the new property forces a move
// to out-of-line storage.
2172 if (oldStructureID->propertyMap().storageSize() == JSObject::inlineStorageCapacity)
// Still fits within inline storage: no allocation needed.
2175 if (oldStructureID->propertyMap().storageSize() < JSObject::inlineStorageCapacity)
// Already out-of-line: realloc only if the map's size changed.
2178 if (oldStructureID->propertyMap().size() != newStructureID->propertyMap().size())
// Generates a stub for put_by_id that adds a new property, transitioning the
// object from oldStructureID to newStructureID.  Guards the base's structure
// and verifies nothing on the prototype chain (sIDC) has changed, then either
// performs the transition inline (fast case) or calls transitionObject()
// when property storage must be reallocated.
2184 void CTI::privateCompilePutByIdTransition(StructureID* oldStructureID, StructureID* newStructureID, size_t cachedOffset, StructureIDChain* sIDC, void* returnAddress)
2186 Vector<X86Assembler::JmpSrc, 16> failureCases;
2187 // check eax is an object of the right StructureID.
2188 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2189 failureCases.append(m_jit.emitUnlinkedJne());
2190 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(oldStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2191 failureCases.append(m_jit.emitUnlinkedJne());
2192 Vector<X86Assembler::JmpSrc> successCases;
// Walk the cached prototype chain in ecx, re-verifying each link's type and
// StructureID; reaching null means the whole chain checked out.
2195 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
2196 // proto(ecx) = baseObject->structureID()->prototype()
2197 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_type), X86::ecx);
2198 failureCases.append(m_jit.emitUnlinkedJne());
2199 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
2201 // ecx = baseObject->m_structureID
2202 for (RefPtr<StructureID>* it = sIDC->head(); *it; ++it) {
2203 // null check the prototype
2204 m_jit.cmpl_i32r(reinterpret_cast<intptr_t> (jsNull()), X86::ecx);
2205 successCases.append(m_jit.emitUnlinkedJe());
2207 // Check the structure id
2208 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(it->get()), OBJECT_OFFSET(JSCell, m_structureID), X86::ecx);
2209 failureCases.append(m_jit.emitUnlinkedJne());
// Advance: ecx := proto->structureID()->prototype(), verifying it is an object.
2211 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::ecx, X86::ecx);
2212 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_type), X86::ecx);
2213 failureCases.append(m_jit.emitUnlinkedJne());
2214 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
// Final guard after the chain walk; all success (null-prototype) exits land here.
2217 failureCases.append(m_jit.emitUnlinkedJne());
2218 for (unsigned i = 0; i < successCases.size(); ++i)
2219 m_jit.link(successCases[i], m_jit.label());
2221 X86Assembler::JmpSrc callTarget;
2222 // Fast case, don't need to do any heavy lifting, so don't bother making a call.
2223 if (!transitionWillNeedStorageRealloc(oldStructureID, newStructureID)) {
2224 // Assumes m_refCount can be decremented easily, refcount decrement is safe as
2225 // codeblock should ensure oldStructureID->m_refCount > 0
2226 m_jit.subl_i8m(1, reinterpret_cast<void*>(oldStructureID));
2227 m_jit.addl_i8m(1, reinterpret_cast<void*>(newStructureID));
// Swing the object's StructureID pointer to the new structure...
2228 m_jit.movl_i32m(reinterpret_cast<uint32_t>(newStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
// ...then store the value (edx) into the cached slot.
2231 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2232 m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
2234 // Slow case transition -- we're going to need to quite a bit of work,
2235 // so just make a call
// Push transitionObject()'s arguments right-to-left: value, baseObject,
// cachedOffset, newStructureID (matches the SFX_CALL signature above).
2236 m_jit.pushl_r(X86::edx);
2237 m_jit.pushl_r(X86::eax);
2238 m_jit.movl_i32r(cachedOffset, X86::eax);
2239 m_jit.pushl_r(X86::eax);
2240 m_jit.movl_i32r(reinterpret_cast<uint32_t>(newStructureID), X86::eax);
2241 m_jit.pushl_r(X86::eax);
2242 callTarget = m_jit.emitCall();
// Caller cleans up the four pushed arguments.
2243 m_jit.addl_i32r(4 * sizeof(void*), X86::esp);
2246 void* code = m_jit.copy();
2249 for (unsigned i = 0; i < failureCases.size(); ++i)
2250 X86Assembler::link(code, failureCases[i], reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
2252 if (transitionWillNeedStorageRealloc(oldStructureID, newStructureID))
2253 X86Assembler::link(code, callTarget, reinterpret_cast<void*>(transitionObject));
// Track the stub for deletion, then repatch the call site to use it.
2255 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2257 ctiRepatchCallByReturnAddress(returnAddress, code);
// Builds the shared trampoline that reads JSArray::length for get_by_id
// "length" accesses: vptr-check that eax is a JSArray, load the length from
// its storage, and tag it as an immediate integer.
2260 void* CTI::privateCompileArrayLengthTrampoline()
2262 // Check eax is an array
2263 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2264 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
// Compare the cell's vtable pointer against the known JSArray vptr.
2265 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
2266 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2268 // Checks out okay! - get the length from the storage
2269 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::eax);
2270 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::eax, X86::eax);
// Re-tag as a JSImmediate int: value*2 + 1; overflow means the length won't
// fit in an immediate, so fall back to the generic path.
2272 m_jit.addl_rr(X86::eax, X86::eax);
2273 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
2274 m_jit.addl_i8r(1, X86::eax);
2278 void* code = m_jit.copy();
2281 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2282 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2283 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
// Builds the shared trampoline that reads JSString::length for get_by_id
// "length" accesses: vptr-check that eax is a JSString, load the length from
// the UString rep, and tag it as an immediate integer.
2288 void* CTI::privateCompileStringLengthTrampoline()
2290 // Check eax is a string
2291 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2292 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
// Compare the cell's vtable pointer against the known JSString vptr.
2293 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsStringVptr), X86::eax);
2294 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2296 // Checks out okay! - get the length from the Ustring.
2297 m_jit.movl_mr(OBJECT_OFFSET(JSString, m_value) + OBJECT_OFFSET(UString, m_rep), X86::eax, X86::eax);
2298 m_jit.movl_mr(OBJECT_OFFSET(UString::Rep, len), X86::eax, X86::eax);
// Re-tag as a JSImmediate int: value*2 + 1; overflow -> generic path.
2300 m_jit.addl_rr(X86::eax, X86::eax);
2301 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
2302 m_jit.addl_i8r(1, X86::eax);
2306 void* code = m_jit.copy();
2309 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2310 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2311 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
// Repatches an existing get_by_id hot path in place (no new stub): rewrites
// the inline StructureID immediate and the property-slot displacement so the
// fast path hits for the newly-cached self access.
2316 void CTI::patchGetByIdSelf(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
2318 StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
2320 // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
2321 // Should probably go to Machine::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
2322 ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_get_by_id_generic));
2324 // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
2325 X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
2326 X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdStructureID, reinterpret_cast<uint32_t>(structureID));
// Repatches an existing put_by_id hot path in place (no new stub): rewrites
// the inline StructureID immediate and the property-slot displacement so the
// fast path hits for the newly-cached replace access.
2329 void CTI::patchPutByIdReplace(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
2331 StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
2333 // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
2334 // Should probably go to Machine::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
2335 ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_put_by_id_generic));
2337 // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
2338 X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
2339 X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdStructureID, reinterpret_cast<uint32_t>(structureID));
// PIC variant of the array-length trampoline: compiles a per-call-site stub
// that reads JSArray::length into ecx, links failures back to the original
// slow case, and on success jumps back into the hot path (which stores the
// result to the destination register for us).
2342 void CTI::privateCompilePatchGetArrayLength(void* returnAddress)
2344 StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
2346 // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
2347 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2349 // Check eax is an array
2350 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2351 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
// Compare the cell's vtable pointer against the known JSArray vptr.
2352 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
2353 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2355 // Checks out okay! - get the length from the storage
// Result is built in ecx because the hot path stores ecx to the destination.
2356 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
2357 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::ecx, X86::ecx);
// Re-tag as a JSImmediate int: value*2 + 1; overflow -> slow case.
2359 m_jit.addl_rr(X86::ecx, X86::ecx);
2360 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
2361 m_jit.addl_i8r(1, X86::ecx);
2363 X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
2365 void* code = m_jit.copy();
2368 // Use the repatch information to link the failure cases back to the original slow case routine.
2369 void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
2370 X86Assembler::link(code, failureCases1, slowCaseBegin);
2371 X86Assembler::link(code, failureCases2, slowCaseBegin);
2372 X86Assembler::link(code, failureCases3, slowCaseBegin);
2374 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
2375 intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
2376 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
2378 // Track the stub we have created so that it will be deleted later.
2379 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2381 // Finally repatch the jump to slow case back in the hot path to jump here instead.
2382 // FIXME: should revert this repatching, on failure.
2383 intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
2384 X86Assembler::repatchBranchOffset(jmpLocation, code);
// Loads a JSVariableObject register into `dst`:
// dst := variableObject->d->registers[index].  `dst` doubles as the scratch
// register for the two intermediate pointer loads.
2387 void CTI::emitGetVariableObjectRegister(X86Assembler::RegisterID variableObject, int index, X86Assembler::RegisterID dst)
2389 m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, dst);
2390 m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), dst, dst);
2391 m_jit.movl_mr(index * sizeof(Register), dst, dst);
// Stores `src` into a JSVariableObject register:
// variableObject->d->registers[index] := src.  Note `variableObject` is
// clobbered — it is reused as the scratch register for the pointer chase.
2394 void CTI::emitPutVariableObjectRegister(X86Assembler::RegisterID src, X86Assembler::RegisterID variableObject, int index)
2396 m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, variableObject);
2397 m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), variableObject, variableObject);
2398 m_jit.movl_rm(src, index * sizeof(Register), variableObject);
// WREC entry point: compiles `pattern` to native code.  The generated
// function tries to match at successive start positions (bumping pos by one
// on failure) until the end of the subject is passed; on match it fills the
// output vector (if non-null) with [start, end], on exhaustion it returns -1.
// Reports the subpattern count through numSubpatterns_ptr and errors through
// error_ptr.
2403 void* CTI::compileRegExp(ExecState* exec, const UString& pattern, unsigned* numSubpatterns_ptr, const char** error_ptr, bool ignoreCase, bool multiline)
2405 // TODO: better error messages
2406 if (pattern.size() > MaxPatternSize) {
2407 *error_ptr = "regular expression too large";
2411 X86Assembler jit(exec->machine()->jitCodeBuffer());
2412 WRECParser parser(pattern, ignoreCase, multiline, jit);
2414 jit.emitConvertToFastCall();
2416 // Preserve regs & initialize outputRegister.
2417 jit.pushl_r(WRECGenerator::outputRegister);
2418 jit.pushl_r(WRECGenerator::currentValueRegister);
2419 // push pos onto the stack, both to preserve and as a parameter available to parseDisjunction
2420 jit.pushl_r(WRECGenerator::currentPositionRegister);
2421 // load output pointer
2426 , X86::esp, WRECGenerator::outputRegister);
2428 // restart point on match fail.
2429 WRECGenerator::JmpDst nextLabel = jit.label();
2431 // (1) Parse Disjunction:
2433 // Parsing the disjunction should fully consume the pattern.
// NOTE(review): per the comment above, this condition looks inverted — the
// error should be raised when the pattern is NOT fully consumed, i.e.
// `!parser.isEndOfPattern()`.  Confirm against upstream WREC.
2434 JmpSrcVector failures;
2435 parser.parseDisjunction(failures);
2436 if (parser.isEndOfPattern()) {
2437 parser.m_err = WRECParser::Error_malformedPattern;
2440 // TODO: better error messages
2441 *error_ptr = "TODO: better error messages";
2446 // Set return value & pop registers from the stack.
// If output is null the caller only wants success/failure, so skip storing
// the match bounds.
2448 jit.testl_rr(WRECGenerator::outputRegister, WRECGenerator::outputRegister);
2449 WRECGenerator::JmpSrc noOutput = jit.emitUnlinkedJe();
// output[1] := match end (current pos); output[0] := match start (the saved
// pos popped from the stack, which also becomes the return value).
2451 jit.movl_rm(WRECGenerator::currentPositionRegister, 4, WRECGenerator::outputRegister);
2452 jit.popl_r(X86::eax);
2453 jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
2454 jit.popl_r(WRECGenerator::currentValueRegister);
2455 jit.popl_r(WRECGenerator::outputRegister);
// No-output variant: same epilogue without the two stores.
2458 jit.link(noOutput, jit.label());
2460 jit.popl_r(X86::eax);
2461 jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
2462 jit.popl_r(WRECGenerator::currentValueRegister);
2463 jit.popl_r(WRECGenerator::outputRegister);
2467 // All fails link to here. Progress the start point & if it is within scope, loop.
2468 // Otherwise, return fail value.
2469 WRECGenerator::JmpDst here = jit.label();
2470 for (unsigned i = 0; i < failures.size(); ++i)
2471 jit.link(failures[i], here);
// Bump the saved start position on the stack and retry the whole pattern
// from the next character while pos <= length.
2474 jit.movl_mr(X86::esp, WRECGenerator::currentPositionRegister);
2475 jit.addl_i8r(1, WRECGenerator::currentPositionRegister);
2476 jit.movl_rm(WRECGenerator::currentPositionRegister, X86::esp);
2477 jit.cmpl_rr(WRECGenerator::lengthRegister, WRECGenerator::currentPositionRegister);
2478 jit.link(jit.emitUnlinkedJle(), nextLabel);
// Exhausted every start position: discard the saved pos, return -1, restore regs.
2480 jit.addl_i8r(4, X86::esp);
2482 jit.movl_i32r(-1, X86::eax);
2483 jit.popl_r(WRECGenerator::currentValueRegister);
2484 jit.popl_r(WRECGenerator::outputRegister);
2487 *numSubpatterns_ptr = parser.m_numSubpatterns;
2489 void* code = jit.copy();
2494 #endif // ENABLE(WREC)
2498 #endif // ENABLE(CTI)