2 * Copyright (C) 2008 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "CodeBlock.h"
34 #include "wrec/WREC.h"
// GCC/x86 trampolines: hand-written glue between C++ and CTI-generated code.
// NOTE(review): this listing is truncated — the enclosing asm(...) statements,
// the prologue pushes (esi/edi) and the epilogue pops/ret are not visible here.
40 #if COMPILER(GCC) && PLATFORM(X86)
// ctiTrampoline: entry point from C++ into JIT code. Carves out 0x24 bytes of
// stack for the CTI argument area, seeds the timeout tick counter (esi = 512),
// then tail-calls the compiled code pointer taken from the argument area.
42 ".globl _ctiTrampoline" "\n"
43 "_ctiTrampoline:" "\n"
46 "subl $0x24, %esp" "\n"
47 "movl $512, %esi" "\n"
48 "call *0x30(%esp)" "\n" //Ox30 = 0x0C * 4, 0x0C = CTI_ARGS_code
49 "addl $0x24, %esp" "\n"
// ctiVMThrowTrampoline: JIT code jumps here when a helper has recorded an
// exception; loads exec from the CTI argument area, checks the exception slot
// (offset 8 into ExecState — presumably m_exception; confirm against the class
// layout), and calls the mangled cti_vm_throw to unwind, then tears down the
// same 0x24-byte argument area.
56 ".globl _ctiVMThrowTrampoline" "\n"
57 "_ctiVMThrowTrampoline:" "\n"
59 "movl 0x34(%esp), %ecx" "\n" //Ox34 = 0x0D * 4, 0x0D = CTI_ARGS_exec
60 "cmpl $0, 8(%ecx)" "\n"
65 "call __ZN3JSC7Machine12cti_vm_throwEPv" "\n"
66 "addl $0x24, %esp" "\n"
// MSVC/x86 versions of the trampolines above, written with inline __asm.
// NOTE(review): the __asm bodies are missing from this truncated listing; only
// the declarations and one instruction of ctiVMThrowTrampoline survive.
76 __declspec(naked) JSValue* ctiTrampoline(void* code, ExecState* exec, RegisterFile* registerFile, Register* r, ScopeChainNode* scopeChain, CodeBlock* codeBlock, JSValue** exception, Profiler**)
// Mirrors the GCC _ctiVMThrowTrampoline: dispatches to cti_vm_throw on a
// pending exception.
92 __declspec(naked) void ctiVMThrowTrampoline()
96 call JSC::Machine::cti_vm_throw;
109 // get arg puts an arg from the SF register array into a h/w register
110 ALWAYS_INLINE void CTI::emitGetArg(unsigned src, X86Assembler::RegisterID dst)
112 // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
113 if (src < m_codeBlock->constantRegisters.size()) {
114 JSValue* js = m_codeBlock->constantRegisters[src].jsValue(m_exec);
115 m_jit.movl_i32r(reinterpret_cast<unsigned>(js), dst);
117 m_jit.movl_mr(src * sizeof(Register), X86::edi, dst);
120 // get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
121 ALWAYS_INLINE void CTI::emitGetPutArg(unsigned src, unsigned offset, X86Assembler::RegisterID scratch)
123 if (src < m_codeBlock->constantRegisters.size()) {
124 JSValue* js = m_codeBlock->constantRegisters[src].jsValue(m_exec);
125 m_jit.movl_i32m(reinterpret_cast<unsigned>(js), offset + sizeof(void*), X86::esp);
127 m_jit.movl_mr(src * sizeof(Register), X86::edi, scratch);
128 m_jit.movl_rm(scratch, offset + sizeof(void*), X86::esp);
132 // puts an arg onto the stack, as an arg to a context threaded function.
133 ALWAYS_INLINE void CTI::emitPutArg(X86Assembler::RegisterID src, unsigned offset)
135 m_jit.movl_rm(src, offset + sizeof(void*), X86::esp);
138 ALWAYS_INLINE void CTI::emitPutArgConstant(unsigned value, unsigned offset)
140 m_jit.movl_i32m(value, offset + sizeof(void*), X86::esp);
143 ALWAYS_INLINE JSValue* CTI::getConstantImmediateNumericArg(unsigned src)
145 if (src < m_codeBlock->constantRegisters.size()) {
146 JSValue* js = m_codeBlock->constantRegisters[src].jsValue(m_exec);
147 return JSImmediate::isNumber(js) ? js : 0;
152 ALWAYS_INLINE void CTI::emitPutCTIParam(X86Assembler::RegisterID from, unsigned name)
154 m_jit.movl_rm(from, name * sizeof(void*), X86::esp);
157 ALWAYS_INLINE void CTI::emitGetCTIParam(unsigned name, X86Assembler::RegisterID to)
159 m_jit.movl_mr(name * sizeof(void*), X86::esp, to);
162 ALWAYS_INLINE void CTI::emitPutToCallFrameHeader(X86Assembler::RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
164 m_jit.movl_rm(from, -((m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize) - entry) * sizeof(Register), X86::edi);
167 ALWAYS_INLINE void CTI::emitGetFromCallFrameHeader(RegisterFile::CallFrameHeaderEntry entry, X86Assembler::RegisterID to)
169 m_jit.movl_mr(-((m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize) - entry) * sizeof(Register), X86::edi, to);
172 ALWAYS_INLINE void CTI::emitPutResult(unsigned dst, X86Assembler::RegisterID from)
174 m_jit.movl_rm(from, dst * sizeof(Register), X86::edi);
175 // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
178 #if ENABLE(SAMPLING_TOOL)
179 unsigned inCalledCode = 0;
// Overwrites the return-address slot pointed to by 'where' with 'what',
// redirecting where the pending CTI call will return to.
// NOTE(review): body restored from a truncated listing — confirm.
void ctiSetReturnAddress(void** where, void* what)
{
    *where = what;
}
// Repatches the x86 call instruction whose return address is 'where' so that
// it targets 'what': the 4 bytes immediately before the return address are the
// call's rel32 operand, which is the displacement target - return_address.
void ctiRepatchCallByReturnAddress(void* where, void* what)
{
    (static_cast<void**>(where))[-1] = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(what) - reinterpret_cast<uintptr_t>(where));
}
194 ALWAYS_INLINE void CTI::emitDebugExceptionCheck()
200 ALWAYS_INLINE void CTI::emitDebugExceptionCheck()
202 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
203 m_jit.cmpl_i32m(0, OBJECT_OFFSET(ExecState, m_exception), X86::ecx);
204 X86Assembler::JmpSrc noException = m_jit.emitUnlinkedJe();
206 m_jit.link(noException, m_jit.label());
209 void CTI::printOpcodeOperandTypes(unsigned src1, unsigned src2)
212 if (src1 < m_codeBlock->constantRegisters.size()) {
213 JSValue* js = m_codeBlock->constantRegisters[src1].jsValue(m_exec);
215 JSImmediate::isImmediate(js) ?
216 (JSImmediate::isNumber(js) ? 'i' :
217 JSImmediate::isBoolean(js) ? 'b' :
218 js->isUndefined() ? 'u' :
219 js->isNull() ? 'n' : '?')
221 (js->isString() ? 's' :
222 js->isObject() ? 'o' :
226 if (src2 < m_codeBlock->constantRegisters.size()) {
227 JSValue* js = m_codeBlock->constantRegisters[src2].jsValue(m_exec);
229 JSImmediate::isImmediate(js) ?
230 (JSImmediate::isNumber(js) ? 'i' :
231 JSImmediate::isBoolean(js) ? 'b' :
232 js->isUndefined() ? 'u' :
233 js->isNull() ? 'n' : '?')
235 (js->isString() ? 's' :
236 js->isObject() ? 'o' :
239 if ((which1 != '*') | (which2 != '*'))
240 fprintf(stderr, "Types %c %c\n", which1, which2);
245 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_j helper)
247 #if ENABLE(SAMPLING_TOOL)
248 m_jit.movl_i32m(1, &inCalledCode);
250 X86Assembler::JmpSrc call = m_jit.emitCall();
251 m_calls.append(CallRecord(call, helper, opcodeIndex));
252 emitDebugExceptionCheck();
253 #if ENABLE(SAMPLING_TOOL)
254 m_jit.movl_i32m(0, &inCalledCode);
260 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_p helper)
262 #if ENABLE(SAMPLING_TOOL)
263 m_jit.movl_i32m(1, &inCalledCode);
265 X86Assembler::JmpSrc call = m_jit.emitCall();
266 m_calls.append(CallRecord(call, helper, opcodeIndex));
267 emitDebugExceptionCheck();
268 #if ENABLE(SAMPLING_TOOL)
269 m_jit.movl_i32m(0, &inCalledCode);
275 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_b helper)
277 #if ENABLE(SAMPLING_TOOL)
278 m_jit.movl_i32m(1, &inCalledCode);
280 X86Assembler::JmpSrc call = m_jit.emitCall();
281 m_calls.append(CallRecord(call, helper, opcodeIndex));
282 emitDebugExceptionCheck();
283 #if ENABLE(SAMPLING_TOOL)
284 m_jit.movl_i32m(0, &inCalledCode);
290 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_v helper)
292 #if ENABLE(SAMPLING_TOOL)
293 m_jit.movl_i32m(1, &inCalledCode);
295 X86Assembler::JmpSrc call = m_jit.emitCall();
296 m_calls.append(CallRecord(call, helper, opcodeIndex));
297 emitDebugExceptionCheck();
298 #if ENABLE(SAMPLING_TOOL)
299 m_jit.movl_i32m(0, &inCalledCode);
305 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_s helper)
307 #if ENABLE(SAMPLING_TOOL)
308 m_jit.movl_i32m(1, &inCalledCode);
310 X86Assembler::JmpSrc call = m_jit.emitCall();
311 m_calls.append(CallRecord(call, helper, opcodeIndex));
312 emitDebugExceptionCheck();
313 #if ENABLE(SAMPLING_TOOL)
314 m_jit.movl_i32m(0, &inCalledCode);
320 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotJSCell(X86Assembler::RegisterID reg, unsigned opcodeIndex)
322 m_jit.testl_i32r(JSImmediate::TagMask, reg);
323 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), opcodeIndex));
326 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImm(X86Assembler::RegisterID reg, unsigned opcodeIndex)
328 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, reg);
329 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), opcodeIndex));
332 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImms(X86Assembler::RegisterID reg1, X86Assembler::RegisterID reg2, unsigned opcodeIndex)
334 m_jit.movl_rr(reg1, X86::ecx);
335 m_jit.andl_rr(reg2, X86::ecx);
336 emitJumpSlowCaseIfNotImm(X86::ecx, opcodeIndex);
339 ALWAYS_INLINE unsigned CTI::getDeTaggedConstantImmediate(JSValue* imm)
341 ASSERT(JSImmediate::isNumber(imm));
342 return reinterpret_cast<unsigned>(imm) & ~JSImmediate::TagBitTypeInteger;
345 ALWAYS_INLINE void CTI::emitFastArithDeTagImmediate(X86Assembler::RegisterID reg)
347 // op_mod relies on this being a sub - setting zf if result is 0.
348 m_jit.subl_i8r(JSImmediate::TagBitTypeInteger, reg);
351 ALWAYS_INLINE void CTI::emitFastArithReTagImmediate(X86Assembler::RegisterID reg)
353 m_jit.addl_i8r(JSImmediate::TagBitTypeInteger, reg);
356 ALWAYS_INLINE void CTI::emitFastArithPotentiallyReTagImmediate(X86Assembler::RegisterID reg)
358 m_jit.orl_i32r(JSImmediate::TagBitTypeInteger, reg);
361 ALWAYS_INLINE void CTI::emitFastArithImmToInt(X86Assembler::RegisterID reg)
363 m_jit.sarl_i8r(1, reg);
366 ALWAYS_INLINE void CTI::emitFastArithIntToImmOrSlowCase(X86Assembler::RegisterID reg, unsigned opcodeIndex)
368 m_jit.addl_rr(reg, reg);
369 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), opcodeIndex));
370 emitFastArithReTagImmediate(reg);
373 ALWAYS_INLINE void CTI::emitFastArithIntToImmNoCheck(X86Assembler::RegisterID reg)
375 m_jit.addl_rr(reg, reg);
376 emitFastArithReTagImmediate(reg);
379 CTI::CTI(Machine* machine, ExecState* exec, CodeBlock* codeBlock)
380 : m_jit(machine->jitCodeBuffer())
383 , m_codeBlock(codeBlock)
384 , m_labels(codeBlock ? codeBlock->instructions.size() : 0)
385 , m_structureStubCompilationInfo(codeBlock ? codeBlock->structureIDInstructions.size() : 0)
// Compiles a generic binary-operator opcode: pass the two source operands to
// the cti_<name> helper and store its result in the destination register.
// Advances i past the 4-word instruction (opcode, dst, src1, src2).
#define CTI_COMPILE_BINARY_OP(name) \
    case name: { \
        emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
        emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
        emitCall(i, Machine::cti_##name); \
        emitPutResult(instruction[i + 1].u.operand); \
        i += 4; \
        break; \
    }
// Compiles a generic unary-operator opcode: pass the single source operand to
// the cti_<name> helper and store its result in the destination register.
// Advances i past the 3-word instruction (opcode, dst, src).
#define CTI_COMPILE_UNARY_OP(name) \
    case name: { \
        emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
        emitCall(i, Machine::cti_##name); \
        emitPutResult(instruction[i + 1].u.operand); \
        i += 3; \
        break; \
    }
408 #if ENABLE(SAMPLING_TOOL)
409 OpcodeID currentOpcodeID = static_cast<OpcodeID>(-1);
412 void CTI::compileOpCall(Instruction* instruction, unsigned i, CompileOpCallType type)
414 if (type == OpConstruct) {
415 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 16);
416 emitPutArgConstant(instruction[i + 5].u.operand, 12);
417 emitPutArgConstant(instruction[i + 4].u.operand, 8);
418 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
420 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 16);
421 emitPutArgConstant(instruction[i + 5].u.operand, 12);
422 emitPutArgConstant(instruction[i + 4].u.operand, 8);
423 // FIXME: should this be loaded dynamically off m_exec?
424 int thisVal = instruction[i + 3].u.operand;
425 if (thisVal == missingThisObjectMarker()) {
426 emitPutArgConstant(reinterpret_cast<unsigned>(m_exec->globalThisValue()), 4);
428 emitGetPutArg(thisVal, 4, X86::ecx);
431 X86Assembler::JmpSrc wasEval;
432 if (type == OpCallEval) {
433 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
434 emitCall(i, Machine::cti_op_call_eval);
435 m_jit.emitRestoreArgumentReference();
437 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
439 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(JSImmediate::impossibleValue()), X86::eax);
440 wasEval = m_jit.emitUnlinkedJne();
442 // this reloads the first arg into ecx (checked just below).
443 emitGetArg(instruction[i + 2].u.operand, X86::ecx);
445 // this sets up the first arg, and explicitly leaves the value in ecx (checked just below).
446 emitGetArg(instruction[i + 2].u.operand, X86::ecx);
447 emitPutArg(X86::ecx, 0);
450 // Fast check for JS function.
451 m_jit.testl_i32r(JSImmediate::TagMask, X86::ecx);
452 X86Assembler::JmpSrc isNotObject = m_jit.emitUnlinkedJne();
453 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsFunctionVptr), X86::ecx);
454 X86Assembler::JmpSrc isJSFunction = m_jit.emitUnlinkedJe();
455 m_jit.link(isNotObject, m_jit.label());
457 // This handles host functions
458 emitCall(i, ((type == OpConstruct) ? Machine::cti_op_construct_NotJSConstruct : Machine::cti_op_call_NotJSFunction));
459 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
461 X86Assembler::JmpSrc wasNotJSFunction = m_jit.emitUnlinkedJmp();
462 m_jit.link(isJSFunction, m_jit.label());
464 // This handles JSFunctions
465 emitCall(i, ((type == OpConstruct) ? Machine::cti_op_construct_JSConstruct : Machine::cti_op_call_JSFunction));
466 m_jit.call_r(X86::eax);
467 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
469 X86Assembler::JmpDst end = m_jit.label();
470 m_jit.link(wasNotJSFunction, end);
471 if (type == OpCallEval)
472 m_jit.link(wasEval, end);
474 emitPutResult(instruction[i + 1].u.operand);
477 void CTI::emitSlowScriptCheck(unsigned opcodeIndex)
479 m_jit.subl_i8r(1, X86::esi);
480 X86Assembler::JmpSrc skipTimeout = m_jit.emitUnlinkedJne();
481 emitCall(opcodeIndex, Machine::cti_timeout_check);
483 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
484 m_jit.movl_mr(OBJECT_OFFSET(ExecState, m_globalData), X86::ecx, X86::ecx);
485 m_jit.movl_mr(OBJECT_OFFSET(JSGlobalData, machine), X86::ecx, X86::ecx);
486 m_jit.movl_mr(OBJECT_OFFSET(Machine, m_ticksUntilNextTimeoutCheck), X86::ecx, X86::esi);
487 m_jit.link(skipTimeout, m_jit.label());
490 void CTI::privateCompileMainPass()
492 Instruction* instruction = m_codeBlock->instructions.begin();
493 unsigned instructionCount = m_codeBlock->instructions.size();
495 unsigned structureIDInstructionIndex = 0;
497 for (unsigned i = 0; i < instructionCount; ) {
498 m_labels[i] = m_jit.label();
500 #if ENABLE(SAMPLING_TOOL)
501 m_jit.movl_i32m(m_machine->getOpcodeID(instruction[i].u.opcode), ¤tOpcodeID);
504 ASSERT_WITH_MESSAGE(m_machine->isOpcode(instruction[i].u.opcode), "privateCompileMainPass gone bad @ %d", i);
505 m_jit.emitRestoreArgumentReference();
506 switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
508 unsigned src = instruction[i + 2].u.operand;
509 if (src < m_codeBlock->constantRegisters.size())
510 m_jit.movl_i32r(reinterpret_cast<unsigned>(m_codeBlock->constantRegisters[src].jsValue(m_exec)), X86::edx);
512 emitGetArg(src, X86::edx);
513 emitPutResult(instruction[i + 1].u.operand, X86::edx);
518 unsigned dst = instruction[i + 1].u.operand;
519 unsigned src1 = instruction[i + 2].u.operand;
520 unsigned src2 = instruction[i + 3].u.operand;
521 if (src2 < m_codeBlock->constantRegisters.size()) {
522 JSValue* value = m_codeBlock->constantRegisters[src2].jsValue(m_exec);
523 if (JSImmediate::isNumber(value)) {
524 emitGetArg(src1, X86::eax);
525 emitJumpSlowCaseIfNotImm(X86::eax, i);
526 m_jit.addl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
527 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
532 } else if (!(src1 < m_codeBlock->constantRegisters.size())) {
533 emitGetArg(src1, X86::eax);
534 emitGetArg(src2, X86::edx);
535 emitJumpSlowCaseIfNotImms(X86::eax, X86::edx, i);
536 emitFastArithDeTagImmediate(X86::eax);
537 m_jit.addl_rr(X86::edx, X86::eax);
538 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
543 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
544 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
545 emitCall(i, Machine::cti_op_add);
546 emitPutResult(instruction[i + 1].u.operand);
551 if (m_codeBlock->needsFullScopeChain)
552 emitCall(i, Machine::cti_op_end);
553 emitGetArg(instruction[i + 1].u.operand, X86::eax);
554 #if ENABLE(SAMPLING_TOOL)
555 m_jit.movl_i32m(-1, ¤tOpcodeID);
557 m_jit.pushl_m(-((m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize) - RegisterFile::CTIReturnEIP) * sizeof(Register), X86::edi);
563 unsigned target = instruction[i + 1].u.operand;
564 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
569 int srcDst = instruction[i + 1].u.operand;
570 emitGetArg(srcDst, X86::eax);
571 emitJumpSlowCaseIfNotImm(X86::eax, i);
572 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
573 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
574 emitPutResult(srcDst, X86::eax);
579 emitSlowScriptCheck(i);
581 unsigned target = instruction[i + 1].u.operand;
582 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
586 case op_loop_if_less: {
587 emitSlowScriptCheck(i);
589 unsigned target = instruction[i + 3].u.operand;
590 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
592 emitGetArg(instruction[i + 1].u.operand, X86::edx);
593 emitJumpSlowCaseIfNotImm(X86::edx, i);
594 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
595 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
597 emitGetArg(instruction[i + 1].u.operand, X86::eax);
598 emitGetArg(instruction[i + 2].u.operand, X86::edx);
599 emitJumpSlowCaseIfNotImm(X86::eax, i);
600 emitJumpSlowCaseIfNotImm(X86::edx, i);
601 m_jit.cmpl_rr(X86::edx, X86::eax);
602 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
607 case op_loop_if_lesseq: {
608 emitSlowScriptCheck(i);
610 unsigned target = instruction[i + 3].u.operand;
611 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
613 emitGetArg(instruction[i + 1].u.operand, X86::edx);
614 emitJumpSlowCaseIfNotImm(X86::edx, i);
615 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
616 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
618 emitGetArg(instruction[i + 1].u.operand, X86::eax);
619 emitGetArg(instruction[i + 2].u.operand, X86::edx);
620 emitJumpSlowCaseIfNotImm(X86::eax, i);
621 emitJumpSlowCaseIfNotImm(X86::edx, i);
622 m_jit.cmpl_rr(X86::edx, X86::eax);
623 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
628 case op_new_object: {
629 emitCall(i, Machine::cti_op_new_object);
630 emitPutResult(instruction[i + 1].u.operand);
635 // In order to be able to repatch both the StructureID, and the object offset, we store one pointer,
636 // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
637 // such that the StructureID & offset are always at the same distance from this.
639 emitGetArg(instruction[i + 1].u.operand, X86::eax);
640 emitGetArg(instruction[i + 3].u.operand, X86::edx);
642 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
643 X86Assembler::JmpDst hotPathBegin = m_jit.label();
644 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
645 ++structureIDInstructionIndex;
647 // Jump to a slow case if either the base object is an immediate, or if the StructureID does not match.
648 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
649 // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
650 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
651 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdStructureID);
652 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
654 // Plant a load from a bogus ofset in the object's property map; we will patch this later, if it is to be used.
655 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
656 m_jit.movl_rm(X86::edx, repatchGetByIdDefaultOffset, X86::eax);
657 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdPropertyMapOffset);
663 // As for put_by_id, get_by_id requires the offset of the StructureID and the offset of the access to be repatched.
664 // Additionally, for get_by_id we need repatch the offset of the branch to the slow case (we repatch this to jump
665 // to array-length / prototype access tranpolines, and finally we also the the property-map access offset as a label
666 // to jump back to if one of these trampolies finds a match.
668 emitGetArg(instruction[i + 2].u.operand, X86::eax);
670 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
672 X86Assembler::JmpDst hotPathBegin = m_jit.label();
673 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
674 ++structureIDInstructionIndex;
676 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
677 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
678 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdStructureID);
679 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
680 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdBranchToSlowCase);
682 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
683 m_jit.movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::ecx);
684 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdPropertyMapOffset);
685 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
690 case op_instanceof: {
691 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
692 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
693 emitGetPutArg(instruction[i + 4].u.operand, 8, X86::ecx);
694 emitCall(i, Machine::cti_op_instanceof);
695 emitPutResult(instruction[i + 1].u.operand);
700 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
701 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
702 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
703 emitCall(i, Machine::cti_op_del_by_id);
704 emitPutResult(instruction[i + 1].u.operand);
709 unsigned dst = instruction[i + 1].u.operand;
710 unsigned src1 = instruction[i + 2].u.operand;
711 unsigned src2 = instruction[i + 3].u.operand;
712 if (src1 < m_codeBlock->constantRegisters.size() || src2 < m_codeBlock->constantRegisters.size()) {
713 unsigned constant = src1;
714 unsigned nonconstant = src2;
715 if (!(src1 < m_codeBlock->constantRegisters.size())) {
719 JSValue* value = m_codeBlock->constantRegisters[constant].jsValue(m_exec);
720 if (JSImmediate::isNumber(value)) {
721 emitGetArg(nonconstant, X86::eax);
722 emitJumpSlowCaseIfNotImm(X86::eax, i);
723 emitFastArithImmToInt(X86::eax);
724 m_jit.imull_i32r( X86::eax, getDeTaggedConstantImmediate(value), X86::eax);
725 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
726 emitFastArithPotentiallyReTagImmediate(X86::eax);
733 emitGetArg(src1, X86::eax);
734 emitGetArg(src2, X86::edx);
735 emitJumpSlowCaseIfNotImms(X86::eax, X86::edx, i);
736 emitFastArithDeTagImmediate(X86::eax);
737 emitFastArithImmToInt(X86::edx);
738 m_jit.imull_rr(X86::edx, X86::eax);
739 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
740 emitFastArithPotentiallyReTagImmediate(X86::eax);
746 FuncDeclNode* func = (m_codeBlock->functions[instruction[i + 2].u.operand]).get();
747 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
748 emitCall(i, Machine::cti_op_new_func);
749 emitPutResult(instruction[i + 1].u.operand);
754 compileOpCall(instruction, i);
758 case op_get_global_var: {
759 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 2].u.jsCell);
760 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
761 emitGetVariableObjectRegister(X86::eax, instruction[i + 3].u.operand, X86::eax);
762 emitPutResult(instruction[i + 1].u.operand, X86::eax);
766 case op_put_global_var: {
767 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 1].u.jsCell);
768 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
769 emitGetArg(instruction[i + 3].u.operand, X86::edx);
770 emitPutVariableObjectRegister(X86::edx, X86::eax, instruction[i + 2].u.operand);
774 case op_get_scoped_var: {
775 int skip = instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain;
777 emitGetCTIParam(CTI_ARGS_scopeChain, X86::eax);
779 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::eax, X86::eax);
781 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::eax, X86::eax);
782 emitGetVariableObjectRegister(X86::eax, instruction[i + 2].u.operand, X86::eax);
783 emitPutResult(instruction[i + 1].u.operand);
787 case op_put_scoped_var: {
788 int skip = instruction[i + 2].u.operand + m_codeBlock->needsFullScopeChain;
790 emitGetCTIParam(CTI_ARGS_scopeChain, X86::edx);
791 emitGetArg(instruction[i + 3].u.operand, X86::eax);
793 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::edx, X86::edx);
795 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::edx, X86::edx);
796 emitPutVariableObjectRegister(X86::eax, X86::edx, instruction[i + 1].u.operand);
801 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
802 emitCall(i, Machine::cti_op_ret);
804 m_jit.pushl_m(-((m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize) - RegisterFile::CTIReturnEIP) * sizeof(Register), X86::edi);
810 m_jit.leal_mr(sizeof(Register) * instruction[i + 2].u.operand, X86::edi, X86::edx);
811 emitPutArg(X86::edx, 0);
812 emitPutArgConstant(instruction[i + 3].u.operand, 4);
813 emitCall(i, Machine::cti_op_new_array);
814 emitPutResult(instruction[i + 1].u.operand);
819 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
820 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
821 emitCall(i, Machine::cti_op_resolve);
822 emitPutResult(instruction[i + 1].u.operand);
827 compileOpCall(instruction, i, OpConstruct);
831 case op_construct_verify: {
832 emitPutArgConstant(instruction[i + 1].u.operand, 0);
833 emitPutArgConstant(instruction[i + 2].u.operand, 4);
834 emitCall(i, Machine::cti_op_construct_verify);
838 case op_get_by_val: {
839 emitGetArg(instruction[i + 2].u.operand, X86::eax);
840 emitGetArg(instruction[i + 3].u.operand, X86::edx);
841 emitJumpSlowCaseIfNotImm(X86::edx, i);
842 emitFastArithImmToInt(X86::edx);
843 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
844 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
845 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
846 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
848 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
849 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
850 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
851 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
853 // Get the value from the vector
854 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::eax);
855 emitPutResult(instruction[i + 1].u.operand);
859 case op_resolve_func: {
860 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
861 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
862 emitCall(i, Machine::cti_op_resolve_func);
863 emitPutResult(instruction[i + 1].u.operand);
864 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
865 emitPutResult(instruction[i + 2].u.operand);
870 emitGetArg(instruction[i + 2].u.operand, X86::eax);
871 emitGetArg(instruction[i + 3].u.operand, X86::edx);
872 emitJumpSlowCaseIfNotImms(X86::eax, X86::edx, i);
873 m_jit.subl_rr(X86::edx, X86::eax);
874 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
875 emitFastArithReTagImmediate(X86::eax);
876 emitPutResult(instruction[i + 1].u.operand);
880 case op_put_by_val: {
881 emitGetArg(instruction[i + 1].u.operand, X86::eax);
882 emitGetArg(instruction[i + 2].u.operand, X86::edx);
883 emitJumpSlowCaseIfNotImm(X86::edx, i);
884 emitFastArithImmToInt(X86::edx);
885 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
886 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
887 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
888 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
890 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
891 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
892 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
893 X86Assembler::JmpSrc inFastVector = m_jit.emitUnlinkedJa();
894 // No; oh well, check if the access if within the vector - if so, we may still be okay.
895 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
896 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
898 // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
899 // FIXME: should be able to handle initial write to array; increment the the number of items in the array, and potentially update fast access cutoff.
900 m_jit.cmpl_i8m(0, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
901 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
903 // All good - put the value into the array.
904 m_jit.link(inFastVector, m_jit.label());
905 emitGetArg(instruction[i + 3].u.operand, X86::eax);
906 m_jit.movl_rm(X86::eax, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
910 CTI_COMPILE_BINARY_OP(op_lesseq)
911 case op_loop_if_true: {
912 emitSlowScriptCheck(i);
914 unsigned target = instruction[i + 2].u.operand;
915 emitGetArg(instruction[i + 1].u.operand, X86::eax);
917 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
918 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
919 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
920 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
922 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
923 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
924 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
925 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
927 m_jit.link(isZero, m_jit.label());
931 case op_resolve_base: {
932 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
933 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
934 emitCall(i, Machine::cti_op_resolve_base);
935 emitPutResult(instruction[i + 1].u.operand);
940 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
941 emitCall(i, Machine::cti_op_negate);
942 emitPutResult(instruction[i + 1].u.operand);
946 case op_resolve_skip: {
947 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
948 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
949 emitPutArgConstant(instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain, 4);
950 emitCall(i, Machine::cti_op_resolve_skip);
951 emitPutResult(instruction[i + 1].u.operand);
955 CTI_COMPILE_BINARY_OP(op_div)
957 int srcDst = instruction[i + 1].u.operand;
958 emitGetArg(srcDst, X86::eax);
959 emitJumpSlowCaseIfNotImm(X86::eax, i);
960 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
961 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
962 emitPutResult(srcDst, X86::eax);
967 unsigned target = instruction[i + 3].u.operand;
968 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
970 emitGetArg(instruction[i + 1].u.operand, X86::edx);
971 emitJumpSlowCaseIfNotImm(X86::edx, i);
972 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
973 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
975 emitGetArg(instruction[i + 1].u.operand, X86::eax);
976 emitGetArg(instruction[i + 2].u.operand, X86::edx);
977 emitJumpSlowCaseIfNotImm(X86::eax, i);
978 emitJumpSlowCaseIfNotImm(X86::edx, i);
979 m_jit.cmpl_rr(X86::edx, X86::eax);
980 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
986 emitGetArg(instruction[i + 2].u.operand, X86::eax);
987 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
988 m_jit.testl_i32r(JSImmediate::FullTagTypeMask, X86::eax); // i8?
989 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
990 m_jit.xorl_i8r((JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue), X86::eax);
991 emitPutResult(instruction[i + 1].u.operand);
996 unsigned target = instruction[i + 2].u.operand;
997 emitGetArg(instruction[i + 1].u.operand, X86::eax);
999 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1000 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1001 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1002 X86Assembler::JmpSrc isNonZero = m_jit.emitUnlinkedJne();
1004 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1005 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1006 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1007 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1009 m_jit.link(isNonZero, m_jit.label());
1014 int srcDst = instruction[i + 2].u.operand;
1015 emitGetArg(srcDst, X86::eax);
1016 m_jit.movl_rr(X86::eax, X86::edx);
1017 emitJumpSlowCaseIfNotImm(X86::eax, i);
1018 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1019 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1020 emitPutResult(srcDst, X86::edx);
1021 emitPutResult(instruction[i + 1].u.operand);
1025 case op_unexpected_load: {
1026 JSValue* v = m_codeBlock->unexpectedConstants[instruction[i + 2].u.operand];
1027 m_jit.movl_i32r(reinterpret_cast<unsigned>(v), X86::eax);
1028 emitPutResult(instruction[i + 1].u.operand);
1033 int retAddrDst = instruction[i + 1].u.operand;
1034 int target = instruction[i + 2].u.operand;
1035 m_jit.movl_i32m(0, sizeof(Register) * retAddrDst, X86::edi);
1036 X86Assembler::JmpDst addrPosition = m_jit.label();
1037 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1038 X86Assembler::JmpDst sretTarget = m_jit.label();
1039 m_jsrSites.append(JSRInfo(addrPosition, sretTarget));
1044 m_jit.jmp_m(sizeof(Register) * instruction[i + 1].u.operand, X86::edi);
1048 CTI_COMPILE_BINARY_OP(op_eq)
1050 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1051 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1052 emitJumpSlowCaseIfNotImm(X86::eax, i);
1053 emitJumpSlowCaseIfNotImm(X86::ecx, i);
1054 emitFastArithImmToInt(X86::eax);
1055 emitFastArithImmToInt(X86::ecx);
1056 m_jit.shll_CLr(X86::eax);
1057 emitFastArithIntToImmOrSlowCase(X86::eax, i);
1058 emitPutResult(instruction[i + 1].u.operand);
1063 unsigned src1 = instruction[i + 2].u.operand;
1064 unsigned src2 = instruction[i + 3].u.operand;
1065 unsigned dst = instruction[i + 1].u.operand;
1066 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
1067 emitGetArg(src2, X86::eax);
1068 emitJumpSlowCaseIfNotImm(X86::eax, i);
1069 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax); // FIXME: make it more obvious this is relying on the format of JSImmediate
1071 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
1072 emitGetArg(src1, X86::eax);
1073 emitJumpSlowCaseIfNotImm(X86::eax, i);
1074 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax);
1077 emitGetArg(src1, X86::eax);
1078 emitGetArg(src2, X86::edx);
1079 m_jit.andl_rr(X86::edx, X86::eax);
1080 emitJumpSlowCaseIfNotImm(X86::eax, i);
1087 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1088 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1089 emitJumpSlowCaseIfNotImm(X86::eax, i);
1090 emitJumpSlowCaseIfNotImm(X86::ecx, i);
1091 emitFastArithImmToInt(X86::ecx);
1092 m_jit.sarl_CLr(X86::eax);
1093 emitFastArithPotentiallyReTagImmediate(X86::eax);
1094 emitPutResult(instruction[i + 1].u.operand);
1099 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1100 emitJumpSlowCaseIfNotImm(X86::eax, i);
1101 m_jit.xorl_i8r(~JSImmediate::TagBitTypeInteger, X86::eax);
1102 emitPutResult(instruction[i + 1].u.operand);
1106 case op_resolve_with_base: {
1107 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1108 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1109 emitCall(i, Machine::cti_op_resolve_with_base);
1110 emitPutResult(instruction[i + 1].u.operand);
1111 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
1112 emitPutResult(instruction[i + 2].u.operand);
1116 case op_new_func_exp: {
1117 FuncExprNode* func = (m_codeBlock->functionExpressions[instruction[i + 2].u.operand]).get();
1118 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
1119 emitCall(i, Machine::cti_op_new_func_exp);
1120 emitPutResult(instruction[i + 1].u.operand);
1125 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1126 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1127 emitJumpSlowCaseIfNotImm(X86::eax, i);
1128 emitJumpSlowCaseIfNotImm(X86::ecx, i);
1129 emitFastArithDeTagImmediate(X86::eax);
1130 emitFastArithDeTagImmediate(X86::ecx);
1131 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i)); // This is checking if the last detag resulted in a value 0.
1133 m_jit.idivl_r(X86::ecx);
1134 emitFastArithReTagImmediate(X86::edx);
1135 m_jit.movl_rr(X86::edx, X86::eax);
1136 emitPutResult(instruction[i + 1].u.operand);
1141 unsigned target = instruction[i + 2].u.operand;
1142 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1144 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1145 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
1146 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1147 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
1149 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1150 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1151 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1152 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1154 m_jit.link(isZero, m_jit.label());
1158 CTI_COMPILE_BINARY_OP(op_less)
1159 CTI_COMPILE_BINARY_OP(op_neq)
1161 int srcDst = instruction[i + 2].u.operand;
1162 emitGetArg(srcDst, X86::eax);
1163 m_jit.movl_rr(X86::eax, X86::edx);
1164 emitJumpSlowCaseIfNotImm(X86::eax, i);
1165 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1166 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1167 emitPutResult(srcDst, X86::edx);
1168 emitPutResult(instruction[i + 1].u.operand);
1172 CTI_COMPILE_BINARY_OP(op_urshift)
1174 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1175 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1176 emitJumpSlowCaseIfNotImms(X86::eax, X86::edx, i);
1177 m_jit.xorl_rr(X86::edx, X86::eax);
1178 emitFastArithReTagImmediate(X86::eax);
1179 emitPutResult(instruction[i + 1].u.operand);
1183 case op_new_regexp: {
1184 RegExp* regExp = m_codeBlock->regexps[instruction[i + 2].u.operand].get();
1185 emitPutArgConstant(reinterpret_cast<unsigned>(regExp), 0);
1186 emitCall(i, Machine::cti_op_new_regexp);
1187 emitPutResult(instruction[i + 1].u.operand);
1192 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1193 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1194 emitJumpSlowCaseIfNotImms(X86::eax, X86::edx, i);
1195 m_jit.orl_rr(X86::edx, X86::eax);
1196 emitPutResult(instruction[i + 1].u.operand);
1200 case op_call_eval: {
1201 compileOpCall(instruction, i, OpCallEval);
1206 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1207 emitCall(i, Machine::cti_op_throw);
1208 m_jit.addl_i8r(0x24, X86::esp);
1209 m_jit.popl_r(X86::edi);
1210 m_jit.popl_r(X86::esi);
1215 case op_get_pnames: {
1216 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1217 emitCall(i, Machine::cti_op_get_pnames);
1218 emitPutResult(instruction[i + 1].u.operand);
1222 case op_next_pname: {
1223 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1224 unsigned target = instruction[i + 3].u.operand;
1225 emitCall(i, Machine::cti_op_next_pname);
1226 m_jit.testl_rr(X86::eax, X86::eax);
1227 X86Assembler::JmpSrc endOfIter = m_jit.emitUnlinkedJe();
1228 emitPutResult(instruction[i + 1].u.operand);
1229 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 3 + target));
1230 m_jit.link(endOfIter, m_jit.label());
1234 case op_push_scope: {
1235 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1236 emitCall(i, Machine::cti_op_push_scope);
1240 case op_pop_scope: {
1241 emitCall(i, Machine::cti_op_pop_scope);
1245 CTI_COMPILE_UNARY_OP(op_typeof)
1246 CTI_COMPILE_UNARY_OP(op_is_undefined)
1247 CTI_COMPILE_UNARY_OP(op_is_boolean)
1248 CTI_COMPILE_UNARY_OP(op_is_number)
1249 CTI_COMPILE_UNARY_OP(op_is_string)
1250 CTI_COMPILE_UNARY_OP(op_is_object)
1251 CTI_COMPILE_UNARY_OP(op_is_function)
1252 CTI_COMPILE_BINARY_OP(op_stricteq)
1253 CTI_COMPILE_BINARY_OP(op_nstricteq)
1254 case op_to_jsnumber: {
1255 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1256 emitCall(i, Machine::cti_op_to_jsnumber);
1257 emitPutResult(instruction[i + 1].u.operand);
1262 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1263 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1264 emitCall(i, Machine::cti_op_in);
1265 emitPutResult(instruction[i + 1].u.operand);
1269 case op_push_new_scope: {
1270 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1271 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1272 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1273 emitCall(i, Machine::cti_op_push_new_scope);
1274 emitPutResult(instruction[i + 1].u.operand);
1279 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
1280 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
1281 m_jit.movl_mr(OBJECT_OFFSET(ExecState, m_exception), X86::ecx, X86::eax);
1282 m_jit.movl_i32m(0, OBJECT_OFFSET(ExecState, m_exception), X86::ecx);
1283 emitPutResult(instruction[i + 1].u.operand);
1287 case op_jmp_scopes: {
1288 unsigned count = instruction[i + 1].u.operand;
1289 emitPutArgConstant(count, 0);
1290 emitCall(i, Machine::cti_op_jmp_scopes);
1291 unsigned target = instruction[i + 2].u.operand;
1292 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1296 case op_put_by_index: {
1297 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1298 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1299 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1300 emitCall(i, Machine::cti_op_put_by_index);
1304 case op_switch_imm: {
1305 unsigned tableIndex = instruction[i + 1].u.operand;
1306 unsigned defaultOffset = instruction[i + 2].u.operand;
1307 unsigned scrutinee = instruction[i + 3].u.operand;
1309 // create jump table for switch destinations, track this switch statement.
1310 SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTables[tableIndex];
1311 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Immediate));
1312 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
1314 emitGetPutArg(scrutinee, 0, X86::ecx);
1315 emitPutArgConstant(tableIndex, 4);
1316 emitCall(i, Machine::cti_op_switch_imm);
1317 m_jit.jmp_r(X86::eax);
1321 case op_switch_char: {
1322 unsigned tableIndex = instruction[i + 1].u.operand;
1323 unsigned defaultOffset = instruction[i + 2].u.operand;
1324 unsigned scrutinee = instruction[i + 3].u.operand;
1326 // create jump table for switch destinations, track this switch statement.
1327 SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTables[tableIndex];
1328 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Character));
1329 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
1331 emitGetPutArg(scrutinee, 0, X86::ecx);
1332 emitPutArgConstant(tableIndex, 4);
1333 emitCall(i, Machine::cti_op_switch_char);
1334 m_jit.jmp_r(X86::eax);
1338 case op_switch_string: {
1339 unsigned tableIndex = instruction[i + 1].u.operand;
1340 unsigned defaultOffset = instruction[i + 2].u.operand;
1341 unsigned scrutinee = instruction[i + 3].u.operand;
1343 // create jump table for switch destinations, track this switch statement.
1344 StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTables[tableIndex];
1345 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset));
1347 emitGetPutArg(scrutinee, 0, X86::ecx);
1348 emitPutArgConstant(tableIndex, 4);
1349 emitCall(i, Machine::cti_op_switch_string);
1350 m_jit.jmp_r(X86::eax);
1354 case op_del_by_val: {
1355 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1356 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1357 emitCall(i, Machine::cti_op_del_by_val);
1358 emitPutResult(instruction[i + 1].u.operand);
1362 case op_put_getter: {
1363 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1364 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1365 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1366 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1367 emitCall(i, Machine::cti_op_put_getter);
1371 case op_put_setter: {
1372 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1373 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1374 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1375 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1376 emitCall(i, Machine::cti_op_put_setter);
1380 case op_new_error: {
1381 JSValue* message = m_codeBlock->unexpectedConstants[instruction[i + 3].u.operand];
1382 emitPutArgConstant(instruction[i + 2].u.operand, 0);
1383 emitPutArgConstant(reinterpret_cast<unsigned>(message), 4);
1384 emitPutArgConstant(m_codeBlock->lineNumberForVPC(&instruction[i]), 8);
1385 emitCall(i, Machine::cti_op_new_error);
1386 emitPutResult(instruction[i + 1].u.operand);
1391 emitPutArgConstant(instruction[i + 1].u.operand, 0);
1392 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1393 emitPutArgConstant(instruction[i + 3].u.operand, 8);
1394 emitCall(i, Machine::cti_op_debug);
1399 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1400 emitCall(i, Machine::cti_op_eq_null);
1401 emitPutResult(instruction[i + 1].u.operand);
1406 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1407 emitCall(i, Machine::cti_op_neq_null);
1408 emitPutResult(instruction[i + 1].u.operand);
1412 case op_get_array_length:
1413 case op_get_by_id_chain:
1414 case op_get_by_id_generic:
1415 case op_get_by_id_proto:
1416 case op_get_by_id_self:
1417 case op_get_string_length:
1418 case op_put_by_id_generic:
1419 case op_put_by_id_replace:
1420 case op_put_by_id_transition:
1421 ASSERT_NOT_REACHED();
1425 ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
// Link pass: resolve the intra-function jumps recorded during the main pass.
// Each JmpTable entry pairs an unlinked jump source (from) with the bytecode
// index of its target (to); m_labels maps bytecode indices to the labels
// emitted for them, so linking is a straight table walk.
1429 void CTI::privateCompileLinkPass()
// Hoist size() out of the loop; no entries are appended while linking runs.
1431 unsigned jmpTableCount = m_jmpTable.size();
1432 for (unsigned i = 0; i < jmpTableCount; ++i)
1433 m_jit.link(m_jmpTable[i].from, m_labels[m_jmpTable[i].to]);
// Generic slow-case body for binary operators whose hot path was emitted via
// CTI_COMPILE_BINARY_OP: link the hot path's bail-out jump to here, marshal the
// two source operands into CTI argument slots 0 and 4, call the corresponding
// Machine::cti_<name> C++ helper, and store its result in the destination
// register. (NOTE(review): the extraction appears to have dropped this macro's
// surrounding `case name:` / `break;` continuation lines — confirm against the
// original file before editing.)
1437 #define CTI_COMPILE_BINARY_OP_SLOW_CASE(name) \
1439 m_jit.link(iter->from, m_jit.label()); \
1440 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
1441 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
1442 emitCall(i, Machine::cti_##name); \
1443 emitPutResult(instruction[i + 1].u.operand); \
// Slow-case pass: for every SlowCaseEntry recorded by the main pass, emit the
// out-of-line fallback code. Each case links the hot path's bail-out jump(s)
// (one SlowCaseEntry per jump, consumed in order via `iter->from` /
// `(++iter)->from` — the consumption order must match the emission order in
// the main pass), marshals operands into the CTI argument area, and calls the
// matching Machine::cti_* C++ helper.
// NOTE(review): this extraction has dropped several `case op_...:` labels,
// `i += N; break;` advances, and closing braces; the opcodes below are
// identified from the cti_* helper each section calls — confirm against the
// original file.
1448 void CTI::privateCompileSlowCases()
1450 unsigned structureIDInstructionIndex = 0;
1452 Instruction* instruction = m_codeBlock->instructions.begin();
1453 for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end(); ++iter) {
// iter->to is the bytecode index of the instruction whose slow case this is.
1454 unsigned i = iter->to;
1455 m_jit.emitRestoreArgumentReference();
1456 switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
// --- op_add slow path (calls cti_op_add) ---
1458 unsigned dst = instruction[i + 1].u.operand;
1459 unsigned src2 = instruction[i + 3].u.operand;
// Variant 1: rhs is a constant immediate number. The subl undoes the hot
// path's speculative constant add (overflow bail-out); the notImm link skips
// the undo because no add happened on the not-immediate bail-out.
1460 if (src2 < m_codeBlock->constantRegisters.size()) {
1461 JSValue* value = m_codeBlock->constantRegisters[src2].jsValue(m_exec);
1462 if (JSImmediate::isNumber(value)) {
1463 X86Assembler::JmpSrc notImm = iter->from;
1464 m_jit.link((++iter)->from, m_jit.label());
1465 m_jit.subl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
1466 m_jit.link(notImm, m_jit.label());
1467 emitPutArg(X86::eax, 0);
1468 emitGetPutArg(src2, 4, X86::ecx);
1469 emitCall(i, Machine::cti_op_add);
// Variant 2: neither operand constant — undo the speculative addl (subl_rr)
// and restore the tag before calling the helper with both register operands.
1476 ASSERT(!(static_cast<unsigned>(instruction[i + 2].u.operand) < m_codeBlock->constantRegisters.size()));
1478 X86Assembler::JmpSrc notImm = iter->from;
1479 m_jit.link((++iter)->from, m_jit.label());
1480 m_jit.subl_rr(X86::edx, X86::eax);
1481 emitFastArithReTagImmediate(X86::eax);
1482 m_jit.link(notImm, m_jit.label());
1483 emitPutArg(X86::eax, 0);
1484 emitPutArg(X86::edx, 4);
1485 emitCall(i, Machine::cti_op_add);
1490 case op_get_by_val: {
1491 // The slow case that handles accesses to arrays (below) may jump back up to here.
1492 X86Assembler::JmpDst beginGetByValSlow = m_jit.label();
1494 X86Assembler::JmpSrc notImm = iter->from;
1495 m_jit.link((++iter)->from, m_jit.label());
1496 m_jit.link((++iter)->from, m_jit.label());
// Re-box the (already untagged) index before handing it to the C++ helper.
1497 emitFastArithIntToImmNoCheck(X86::edx);
1498 m_jit.link(notImm, m_jit.label());
1499 emitPutArg(X86::eax, 0);
1500 emitPutArg(X86::edx, 4);
1501 emitCall(i, Machine::cti_op_get_by_val);
1502 emitPutResult(instruction[i + 1].u.operand);
// Rejoin the hot path after this 4-word opcode.
1503 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
1505 // This is slow case that handles accesses to arrays above the fast cut-off.
1506 // First, check if this is an access to the vector
1507 m_jit.link((++iter)->from, m_jit.label());
1508 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
1509 m_jit.link(m_jit.emitUnlinkedJbe(), beginGetByValSlow);
1511 // okay, missed the fast region, but it is still in the vector. Get the value.
1512 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::ecx);
1513 // Check whether the value loaded is zero; if so we need to return undefined.
1514 m_jit.testl_rr(X86::ecx, X86::ecx);
1515 m_jit.link(m_jit.emitUnlinkedJe(), beginGetByValSlow);
1516 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
// --- op_sub slow path: addl_rr undoes the hot path's speculative subtract
// on overflow; the notImm bail-out skips the undo. ---
1522 X86Assembler::JmpSrc notImm = iter->from;
1523 m_jit.link((++iter)->from, m_jit.label());
1524 m_jit.addl_rr(X86::edx, X86::eax);
1525 m_jit.link(notImm, m_jit.label());
1526 emitPutArg(X86::eax, 0);
1527 emitPutArg(X86::edx, 4);
1528 emitCall(i, Machine::cti_op_sub);
1529 emitPutResult(instruction[i + 1].u.operand);
// --- op_rshift slow path (calls cti_op_rshift) ---
1534 m_jit.link(iter->from, m_jit.label());
1535 m_jit.link((++iter)->from, m_jit.label());
1536 emitPutArg(X86::eax, 0);
1537 emitPutArg(X86::ecx, 4);
1538 emitCall(i, Machine::cti_op_rshift);
1539 emitPutResult(instruction[i + 1].u.operand);
// --- op_lshift slow path: operands were clobbered by the hot path's
// untag/shift, so reload both from their registers before calling out. ---
1544 X86Assembler::JmpSrc notImm1 = iter->from;
1545 X86Assembler::JmpSrc notImm2 = (++iter)->from;
1546 m_jit.link((++iter)->from, m_jit.label());
1547 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1548 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1549 m_jit.link(notImm1, m_jit.label());
1550 m_jit.link(notImm2, m_jit.label());
1551 emitPutArg(X86::eax, 0);
1552 emitPutArg(X86::ecx, 4);
1553 emitCall(i, Machine::cti_op_lshift);
1554 emitPutResult(instruction[i + 1].u.operand);
1558 case op_loop_if_less: {
1559 emitSlowScriptCheck(i);
1561 unsigned target = instruction[i + 3].u.operand;
1562 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
// Constant-rhs variant: single bail-out jump, rhs passed from the constant
// pool. (NOTE(review): the `if (src2imm)` / `else` lines appear stripped.)
1564 m_jit.link(iter->from, m_jit.label());
1565 emitPutArg(X86::edx, 0);
1566 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
1567 emitCall(i, Machine::cti_op_loop_if_less);
1568 m_jit.testl_rr(X86::eax, X86::eax);
1569 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
// General variant: two bail-out jumps (one per operand immediate check).
1571 m_jit.link(iter->from, m_jit.label());
1572 m_jit.link((++iter)->from, m_jit.label());
1573 emitPutArg(X86::eax, 0);
1574 emitPutArg(X86::edx, 4);
1575 emitCall(i, Machine::cti_op_loop_if_less);
1576 m_jit.testl_rr(X86::eax, X86::eax);
1577 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
1582 case op_put_by_id: {
1583 m_jit.link(iter->from, m_jit.label());
1584 m_jit.link((++iter)->from, m_jit.label());
1586 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1587 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1588 emitPutArg(X86::eax, 0);
1589 emitPutArg(X86::edx, 8);
1590 X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_put_by_id);
1592 // Track the location of the call; this will be used to recover repatch information.
1593 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
1594 m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
1595 ++structureIDInstructionIndex;
1600 case op_get_by_id: {
1601 // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
1602 // so that we only need track one pointer into the slow case code - we track a pointer to the location
1603 // of the call (which we can use to look up the repatch information), but should a array-length or
1604 // prototype access tramopile fail we want to bail out back to here. To do so we can subtract back
1605 // the distance from the call to the head of the slow case.
1607 m_jit.link(iter->from, m_jit.label());
1608 m_jit.link((++iter)->from, m_jit.label());
1611 X86Assembler::JmpDst coldPathBegin = m_jit.label();
1613 emitPutArg(X86::eax, 0);
1614 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1615 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1616 X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_get_by_id);
// The repatch machinery relies on this exact, fixed distance between the
// cold-path head and the call instruction.
1617 ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
1618 emitPutResult(instruction[i + 1].u.operand);
1620 // Track the location of the call; this will be used to recover repatch information.
1621 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
1622 m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
1623 ++structureIDInstructionIndex;
1628 case op_loop_if_lesseq: {
1629 emitSlowScriptCheck(i);
1631 unsigned target = instruction[i + 3].u.operand;
1632 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
// Same two-variant structure as op_loop_if_less above.
1634 m_jit.link(iter->from, m_jit.label());
1635 emitPutArg(X86::edx, 0);
1636 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
1637 emitCall(i, Machine::cti_op_loop_if_lesseq);
1638 m_jit.testl_rr(X86::eax, X86::eax);
1639 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
1641 m_jit.link(iter->from, m_jit.label());
1642 m_jit.link((++iter)->from, m_jit.label());
1643 emitPutArg(X86::eax, 0);
1644 emitPutArg(X86::edx, 4);
1645 emitCall(i, Machine::cti_op_loop_if_lesseq);
1646 m_jit.testl_rr(X86::eax, X86::eax);
1647 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
// --- op_pre_inc slow path: the subl undoes the hot path's speculative +1
// (overflow case) before calling cti_op_pre_inc; notImm skips the undo. ---
1653 unsigned srcDst = instruction[i + 1].u.operand;
1654 X86Assembler::JmpSrc notImm = iter->from;
1655 m_jit.link((++iter)->from, m_jit.label());
1656 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
1657 m_jit.link(notImm, m_jit.label());
1658 emitPutArg(X86::eax, 0);
1659 emitCall(i, Machine::cti_op_pre_inc);
1660 emitPutResult(srcDst);
1664 case op_put_by_val: {
1665 // Normal slow cases - either is not an immediate imm, or is an array.
1666 X86Assembler::JmpSrc notImm = iter->from;
1667 m_jit.link((++iter)->from, m_jit.label());
1668 m_jit.link((++iter)->from, m_jit.label());
// Re-box the untagged index before the generic helper call.
1669 emitFastArithIntToImmNoCheck(X86::edx);
1670 m_jit.link(notImm, m_jit.label());
1671 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1672 emitPutArg(X86::eax, 0);
1673 emitPutArg(X86::edx, 4);
1674 emitPutArg(X86::ecx, 8);
1675 emitCall(i, Machine::cti_op_put_by_val);
// Rejoin the hot path after this 4-word opcode.
1676 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
1678 // slow cases for immediate int accesses to arrays
1679 m_jit.link((++iter)->from, m_jit.label());
1680 m_jit.link((++iter)->from, m_jit.label());
1681 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1682 emitPutArg(X86::eax, 0);
1683 emitPutArg(X86::edx, 4);
1684 emitPutArg(X86::ecx, 8);
1685 emitCall(i, Machine::cti_op_put_by_val_array);
1690 case op_loop_if_true: {
1691 emitSlowScriptCheck(i);
1693 m_jit.link(iter->from, m_jit.label());
1694 emitPutArg(X86::eax, 0);
1695 emitCall(i, Machine::cti_op_jtrue);
1696 m_jit.testl_rr(X86::eax, X86::eax);
1697 unsigned target = instruction[i + 2].u.operand;
1698 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
// --- op_pre_dec slow path: the addl undoes the hot path's speculative -1
// (overflow case) before calling cti_op_pre_dec. ---
1703 unsigned srcDst = instruction[i + 1].u.operand;
1704 X86Assembler::JmpSrc notImm = iter->from;
1705 m_jit.link((++iter)->from, m_jit.label());
1706 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
1707 m_jit.link(notImm, m_jit.label());
1708 emitPutArg(X86::eax, 0);
1709 emitCall(i, Machine::cti_op_pre_dec);
1710 emitPutResult(srcDst);
// --- op_jnless slow path (calls cti_op_jless, branch taken on false result);
// same constant-rhs / general two-variant structure as the loop_if ops. ---
1715 unsigned target = instruction[i + 3].u.operand;
1716 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1718 m_jit.link(iter->from, m_jit.label());
1719 emitPutArg(X86::edx, 0);
1720 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
1721 emitCall(i, Machine::cti_op_jless);
1722 m_jit.testl_rr(X86::eax, X86::eax);
1723 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
1725 m_jit.link(iter->from, m_jit.label());
1726 m_jit.link((++iter)->from, m_jit.label());
1727 emitPutArg(X86::eax, 0);
1728 emitPutArg(X86::edx, 4);
1729 emitCall(i, Machine::cti_op_jless);
1730 m_jit.testl_rr(X86::eax, X86::eax);
1731 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
// --- op_not slow path: re-flip the bool-tag bit the hot path xor'd before
// passing the original value to cti_op_not. ---
1737 m_jit.link(iter->from, m_jit.label());
1738 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
1739 emitPutArg(X86::eax, 0);
1740 emitCall(i, Machine::cti_op_not);
1741 emitPutResult(instruction[i + 1].u.operand);
// --- jump-if-false slow path: calls cti_op_jtrue and branches on a zero
// (false) result — hence the "inverted" jump condition. ---
1746 m_jit.link(iter->from, m_jit.label());
1747 emitPutArg(X86::eax, 0);
1748 emitCall(i, Machine::cti_op_jtrue);
1749 m_jit.testl_rr(X86::eax, X86::eax);
1750 unsigned target = instruction[i + 2].u.operand;
1751 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 2 + target]); // inverted!
// --- op_post_inc slow path: helper returns the pre-value; the incremented
// value comes back via the CTI_ARGS_2ndResult side channel. ---
1756 unsigned srcDst = instruction[i + 2].u.operand;
1757 m_jit.link(iter->from, m_jit.label());
1758 m_jit.link((++iter)->from, m_jit.label());
1759 emitPutArg(X86::eax, 0);
1760 emitCall(i, Machine::cti_op_post_inc);
1761 emitPutResult(instruction[i + 1].u.operand);
1762 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
1763 emitPutResult(srcDst);
// --- op_bitnot slow path ---
1768 m_jit.link(iter->from, m_jit.label());
1769 emitPutArg(X86::eax, 0);
1770 emitCall(i, Machine::cti_op_bitnot);
1771 emitPutResult(instruction[i + 1].u.operand);
// --- op_bitand slow path: three variants mirroring the hot path
// (constant lhs / constant rhs / both in registers). ---
1776 unsigned src1 = instruction[i + 2].u.operand;
1777 unsigned src2 = instruction[i + 3].u.operand;
1778 unsigned dst = instruction[i + 1].u.operand;
1779 if (getConstantImmediateNumericArg(src1)) {
1780 m_jit.link(iter->from, m_jit.label());
1781 emitGetPutArg(src1, 0, X86::ecx);
1782 emitPutArg(X86::eax, 4);
1783 emitCall(i, Machine::cti_op_bitand);
1785 } else if (getConstantImmediateNumericArg(src2)) {
1786 m_jit.link(iter->from, m_jit.label());
1787 emitPutArg(X86::eax, 0);
1788 emitGetPutArg(src2, 4, X86::ecx);
1789 emitCall(i, Machine::cti_op_bitand);
1792 m_jit.link(iter->from, m_jit.label());
// eax was clobbered by the hot path's andl, so reload src1 from its register.
1793 emitGetPutArg(src1, 0, X86::ecx);
1794 emitPutArg(X86::edx, 4);
1795 emitCall(i, Machine::cti_op_bitand);
// --- jump-if-true slow path (branch taken on non-zero result) ---
1802 m_jit.link(iter->from, m_jit.label());
1803 emitPutArg(X86::eax, 0);
1804 emitCall(i, Machine::cti_op_jtrue);
1805 m_jit.testl_rr(X86::eax, X86::eax);
1806 unsigned target = instruction[i + 2].u.operand;
1807 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
// --- op_post_dec slow path: same 2ndResult side channel as post_inc. ---
1812 unsigned srcDst = instruction[i + 2].u.operand;
1813 m_jit.link(iter->from, m_jit.label());
1814 m_jit.link((++iter)->from, m_jit.label());
1815 emitPutArg(X86::eax, 0);
1816 emitCall(i, Machine::cti_op_post_dec);
1817 emitPutResult(instruction[i + 1].u.operand);
1818 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
1819 emitPutResult(srcDst);
// --- op_bitxor slow path ---
1824 m_jit.link(iter->from, m_jit.label());
1825 emitPutArg(X86::eax, 0);
1826 emitPutArg(X86::edx, 4);
1827 emitCall(i, Machine::cti_op_bitxor);
1828 emitPutResult(instruction[i + 1].u.operand);
// --- op_bitor slow path ---
1833 m_jit.link(iter->from, m_jit.label());
1834 emitPutArg(X86::eax, 0);
1835 emitPutArg(X86::edx, 4);
1836 emitCall(i, Machine::cti_op_bitor);
1837 emitPutResult(instruction[i + 1].u.operand);
// --- op_mod slow path: re-tag the operands the hot path detagged for idiv
// before calling cti_op_mod; the notImm links skip the re-tag. ---
1842 X86Assembler::JmpSrc notImm1 = iter->from;
1843 X86Assembler::JmpSrc notImm2 = (++iter)->from;
1844 m_jit.link((++iter)->from, m_jit.label());
1845 emitFastArithReTagImmediate(X86::eax);
1846 emitFastArithReTagImmediate(X86::ecx);
1847 m_jit.link(notImm1, m_jit.label());
1848 m_jit.link(notImm2, m_jit.label());
1849 emitPutArg(X86::eax, 0);
1850 emitPutArg(X86::ecx, 4);
1851 emitCall(i, Machine::cti_op_mod);
1852 emitPutResult(instruction[i + 1].u.operand);
1856 CTI_COMPILE_BINARY_OP_SLOW_CASE(op_mul);
// Every slow-case opcode must be handled explicitly above.
1858 ASSERT_NOT_REACHED();
// Rejoin the main-pass code. (NOTE(review): the per-case `i += N;` advance
// lines appear stripped by this extraction, so m_labels[i] here should be the
// label of the *next* bytecode instruction — confirm against the original.)
1862 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i]);
// Every recorded structure-stub instruction must have been consumed.
1865 ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
// Top-level compilation driver: emit the function prologue, run the three
// code-generation passes (main, link, slow cases), copy the generated buffer
// into executable memory, then perform all address fixups against the final
// buffer (switch tables, exception handlers, outgoing calls, jsr sites,
// structure-stub repatch info) and publish the code on the CodeBlock.
1868 void CTI::privateCompile()
1870 // Could use a popl_m, but would need to offset the following instruction if so.
// Pop the native return address pushed by the trampoline's call, and stash it
// in the call frame header so the runtime can recover the return point.
1871 m_jit.popl_r(X86::ecx);
1872 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
1873 emitPutToCallFrameHeader(X86::ecx, RegisterFile::CTIReturnEIP);
1875 privateCompileMainPass();
1876 privateCompileLinkPass();
1877 privateCompileSlowCases();
// All intra-function jumps must have been consumed by the link pass.
1879 ASSERT(m_jmpTable.isEmpty());
// Copy into the final buffer; every fixup below is relative to `code`.
1881 void* code = m_jit.copy();
1884 // Translate vPC offsets into addresses in JIT generated code, for switch tables.
1885 for (unsigned i = 0; i < m_switches.size(); ++i) {
1886 SwitchRecord record = m_switches[i];
1887 unsigned opcodeIndex = record.m_opcodeIndex;
// Immediate and Character switches share the SimpleJumpTable layout; only
// String switches use the hash-table-based StringJumpTable.
1889 if (record.m_type != SwitchRecord::String) {
1890 ASSERT(record.m_type == SwitchRecord::Immediate || record.m_type == SwitchRecord::Character);
1891 ASSERT(record.m_jumpTable.m_simpleJumpTable->branchOffsets.size() == record.m_jumpTable.m_simpleJumpTable->ctiOffsets.size());
1893 record.m_jumpTable.m_simpleJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
1895 for (unsigned j = 0; j < record.m_jumpTable.m_simpleJumpTable->branchOffsets.size(); ++j) {
1896 unsigned offset = record.m_jumpTable.m_simpleJumpTable->branchOffsets[j];
// A zero branch offset marks an unused table slot: fall back to the default.
1897 record.m_jumpTable.m_simpleJumpTable->ctiOffsets[j] = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_simpleJumpTable->ctiDefault;
1900 ASSERT(record.m_type == SwitchRecord::String);
1902 record.m_jumpTable.m_stringJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
1904 StringJumpTable::StringOffsetTable::iterator end = record.m_jumpTable.m_stringJumpTable->offsetTable.end();
1905 for (StringJumpTable::StringOffsetTable::iterator it = record.m_jumpTable.m_stringJumpTable->offsetTable.begin(); it != end; ++it) {
1906 unsigned offset = it->second.branchOffset;
1907 it->second.ctiOffset = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_stringJumpTable->ctiDefault;
// Point each exception handler at its generated-code entry.
1912 for (Vector<HandlerInfo>::iterator iter = m_codeBlock->exceptionHandlers.begin(); iter != m_codeBlock->exceptionHandlers.end(); ++iter)
1913 iter->nativeCode = m_jit.getRelocatedAddress(code, m_labels[iter->target]);
1915 // FIXME: There doesn't seem to be a way to hint to a hashmap that it should make a certain capacity available;
1916 // could be faster if we could do something like this:
1917 // m_codeBlock->ctiReturnAddressVPCMap.grow(m_calls.size());
// Link each outgoing call and record its return address -> vPC mapping so the
// runtime can map a native return address back to a bytecode index.
1918 for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
1919 X86Assembler::link(code, iter->from, iter->to);
1920 m_codeBlock->ctiReturnAddressVPCMap.add(m_jit.getRelocatedAddress(code, iter->from), iter->opcodeIndex);
1923 // Link absolute addresses for jsr
1924 for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
1925 X86Assembler::linkAbsoluteAddress(code, iter->addrPosition, iter->target);
// Publish the per-instruction structure-stub locations (call return address
// and hot-path head) recorded during compilation, for later repatching.
1927 for (unsigned i = 0; i < m_codeBlock->structureIDInstructions.size(); ++i) {
1928 StructureStubInfo& info = m_codeBlock->structureIDInstructions[i];
1929 info.callReturnLocation = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].callReturnLocation);
1930 info.hotPathBegin = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].hotPathBegin);
1934 m_codeBlock->ctiCode = code;
// Compile a fast-path stub for get_by_id when the property is stored directly on the
// base object ("self" case): verify eax holds a cell with the expected StructureID,
// then load the cached slot out of the object's property storage.
// Failure cases are linked to cti_op_get_by_id_fail; the call at returnAddress is
// repatched to jump into this stub on future executions.
1937 void CTI::privateCompileGetByIdSelf(StructureID* structureID, size_t cachedOffset, void* returnAddress)
1939 // Check eax is an object of the right StructureID.
// TagMask set => immediate value, not a cell — bail to the failure path.
1940 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1941 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
// Compare the cell's StructureID word against the cached StructureID pointer.
1942 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1943 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
1945 // Checks out okay! - getDirectOffset
// eax = object->m_propertyStorage; eax = storage[cachedOffset].
1946 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
1947 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
// Copy the generated machine code into executable memory.
1950 void* code = m_jit.copy();
// Link both failure jumps to the generic slow-case handler.
1953 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
1954 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
// Record the stub so the CodeBlock can free it later.
1956 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
// Redirect the original call site to execute this stub instead of the C++ slow path.
1958 ctiRepatchCallByReturnAddress(returnAddress, code);
// Compile a fast-path stub for get_by_id when the property lives on the direct
// prototype: check the base object's StructureID and the prototype's StructureID,
// then load the cached slot from the prototype's property storage.
// Two variants exist: a repatching PIC variant (CTI_REPATCH_PIC) that links its
// failure/success paths back into the hot path, and a standalone-stub variant.
// NOTE(review): the #else/#endif directives separating the two variants are not
// visible in this excerpt — confirm against the full file.
1961 void CTI::privateCompileGetByIdProto(StructureID* structureID, StructureID* prototypeStructureID, size_t cachedOffset, void* returnAddress)
1963 #if USE(CTI_REPATCH_PIC)
1964 StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
1966 // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
1967 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
1969 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
1970 // referencing the prototype object - let's speculatively load its table nice and early!)
1971 JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
// Bake the address of the prototype's property-storage pointer into the stub.
1972 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
1973 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
1975 // check eax is an object of the right StructureID.
1976 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1977 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
1978 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1979 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
1981 // Check the prototype object's StructureID had not changed.
// Compare against the prototype's StructureID slot by absolute address.
1982 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
1983 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
1984 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
1986 // Checks out okay! - getDirectOffset
// Result goes in ecx; the hot path's tail performs the store to the destination register.
1987 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::ecx);
1989 X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
1991 void* code = m_jit.copy();
1994 // Use the repatch information to link the failure cases back to the original slow case routine.
1995 void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
1996 X86Assembler::link(code, failureCases1, slowCaseBegin);
1997 X86Assembler::link(code, failureCases2, slowCaseBegin);
1998 X86Assembler::link(code, failureCases3, slowCaseBegin);
2000 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
2001 intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
2002 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
2004 // Track the stub we have created so that it will be deleted later.
2005 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2007 // Finally repatch the jump to slow case back in the hot path to jump here instead.
2008 // FIXME: should revert this repatching, on failure.
2009 intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
2010 X86Assembler::repatchBranchOffset(jmpLocation, code);
// --- Non-PIC variant: standalone stub, failures go to the generic fail handler. ---
2012 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
2013 // referencing the prototype object - let's speculatively load its table nice and early!)
2014 JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
2015 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2016 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2018 // check eax is an object of the right StructureID.
2019 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2020 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2021 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2022 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2024 // Check the prototype object's StructureID had not changed.
2025 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2026 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
2027 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
2029 // Checks out okay! - getDirectOffset
// In this variant the result is returned directly in eax.
2030 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
2034 void* code = m_jit.copy();
2037 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2038 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2039 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2041 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2043 ctiRepatchCallByReturnAddress(returnAddress, code);
// Compile a fast-path stub for get_by_id when the property lives `count` steps up
// the prototype chain: verify the base object's StructureID and the StructureID of
// every prototype along the chain, then load the cached slot from the final
// prototype's property storage. Any mismatch jumps to cti_op_get_by_id_fail.
2047 void CTI::privateCompileGetByIdChain(StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset, void* returnAddress)
// All failure jumps are collected here and linked to the fail handler at the end.
2051 Vector<X86Assembler::JmpSrc> bucketsOfFail;
2053 // Check eax is an object of the right StructureID.
2054 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2055 bucketsOfFail.append(m_jit.emitUnlinkedJne());
2056 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2057 bucketsOfFail.append(m_jit.emitUnlinkedJne());
// Walk the recorded prototype chain, emitting a StructureID check for each hop.
2059 StructureID* currStructureID = structureID;
2060 RefPtr<StructureID>* chainEntries = chain->head();
2061 JSObject* protoObject = 0;
2062 for (unsigned i = 0; i<count; ++i) {
2063 protoObject = static_cast<JSObject*>(currStructureID->prototypeForLookup(m_exec));
2064 currStructureID = chainEntries[i].get();
2066 // Check the prototype object's StructureID had not changed.
// Each prototype's StructureID slot is compared by absolute address, baked into the stub.
2067 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2068 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(currStructureID), static_cast<void*>(protoStructureIDAddress));
2069 bucketsOfFail.append(m_jit.emitUnlinkedJne());
2071 ASSERT(protoObject);
// Load the value from the final prototype's property storage into eax.
2073 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2074 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2075 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
2078 bucketsOfFail.append(m_jit.emitUnlinkedJmp());
2080 void* code = m_jit.copy();
// Link every collected failure (and the trailing jump) to the generic fail handler.
2083 for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
2084 X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2086 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2088 ctiRepatchCallByReturnAddress(returnAddress, code);
// Compile a fast-path stub for put_by_id when the write replaces an existing
// property (no StructureID transition): check eax is a cell with the expected
// StructureID, then store edx (the value) into the cached property-storage slot.
// Failures fall back to cti_op_put_by_id_fail.
2091 void CTI::privateCompilePutByIdReplace(StructureID* structureID, size_t cachedOffset, void* returnAddress)
2093 // check eax is an object of the right StructureID.
2094 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2095 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2096 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2097 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2099 // checks out okay! - putDirectOffset
// eax = object->m_propertyStorage; storage[cachedOffset] = edx.
2100 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2101 m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
2104 void* code = m_jit.copy();
2107 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
2108 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
// Record the stub for later cleanup, then repatch the call site to use it.
2110 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2112 ctiRepatchCallByReturnAddress(returnAddress, code);
// Out-of-line helper called from the put_by_id transition stub when the transition
// needs real work (property-storage reallocation): switch the object to the new
// StructureID, grow its storage if it was at inline capacity, and store the value.
// NOTE(review): the function's return statement is not visible in this excerpt —
// presumably it returns `value`; confirm against the full file.
2117 static JSValue* SFX_CALL transitionObject(StructureID* newStructureID, size_t cachedOffset, JSObject* baseObject, JSValue* value)
2119 StructureID* oldStructureID = newStructureID->previousID();
2121 baseObject->transitionTo(newStructureID);
// If the old structure exactly filled the inline storage, this put spills the
// object out to (or grows) heap-allocated property storage.
2123 if (oldStructureID->propertyMap().storageSize() == JSObject::inlineStorageCapacity)
2124 baseObject->allocatePropertyStorage(oldStructureID->propertyMap().storageSize(), oldStructureID->propertyMap().size());
2126 baseObject->putDirectOffset(cachedOffset, value);
// Decide whether a StructureID transition requires reallocating the object's
// property storage (in which case the JIT emits a call to transitionObject
// instead of inline code). The tests, in order: leaving inline capacity,
// still within inline capacity, and a change in the map's allocated size.
// NOTE(review): the return statements for each branch are not visible in this
// excerpt — confirm the true/false results against the full file.
2132 static inline bool transitionWillNeedStorageRealloc(StructureID* oldStructureID, StructureID* newStructureID)
2134 if (oldStructureID->propertyMap().storageSize() == JSObject::inlineStorageCapacity)
2137 if (oldStructureID->propertyMap().storageSize() < JSObject::inlineStorageCapacity)
2140 if (oldStructureID->propertyMap().size() != newStructureID->propertyMap().size())
// Compile a fast-path stub for put_by_id that adds a new property, transitioning
// the object from oldStructureID to newStructureID. The stub:
//   1. checks eax is a cell with oldStructureID;
//   2. walks the recorded prototype chain (sIDC), verifying each prototype's
//      StructureID and type, bailing if any setter/read-only could intervene;
//   3. either swaps the StructureID and stores inline (no storage realloc), or
//      calls the transitionObject helper (realloc needed).
2146 void CTI::privateCompilePutByIdTransition(StructureID* oldStructureID, StructureID* newStructureID, size_t cachedOffset, StructureIDChain* sIDC, void* returnAddress)
2148 Vector<X86Assembler::JmpSrc, 16> failureCases;
2149 // check eax is an object of the right StructureID.
2150 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2151 failureCases.append(m_jit.emitUnlinkedJne());
2152 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(oldStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2153 failureCases.append(m_jit.emitUnlinkedJne());
2154 Vector<X86Assembler::JmpSrc> successCases;
// ecx starts as the base object's StructureID; the loop below chases prototypes.
2157 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
2158 // proto(ecx) = baseObject->structureID()->prototype()
// Only plain objects are safe — any other type could have exotic put behavior.
2159 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_type), X86::ecx);
2160 failureCases.append(m_jit.emitUnlinkedJne());
2161 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
2163 // ecx = baseObject->m_structureID
2164 for (RefPtr<StructureID>* it = sIDC->head(); *it; ++it) {
2165 // null check the prototype
// Reaching null terminates the chain successfully.
2166 m_jit.cmpl_i32r(reinterpret_cast<intptr_t> (jsNull()), X86::ecx);
2167 successCases.append(m_jit.emitUnlinkedJe());
2169 // Check the structure id
2170 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(it->get()), OBJECT_OFFSET(JSCell, m_structureID), X86::ecx);
2171 failureCases.append(m_jit.emitUnlinkedJne());
// Advance: ecx = proto->m_structureID; verify type; ecx = structure->m_prototype.
2173 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::ecx, X86::ecx);
2174 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_type), X86::ecx);
2175 failureCases.append(m_jit.emitUnlinkedJne());
2176 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
2179 failureCases.append(m_jit.emitUnlinkedJne());
// All early-exit success jumps land here, at the start of the store sequence.
2180 for (unsigned i = 0; i < successCases.size(); ++i)
2181 m_jit.link(successCases[i], m_jit.label());
2183 X86Assembler::JmpSrc callTarget;
2184 // Fast case, don't need to do any heavy lifting, so don't bother making a call.
2185 if (!transitionWillNeedStorageRealloc(oldStructureID, newStructureID)) {
2186 // Assumes m_refCount can be decremented easily, refcount decrement is safe as
2187 // codeblock should ensure oldStructureID->m_refCount > 0
// Adjust StructureID refcounts directly in memory, then swap the object's StructureID.
2188 m_jit.subl_i8m(1, reinterpret_cast<void*>(oldStructureID));
2189 m_jit.addl_i8m(1, reinterpret_cast<void*>(newStructureID));
2190 m_jit.movl_i32m(reinterpret_cast<uint32_t>(newStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
// write the value
2193 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2194 m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
2196 // Slow case transition -- we're going to need to do quite a bit of work,
2197 // so just make a call
// Push the four SFX_CALL arguments (value, baseObject, cachedOffset, newStructureID)
// and call transitionObject; the stack is cleaned up after the call.
2198 m_jit.pushl_r(X86::edx);
2199 m_jit.pushl_r(X86::eax);
2200 m_jit.movl_i32r(cachedOffset, X86::eax);
2201 m_jit.pushl_r(X86::eax);
2202 m_jit.movl_i32r(reinterpret_cast<uint32_t>(newStructureID), X86::eax);
2203 m_jit.pushl_r(X86::eax);
2204 callTarget = m_jit.emitCall();
2205 m_jit.addl_i32r(4 * sizeof(void*), X86::esp);
2208 void* code = m_jit.copy();
2211 for (unsigned i = 0; i < failureCases.size(); ++i)
2212 X86Assembler::link(code, failureCases[i], reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
// Only the realloc path emitted a call; link it to the helper now the code is copied.
2214 if (transitionWillNeedStorageRealloc(oldStructureID, newStructureID))
2215 X86Assembler::link(code, callTarget, reinterpret_cast<void*>(transitionObject));
2217 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2219 ctiRepatchCallByReturnAddress(returnAddress, code);
// Compile the shared trampoline for reading JSArray::length: vptr-check that eax
// is a JSArray, load the length from its ArrayStorage, then tag it as a JSImmediate
// integer (shift left via add, then set the low tag bit). Overflow of the tagged
// representation, or a failed check, falls back to cti_op_get_by_id_fail.
// Returns a pointer to the finished trampoline code.
2222 void* CTI::privateCompileArrayLengthTrampoline()
2224 // Check eax is an array
2225 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2226 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
// Identify arrays by comparing the cell's vtable pointer against the JSArray vptr.
2227 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
2228 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2230 // Checks out okay! - get the length from the storage
2231 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::eax);
2232 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::eax, X86::eax);
// Tag as immediate int: eax = eax * 2 (bail on signed overflow), then +1 tag bit.
2234 m_jit.addl_rr(X86::eax, X86::eax);
2235 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
2236 m_jit.addl_i8r(1, X86::eax);
2240 void* code = m_jit.copy();
2243 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2244 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2245 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
// Compile the shared trampoline for reading JSString::length: vptr-check that eax
// is a JSString, load the length from its UString::Rep, then tag it as a
// JSImmediate integer. Any failed check or tag overflow falls back to
// cti_op_get_by_id_fail. Returns a pointer to the finished trampoline code.
2250 void* CTI::privateCompileStringLengthTrampoline()
2252 // Check eax is a string
2253 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2254 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
// Identify strings by comparing the cell's vtable pointer against the JSString vptr.
2255 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsStringVptr), X86::eax);
2256 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2258 // Checks out okay! - get the length from the Ustring.
2259 m_jit.movl_mr(OBJECT_OFFSET(JSString, m_value) + OBJECT_OFFSET(UString, m_rep), X86::eax, X86::eax);
2260 m_jit.movl_mr(OBJECT_OFFSET(UString::Rep, len), X86::eax, X86::eax);
// Tag as immediate int: eax = eax * 2 (bail on signed overflow), then +1 tag bit.
2262 m_jit.addl_rr(X86::eax, X86::eax);
2263 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
2264 m_jit.addl_i8r(1, X86::eax);
2268 void* code = m_jit.copy();
2271 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2272 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2273 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
// Patch the inline (hot-path) get_by_id cache in place: point the slow-path call
// at the generic handler so we never repatch twice, then rewrite the immediate
// StructureID operand and the property-storage displacement baked into the
// hot-path instructions.
2278 void CTI::patchGetByIdSelf(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
2280 StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
2282 // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
2283 // Should probably go to Machine::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
2284 ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_get_by_id_generic));
2286 // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
2287 X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
2288 X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdStructureID, reinterpret_cast<uint32_t>(structureID));
// Patch the inline (hot-path) put_by_id cache in place: point the slow-path call
// at the generic handler so we never repatch twice, then rewrite the immediate
// StructureID operand and the property-storage displacement baked into the
// hot-path instructions.
2291 void CTI::patchPutByIdReplace(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
2293 StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
2295 // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
2296 // Should probably go to Machine::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
2297 ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_put_by_id_generic));
2299 // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
2300 X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
2301 X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdStructureID, reinterpret_cast<uint32_t>(structureID));
// Compile a PIC stub for array.length at a specific get_by_id site: vptr-check
// that eax is a JSArray, load and immediate-tag the length (into ecx — the hot
// path's tail stores it to the destination), then link failures back to the
// site's slow case and splice the stub into the hot path's branch.
2304 void CTI::privateCompilePatchGetArrayLength(void* returnAddress)
2306 StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
2308 // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
2309 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2311 // Check eax is an array
2312 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2313 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
// Identify arrays by comparing the cell's vtable pointer against the JSArray vptr.
2314 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
2315 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2317 // Checks out okay! - get the length from the storage
2318 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
2319 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::ecx, X86::ecx);
// Tag as immediate int: ecx = ecx * 2 (bail on signed overflow), then +1 tag bit.
2321 m_jit.addl_rr(X86::ecx, X86::ecx);
2322 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
2323 m_jit.addl_i8r(1, X86::ecx);
2325 X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
2327 void* code = m_jit.copy();
2330 // Use the repatch information to link the failure cases back to the original slow case routine.
2331 void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
2332 X86Assembler::link(code, failureCases1, slowCaseBegin);
2333 X86Assembler::link(code, failureCases2, slowCaseBegin);
2334 X86Assembler::link(code, failureCases3, slowCaseBegin);
2336 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
2337 intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
2338 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
2340 // Track the stub we have created so that it will be deleted later.
2341 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2343 // Finally repatch the jump to slow case back in the hot path to jump here instead.
2344 // FIXME: should revert this repatching, on failure.
2345 intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
2346 X86Assembler::repatchBranchOffset(jmpLocation, code);
// Emit code to load register `index` of a JSVariableObject into `dst`:
// dst = variableObject->d; dst = d->registers; dst = registers[index].
// Clobbers dst at each step; variableObject is left untouched.
2349 void CTI::emitGetVariableObjectRegister(X86Assembler::RegisterID variableObject, int index, X86Assembler::RegisterID dst)
2351 m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, dst);
2352 m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), dst, dst);
2353 m_jit.movl_mr(index * sizeof(Register), dst, dst);
// Emit code to store `src` into register `index` of a JSVariableObject:
// variableObject = variableObject->d; variableObject = d->registers;
// registers[index] = src. Note: clobbers the variableObject register.
2356 void CTI::emitPutVariableObjectRegister(X86Assembler::RegisterID src, X86Assembler::RegisterID variableObject, int index)
2358 m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, variableObject);
2359 m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), variableObject, variableObject);
2360 m_jit.movl_rm(src, index * sizeof(Register), variableObject);
// WREC entry point: JIT-compile a regular expression into native x86 code.
// Emits a prologue that preserves registers and sets up the output pointer, the
// parsed disjunction body, success/failure epilogues, and a retry loop that
// advances the match start position until it passes the subject length.
// On error, *error_ptr is set and (presumably) 0 is returned — the early-return
// lines are not visible in this excerpt.
2365 void* CTI::compileRegExp(ExecState* exec, const UString& pattern, unsigned* numSubpatterns_ptr, const char** error_ptr, bool ignoreCase, bool multiline)
2367 // TODO: better error messages
// Refuse oversized patterns up front rather than risk overrunning the code buffer.
2368 if (pattern.size() > MaxPatternSize) {
2369 *error_ptr = "regular expression too large";
2373 X86Assembler jit(exec->machine()->jitCodeBuffer());
2374 WRECParser parser(pattern, ignoreCase, multiline, jit);
// Move the register-passed arguments into place for the fastcall convention.
2376 jit.emitConvertToFastCall();
2378 // Preserve regs & initialize outputRegister.
2379 jit.pushl_r(WRECGenerator::outputRegister);
2380 jit.pushl_r(WRECGenerator::currentValueRegister);
2381 // push pos onto the stack, both to preserve and as a parameter available to parseDisjunction
2382 jit.pushl_r(WRECGenerator::currentPositionRegister);
2383 // load output pointer
// (Instruction operands elided in this excerpt; loads the caller's output vector
// from the stack into outputRegister.)
2388 , X86::esp, WRECGenerator::outputRegister);
2390 // restart point on match fail.
2391 WRECGenerator::JmpDst nextLabel = jit.label();
2393 // (1) Parse Disjunction:
2395 // Parsing the disjunction should fully consume the pattern.
2396 JmpSrcVector failures;
2397 parser.parseDisjunction(failures);
// NOTE(review): this condition looks inverted relative to the comment above —
// a malformed-pattern error should be flagged when the pattern was NOT fully
// consumed (i.e. !parser.isEndOfPattern()). A '!' may have been lost; confirm
// against the canonical source.
2398 if (parser.isEndOfPattern()) {
2399 parser.m_err = WRECParser::Error_malformedPattern;
2402 // TODO: better error messages
2403 *error_ptr = "TODO: better error messages";
2408 // Set return value & pop registers from the stack.
// If no output vector was supplied, skip writing the match bounds.
2410 jit.testl_rr(WRECGenerator::outputRegister, WRECGenerator::outputRegister);
2411 WRECGenerator::JmpSrc noOutput = jit.emitUnlinkedJe();
// Success with output: record match end, pop saved start into output[0],
// restore callee-saved registers, and (presumably) return.
2413 jit.movl_rm(WRECGenerator::currentPositionRegister, 4, WRECGenerator::outputRegister);
2414 jit.popl_r(X86::eax);
2415 jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
2416 jit.popl_r(WRECGenerator::currentValueRegister);
2417 jit.popl_r(WRECGenerator::outputRegister);
// Success without output: just unwind the saved registers.
2420 jit.link(noOutput, jit.label());
2422 jit.popl_r(X86::eax);
2423 jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
2424 jit.popl_r(WRECGenerator::currentValueRegister);
2425 jit.popl_r(WRECGenerator::outputRegister);
2429 // All fails link to here. Progress the start point & if it is within scope, loop.
2430 // Otherwise, return fail value.
2431 WRECGenerator::JmpDst here = jit.label();
2432 for (unsigned i = 0; i < failures.size(); ++i)
2433 jit.link(failures[i], here);
// Reload the saved start position from the stack, advance it by one character,
// store it back, and retry the whole pattern while it is still <= length.
2436 jit.movl_mr(X86::esp, WRECGenerator::currentPositionRegister);
2437 jit.addl_i8r(1, WRECGenerator::currentPositionRegister);
2438 jit.movl_rm(WRECGenerator::currentPositionRegister, X86::esp);
2439 jit.cmpl_rr(WRECGenerator::lengthRegister, WRECGenerator::currentPositionRegister);
2440 jit.link(jit.emitUnlinkedJle(), nextLabel);
// Out of retries: discard the saved position, return -1 (no match), restore regs.
2442 jit.addl_i8r(4, X86::esp);
2444 jit.movl_i32r(-1, X86::eax);
2445 jit.popl_r(WRECGenerator::currentValueRegister);
2446 jit.popl_r(WRECGenerator::outputRegister);
// Report the subpattern count to the caller, then finalize the generated code.
2449 *numSubpatterns_ptr = parser.m_numSubpatterns;
2451 void* code = jit.copy();
2456 #endif // ENABLE(WREC)
2460 #endif // ENABLE(CTI)