2 * Copyright (C) 2008 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "CodeBlock.h"
34 #include "wrec/WREC.h"
40 #if COMPILER(GCC) && PLATFORM(X86)
42 ".globl _ctiTrampoline" "\n"
43 "_ctiTrampoline:" "\n"
46 "subl $0x24, %esp" "\n"
47 "movl $512, %esi" "\n"
48 "call *0x30(%esp)" "\n" //Ox30 = 0x0C * 4, 0x0C = CTI_ARGS_code
49 "addl $0x24, %esp" "\n"
56 ".globl _ctiVMThrowTrampoline" "\n"
57 "_ctiVMThrowTrampoline:" "\n"
59 "movl 0x34(%esp), %ecx" "\n" //Ox34 = 0x0D * 4, 0x0D = CTI_ARGS_exec
60 "cmpl $0, 8(%ecx)" "\n"
65 "call __ZN3JSC7Machine12cti_vm_throwEPv" "\n"
66 "addl $0x24, %esp" "\n"
76 __declspec(naked) JSValue* ctiTrampoline(void* code, ExecState* exec, RegisterFile* registerFile, Register* r, ScopeChainNode* scopeChain, CodeBlock* codeBlock, JSValue** exception, Profiler**)
92 __declspec(naked) void ctiVMThrowTrampoline()
96 call JSC::Machine::cti_vm_throw;
109 // get arg puts an arg from the SF register array into a h/w register
110 ALWAYS_INLINE void CTI::emitGetArg(unsigned src, X86Assembler::RegisterID dst)
112 // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
113 if (src < m_codeBlock->constantRegisters.size()) {
114 JSValue* js = m_codeBlock->constantRegisters[src].jsValue(m_exec);
115 m_jit.movl_i32r(reinterpret_cast<unsigned>(js), dst);
117 m_jit.movl_mr(src * sizeof(Register), X86::edi, dst);
120 // get-put arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
121 ALWAYS_INLINE void CTI::emitGetPutArg(unsigned src, unsigned offset, X86Assembler::RegisterID scratch)
123 if (src < m_codeBlock->constantRegisters.size()) {
124 JSValue* js = m_codeBlock->constantRegisters[src].jsValue(m_exec);
125 m_jit.movl_i32m(reinterpret_cast<unsigned>(js), offset + sizeof(void*), X86::esp);
127 m_jit.movl_mr(src * sizeof(Register), X86::edi, scratch);
128 m_jit.movl_rm(scratch, offset + sizeof(void*), X86::esp);
132 // puts an arg onto the stack, as an arg to a context threaded function.
133 ALWAYS_INLINE void CTI::emitPutArg(X86Assembler::RegisterID src, unsigned offset)
135 m_jit.movl_rm(src, offset + sizeof(void*), X86::esp);
138 ALWAYS_INLINE void CTI::emitPutArgConstant(unsigned value, unsigned offset)
140 m_jit.movl_i32m(value, offset + sizeof(void*), X86::esp);
143 ALWAYS_INLINE JSValue* CTI::getConstantImmediateNumericArg(unsigned src)
145 if (src < m_codeBlock->constantRegisters.size()) {
146 JSValue* js = m_codeBlock->constantRegisters[src].jsValue(m_exec);
147 return JSImmediate::isNumber(js) ? js : 0;
152 ALWAYS_INLINE void CTI::emitPutCTIParam(X86Assembler::RegisterID from, unsigned name)
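// CTI 'parameters' are kept in fixed esp-relative slots; 'name' is a CTI_ARGS_* index (e.g. CTI_ARGS_exec, CTI_ARGS_r, used elsewhere in this file).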
154 m_jit.movl_rm(from, name * sizeof(void*), X86::esp);
157 ALWAYS_INLINE void CTI::emitGetCTIParam(unsigned name, X86Assembler::RegisterID to)
159 m_jit.movl_mr(name * sizeof(void*), X86::esp, to);
162 ALWAYS_INLINE void CTI::emitPutToCallFrameHeader(X86Assembler::RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
164 m_jit.movl_rm(from, -((m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize) - entry) * sizeof(Register), X86::edi);
167 ALWAYS_INLINE void CTI::emitGetFromCallFrameHeader(RegisterFile::CallFrameHeaderEntry entry, X86Assembler::RegisterID to)
169 m_jit.movl_mr(-((m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize) - entry) * sizeof(Register), X86::edi, to);
172 ALWAYS_INLINE void CTI::emitPutResult(unsigned dst, X86Assembler::RegisterID from)
174 m_jit.movl_rm(from, dst * sizeof(Register), X86::edi);
175 // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
178 #if ENABLE(SAMPLING_TOOL)
179 unsigned inCalledCode = 0;
182 void ctiSetReturnAddress(void** where, void* what)
187 void ctiRepatchCallByReturnAddress(void* where, void* what)
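// 'where' is a return address; the four bytes immediately preceding it are the rel32 operand of the call instruction, which is rewritten here so that the call targets 'what'.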
189 (static_cast<void**>(where))[-1] = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(what) - reinterpret_cast<uintptr_t>(where));
194 ALWAYS_INLINE void CTI::emitDebugExceptionCheck()
200 ALWAYS_INLINE void CTI::emitDebugExceptionCheck()
202 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
203 m_jit.cmpl_i32m(0, OBJECT_OFFSET(ExecState, m_exception), X86::ecx);
204 X86Assembler::JmpSrc noException = m_jit.emitUnlinkedJe();
206 m_jit.link(noException, m_jit.label());
209 void CTI::printOpcodeOperandTypes(unsigned src1, unsigned src2)
212 if (src1 < m_codeBlock->constantRegisters.size()) {
213 JSValue* js = m_codeBlock->constantRegisters[src1].jsValue(m_exec);
215 JSImmediate::isImmediate(js) ?
216 (JSImmediate::isNumber(js) ? 'i' :
217 JSImmediate::isBoolean(js) ? 'b' :
218 js->isUndefined() ? 'u' :
219 js->isNull() ? 'n' : '?')
221 (js->isString() ? 's' :
222 js->isObject() ? 'o' :
226 if (src2 < m_codeBlock->constantRegisters.size()) {
227 JSValue* js = m_codeBlock->constantRegisters[src2].jsValue(m_exec);
229 JSImmediate::isImmediate(js) ?
230 (JSImmediate::isNumber(js) ? 'i' :
231 JSImmediate::isBoolean(js) ? 'b' :
232 js->isUndefined() ? 'u' :
233 js->isNull() ? 'n' : '?')
235 (js->isString() ? 's' :
236 js->isObject() ? 'o' :
239 if ((which1 != '*') | (which2 != '*'))
240 fprintf(stderr, "Types %c %c\n", which1, which2);
245 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_j helper)
247 #if ENABLE(SAMPLING_TOOL)
248 m_jit.movl_i32m(1, &inCalledCode);
250 X86Assembler::JmpSrc call = m_jit.emitCall();
251 m_calls.append(CallRecord(call, helper, opcodeIndex));
252 emitDebugExceptionCheck();
253 #if ENABLE(SAMPLING_TOOL)
254 m_jit.movl_i32m(0, &inCalledCode);
260 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_p helper)
262 #if ENABLE(SAMPLING_TOOL)
263 m_jit.movl_i32m(1, &inCalledCode);
265 X86Assembler::JmpSrc call = m_jit.emitCall();
266 m_calls.append(CallRecord(call, helper, opcodeIndex));
267 emitDebugExceptionCheck();
268 #if ENABLE(SAMPLING_TOOL)
269 m_jit.movl_i32m(0, &inCalledCode);
275 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_b helper)
277 #if ENABLE(SAMPLING_TOOL)
278 m_jit.movl_i32m(1, &inCalledCode);
280 X86Assembler::JmpSrc call = m_jit.emitCall();
281 m_calls.append(CallRecord(call, helper, opcodeIndex));
282 emitDebugExceptionCheck();
283 #if ENABLE(SAMPLING_TOOL)
284 m_jit.movl_i32m(0, &inCalledCode);
290 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_v helper)
292 #if ENABLE(SAMPLING_TOOL)
293 m_jit.movl_i32m(1, &inCalledCode);
295 X86Assembler::JmpSrc call = m_jit.emitCall();
296 m_calls.append(CallRecord(call, helper, opcodeIndex));
297 emitDebugExceptionCheck();
298 #if ENABLE(SAMPLING_TOOL)
299 m_jit.movl_i32m(0, &inCalledCode);
305 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_s helper)
307 #if ENABLE(SAMPLING_TOOL)
308 m_jit.movl_i32m(1, &inCalledCode);
310 X86Assembler::JmpSrc call = m_jit.emitCall();
311 m_calls.append(CallRecord(call, helper, opcodeIndex));
312 emitDebugExceptionCheck();
313 #if ENABLE(SAMPLING_TOOL)
314 m_jit.movl_i32m(0, &inCalledCode);
320 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfIsJSCell(X86Assembler::RegisterID reg, unsigned opcodeIndex)
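// Cell pointers have no tag bits set, so the TagMask test sets ZF exactly when the value is a JSCell.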
322 m_jit.testl_i32r(JSImmediate::TagMask, reg);
323 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), opcodeIndex));
326 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotJSCell(X86Assembler::RegisterID reg, unsigned opcodeIndex)
328 m_jit.testl_i32r(JSImmediate::TagMask, reg);
329 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), opcodeIndex));
332 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImm(X86Assembler::RegisterID reg, unsigned opcodeIndex)
334 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, reg);
335 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), opcodeIndex));
338 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImms(X86Assembler::RegisterID reg1, X86Assembler::RegisterID reg2, unsigned opcodeIndex)
340 m_jit.movl_rr(reg1, X86::ecx);
341 m_jit.andl_rr(reg2, X86::ecx);
342 emitJumpSlowCaseIfNotImm(X86::ecx, opcodeIndex);
345 ALWAYS_INLINE unsigned CTI::getDeTaggedConstantImmediate(JSValue* imm)
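// An immediate number is the integer value shifted left by one with TagBitTypeInteger set (see emitFastArithImmToInt / emitFastArithIntToImmNoCheck below), so clearing the tag bit leaves value * 2.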
347 ASSERT(JSImmediate::isNumber(imm));
348 return reinterpret_cast<unsigned>(imm) & ~JSImmediate::TagBitTypeInteger;
351 ALWAYS_INLINE void CTI::emitFastArithDeTagImmediate(X86Assembler::RegisterID reg)
353 // op_mod relies on this being a sub - setting zf if result is 0.
354 m_jit.subl_i8r(JSImmediate::TagBitTypeInteger, reg);
357 ALWAYS_INLINE void CTI::emitFastArithReTagImmediate(X86Assembler::RegisterID reg)
359 m_jit.addl_i8r(JSImmediate::TagBitTypeInteger, reg);
362 ALWAYS_INLINE void CTI::emitFastArithPotentiallyReTagImmediate(X86Assembler::RegisterID reg)
364 m_jit.orl_i32r(JSImmediate::TagBitTypeInteger, reg);
367 ALWAYS_INLINE void CTI::emitFastArithImmToInt(X86Assembler::RegisterID reg)
369 m_jit.sarl_i8r(1, reg);
372 ALWAYS_INLINE void CTI::emitFastArithIntToImmOrSlowCase(X86Assembler::RegisterID reg, unsigned opcodeIndex)
374 m_jit.addl_rr(reg, reg);
375 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), opcodeIndex));
376 emitFastArithReTagImmediate(reg);
379 ALWAYS_INLINE void CTI::emitFastArithIntToImmNoCheck(X86Assembler::RegisterID reg)
381 m_jit.addl_rr(reg, reg);
382 emitFastArithReTagImmediate(reg);
385 CTI::CTI(Machine* machine, ExecState* exec, CodeBlock* codeBlock)
386 : m_jit(machine->jitCodeBuffer())
389 , m_codeBlock(codeBlock)
390 , m_labels(codeBlock ? codeBlock->instructions.size() : 0)
391 , m_structureStubCompilationInfo(codeBlock ? codeBlock->structureIDInstructions.size() : 0)
395 #define CTI_COMPILE_BINARY_OP(name) \
397 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
398 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
399 emitCall(i, Machine::cti_##name); \
400 emitPutResult(instruction[i + 1].u.operand); \
405 #define CTI_COMPILE_UNARY_OP(name) \
407 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
408 emitCall(i, Machine::cti_##name); \
409 emitPutResult(instruction[i + 1].u.operand); \
414 #if ENABLE(SAMPLING_TOOL)
415 OpcodeID currentOpcodeID = static_cast<OpcodeID>(-1);
418 void CTI::compileOpCall(Instruction* instruction, unsigned i, CompileOpCallType type)
420 if (type == OpConstruct) {
421 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 16);
422 emitPutArgConstant(instruction[i + 5].u.operand, 12);
423 emitPutArgConstant(instruction[i + 4].u.operand, 8);
424 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
426 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 16);
427 emitPutArgConstant(instruction[i + 5].u.operand, 12);
428 emitPutArgConstant(instruction[i + 4].u.operand, 8);
429 // FIXME: should this be loaded dynamically off m_exec?
430 int thisVal = instruction[i + 3].u.operand;
431 if (thisVal == missingThisObjectMarker()) {
432 emitPutArgConstant(reinterpret_cast<unsigned>(m_exec->globalThisValue()), 4);
434 emitGetPutArg(thisVal, 4, X86::ecx);
437 X86Assembler::JmpSrc wasEval;
438 if (type == OpCallEval) {
439 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
440 emitCall(i, Machine::cti_op_call_eval);
441 m_jit.emitRestoreArgumentReference();
443 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
445 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(JSImmediate::impossibleValue()), X86::eax);
446 wasEval = m_jit.emitUnlinkedJne();
448 // this reloads the first arg into ecx (checked just below).
449 emitGetArg(instruction[i + 2].u.operand, X86::ecx);
451 // this sets up the first arg, and explicitly leaves the value in ecx (checked just below).
452 emitGetArg(instruction[i + 2].u.operand, X86::ecx);
453 emitPutArg(X86::ecx, 0);
456 // Fast check for JS function.
457 m_jit.testl_i32r(JSImmediate::TagMask, X86::ecx);
458 X86Assembler::JmpSrc isNotObject = m_jit.emitUnlinkedJne();
459 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsFunctionVptr), X86::ecx);
460 X86Assembler::JmpSrc isJSFunction = m_jit.emitUnlinkedJe();
461 m_jit.link(isNotObject, m_jit.label());
463 // This handles host functions
464 emitCall(i, ((type == OpConstruct) ? Machine::cti_op_construct_NotJSConstruct : Machine::cti_op_call_NotJSFunction));
465 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
467 X86Assembler::JmpSrc wasNotJSFunction = m_jit.emitUnlinkedJmp();
468 m_jit.link(isJSFunction, m_jit.label());
470 // This handles JSFunctions
471 emitCall(i, ((type == OpConstruct) ? Machine::cti_op_construct_JSConstruct : Machine::cti_op_call_JSFunction));
472 m_jit.call_r(X86::eax);
473 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
475 X86Assembler::JmpDst end = m_jit.label();
476 m_jit.link(wasNotJSFunction, end);
477 if (type == OpCallEval)
478 m_jit.link(wasEval, end);
480 emitPutResult(instruction[i + 1].u.operand);
483 void CTI::emitSlowScriptCheck(unsigned opcodeIndex)
485 m_jit.subl_i8r(1, X86::esi);
486 X86Assembler::JmpSrc skipTimeout = m_jit.emitUnlinkedJne();
487 emitCall(opcodeIndex, Machine::cti_timeout_check);
489 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
490 m_jit.movl_mr(OBJECT_OFFSET(ExecState, m_globalData), X86::ecx, X86::ecx);
491 m_jit.movl_mr(OBJECT_OFFSET(JSGlobalData, machine), X86::ecx, X86::ecx);
492 m_jit.movl_mr(OBJECT_OFFSET(Machine, m_ticksUntilNextTimeoutCheck), X86::ecx, X86::esi);
493 m_jit.link(skipTimeout, m_jit.label());
496 void CTI::privateCompileMainPass()
498 Instruction* instruction = m_codeBlock->instructions.begin();
499 unsigned instructionCount = m_codeBlock->instructions.size();
501 unsigned structureIDInstructionIndex = 0;
503 for (unsigned i = 0; i < instructionCount; ) {
504 m_labels[i] = m_jit.label();
506 #if ENABLE(SAMPLING_TOOL)
507 m_jit.movl_i32m(m_machine->getOpcodeID(instruction[i].u.opcode), &currentOpcodeID);
510 ASSERT_WITH_MESSAGE(m_machine->isOpcode(instruction[i].u.opcode), "privateCompileMainPass gone bad @ %d", i);
511 m_jit.emitRestoreArgumentReference();
512 switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
514 unsigned src = instruction[i + 2].u.operand;
515 if (src < m_codeBlock->constantRegisters.size())
516 m_jit.movl_i32r(reinterpret_cast<unsigned>(m_codeBlock->constantRegisters[src].jsValue(m_exec)), X86::edx);
518 emitGetArg(src, X86::edx);
519 emitPutResult(instruction[i + 1].u.operand, X86::edx);
524 unsigned dst = instruction[i + 1].u.operand;
525 unsigned src1 = instruction[i + 2].u.operand;
526 unsigned src2 = instruction[i + 3].u.operand;
527 if (src2 < m_codeBlock->constantRegisters.size()) {
528 JSValue* value = m_codeBlock->constantRegisters[src2].jsValue(m_exec);
529 if (JSImmediate::isNumber(value)) {
530 emitGetArg(src1, X86::eax);
531 emitJumpSlowCaseIfNotImm(X86::eax, i);
532 m_jit.addl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
533 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
538 } else if (!(src1 < m_codeBlock->constantRegisters.size())) {
539 emitGetArg(src1, X86::eax);
540 emitGetArg(src2, X86::edx);
541 emitJumpSlowCaseIfNotImms(X86::eax, X86::edx, i);
542 emitFastArithDeTagImmediate(X86::eax);
543 m_jit.addl_rr(X86::edx, X86::eax);
544 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
549 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
550 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
551 emitCall(i, Machine::cti_op_add);
552 emitPutResult(instruction[i + 1].u.operand);
557 if (m_codeBlock->needsFullScopeChain)
558 emitCall(i, Machine::cti_op_end);
559 emitGetArg(instruction[i + 1].u.operand, X86::eax);
560 #if ENABLE(SAMPLING_TOOL)
561 m_jit.movl_i32m(-1, &currentOpcodeID);
563 m_jit.pushl_m(-((m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize) - RegisterFile::CTIReturnEIP) * sizeof(Register), X86::edi);
569 unsigned target = instruction[i + 1].u.operand;
570 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
575 int srcDst = instruction[i + 1].u.operand;
576 emitGetArg(srcDst, X86::eax);
577 emitJumpSlowCaseIfNotImm(X86::eax, i);
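// Adding the de-tagged constant 'one' leaves the operand's tag bit untouched, so the result is still a correctly tagged immediate and needs no re-tag (overflow is caught by the jo below).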
578 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
579 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
580 emitPutResult(srcDst, X86::eax);
585 emitSlowScriptCheck(i);
587 unsigned target = instruction[i + 1].u.operand;
588 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
592 case op_loop_if_less: {
593 emitSlowScriptCheck(i);
595 unsigned target = instruction[i + 3].u.operand;
596 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
598 emitGetArg(instruction[i + 1].u.operand, X86::edx);
599 emitJumpSlowCaseIfNotImm(X86::edx, i);
600 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
601 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
603 emitGetArg(instruction[i + 1].u.operand, X86::eax);
604 emitGetArg(instruction[i + 2].u.operand, X86::edx);
605 emitJumpSlowCaseIfNotImm(X86::eax, i);
606 emitJumpSlowCaseIfNotImm(X86::edx, i);
607 m_jit.cmpl_rr(X86::edx, X86::eax);
608 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
613 case op_loop_if_lesseq: {
614 emitSlowScriptCheck(i);
616 unsigned target = instruction[i + 3].u.operand;
617 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
619 emitGetArg(instruction[i + 1].u.operand, X86::edx);
620 emitJumpSlowCaseIfNotImm(X86::edx, i);
621 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
622 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
624 emitGetArg(instruction[i + 1].u.operand, X86::eax);
625 emitGetArg(instruction[i + 2].u.operand, X86::edx);
626 emitJumpSlowCaseIfNotImm(X86::eax, i);
627 emitJumpSlowCaseIfNotImm(X86::edx, i);
628 m_jit.cmpl_rr(X86::edx, X86::eax);
629 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
634 case op_new_object: {
635 emitCall(i, Machine::cti_op_new_object);
636 emitPutResult(instruction[i + 1].u.operand);
641 // In order to be able to repatch both the StructureID and the object offset, we store one pointer,
642 // 'hotPathBegin', to just after the point where the arguments have been loaded into registers, and we
643 // generate code such that the StructureID & offset are always at the same distance from it.
645 emitGetArg(instruction[i + 1].u.operand, X86::eax);
646 emitGetArg(instruction[i + 3].u.operand, X86::edx);
648 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
649 X86Assembler::JmpDst hotPathBegin = m_jit.label();
650 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
651 ++structureIDInstructionIndex;
653 // Jump to a slow case if either the base object is an immediate, or if the StructureID does not match.
654 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
655 // It is important that the following instruction plants a 32-bit immediate, in order that it can be patched over.
656 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
657 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdStructureID);
658 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
660 // Plant a store to a bogus offset in the object's property map; we will patch this later, if it is to be used.
661 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
662 m_jit.movl_rm(X86::edx, repatchGetByIdDefaultOffset, X86::eax);
663 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdPropertyMapOffset);
669 // As for put_by_id, get_by_id requires the offset of the StructureID and the offset of the access to be repatched.
670 // Additionally, for get_by_id we need to repatch the offset of the branch to the slow case (we repatch this to jump
671 // to array-length / prototype access trampolines), and finally we also record the property-map access offset as a label
672 // to jump back to if one of these trampolines finds a match.
674 emitGetArg(instruction[i + 2].u.operand, X86::eax);
676 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
678 X86Assembler::JmpDst hotPathBegin = m_jit.label();
679 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
680 ++structureIDInstructionIndex;
682 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
683 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
684 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdStructureID);
685 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
686 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdBranchToSlowCase);
688 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
689 m_jit.movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::ecx);
690 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdPropertyMapOffset);
691 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
696 case op_instanceof: {
697 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
698 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
699 emitGetPutArg(instruction[i + 4].u.operand, 8, X86::ecx);
700 emitCall(i, Machine::cti_op_instanceof);
701 emitPutResult(instruction[i + 1].u.operand);
706 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
707 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
708 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
709 emitCall(i, Machine::cti_op_del_by_id);
710 emitPutResult(instruction[i + 1].u.operand);
715 unsigned dst = instruction[i + 1].u.operand;
716 unsigned src1 = instruction[i + 2].u.operand;
717 unsigned src2 = instruction[i + 3].u.operand;
718 if (src1 < m_codeBlock->constantRegisters.size() || src2 < m_codeBlock->constantRegisters.size()) {
719 unsigned constant = src1;
720 unsigned nonconstant = src2;
721 if (!(src1 < m_codeBlock->constantRegisters.size())) {
725 JSValue* value = m_codeBlock->constantRegisters[constant].jsValue(m_exec);
726 if (JSImmediate::isNumber(value)) {
727 emitGetArg(nonconstant, X86::eax);
728 emitJumpSlowCaseIfNotImm(X86::eax, i);
729 emitFastArithImmToInt(X86::eax);
730 m_jit.imull_i32r(X86::eax, getDeTaggedConstantImmediate(value), X86::eax);
731 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
732 emitFastArithPotentiallyReTagImmediate(X86::eax);
739 emitGetArg(src1, X86::eax);
740 emitGetArg(src2, X86::edx);
741 emitJumpSlowCaseIfNotImms(X86::eax, X86::edx, i);
742 emitFastArithDeTagImmediate(X86::eax);
743 emitFastArithImmToInt(X86::edx);
744 m_jit.imull_rr(X86::edx, X86::eax);
745 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
746 emitFastArithPotentiallyReTagImmediate(X86::eax);
752 FuncDeclNode* func = (m_codeBlock->functions[instruction[i + 2].u.operand]).get();
753 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
754 emitCall(i, Machine::cti_op_new_func);
755 emitPutResult(instruction[i + 1].u.operand);
760 compileOpCall(instruction, i);
764 case op_get_global_var: {
765 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 2].u.jsCell);
766 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
767 emitGetVariableObjectRegister(X86::eax, instruction[i + 3].u.operand, X86::eax);
768 emitPutResult(instruction[i + 1].u.operand, X86::eax);
772 case op_put_global_var: {
773 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 1].u.jsCell);
774 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
775 emitGetArg(instruction[i + 3].u.operand, X86::edx);
776 emitPutVariableObjectRegister(X86::edx, X86::eax, instruction[i + 2].u.operand);
780 case op_get_scoped_var: {
781 int skip = instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain;
783 emitGetCTIParam(CTI_ARGS_scopeChain, X86::eax);
785 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::eax, X86::eax);
787 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::eax, X86::eax);
788 emitGetVariableObjectRegister(X86::eax, instruction[i + 2].u.operand, X86::eax);
789 emitPutResult(instruction[i + 1].u.operand);
793 case op_put_scoped_var: {
794 int skip = instruction[i + 2].u.operand + m_codeBlock->needsFullScopeChain;
796 emitGetCTIParam(CTI_ARGS_scopeChain, X86::edx);
797 emitGetArg(instruction[i + 3].u.operand, X86::eax);
799 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::edx, X86::edx);
801 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::edx, X86::edx);
802 emitPutVariableObjectRegister(X86::eax, X86::edx, instruction[i + 1].u.operand);
807 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
808 emitCall(i, Machine::cti_op_ret);
810 m_jit.pushl_m(-((m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize) - RegisterFile::CTIReturnEIP) * sizeof(Register), X86::edi);
816 m_jit.leal_mr(sizeof(Register) * instruction[i + 2].u.operand, X86::edi, X86::edx);
817 emitPutArg(X86::edx, 0);
818 emitPutArgConstant(instruction[i + 3].u.operand, 4);
819 emitCall(i, Machine::cti_op_new_array);
820 emitPutResult(instruction[i + 1].u.operand);
825 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
826 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
827 emitCall(i, Machine::cti_op_resolve);
828 emitPutResult(instruction[i + 1].u.operand);
833 compileOpCall(instruction, i, OpConstruct);
837 case op_get_by_val: {
838 emitGetArg(instruction[i + 2].u.operand, X86::eax);
839 emitGetArg(instruction[i + 3].u.operand, X86::edx);
840 emitJumpSlowCaseIfNotImm(X86::edx, i);
841 emitFastArithImmToInt(X86::edx);
842 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
843 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
844 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
845 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
847 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
848 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
849 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
850 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
852 // Get the value from the vector
853 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::eax);
854 emitPutResult(instruction[i + 1].u.operand);
858 case op_resolve_func: {
859 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
860 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
861 emitCall(i, Machine::cti_op_resolve_func);
862 emitPutResult(instruction[i + 1].u.operand);
863 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
864 emitPutResult(instruction[i + 2].u.operand);
869 emitGetArg(instruction[i + 2].u.operand, X86::eax);
870 emitGetArg(instruction[i + 3].u.operand, X86::edx);
871 emitJumpSlowCaseIfNotImms(X86::eax, X86::edx, i);
872 m_jit.subl_rr(X86::edx, X86::eax);
873 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
874 emitFastArithReTagImmediate(X86::eax);
875 emitPutResult(instruction[i + 1].u.operand);
879 case op_put_by_val: {
880 emitGetArg(instruction[i + 1].u.operand, X86::eax);
881 emitGetArg(instruction[i + 2].u.operand, X86::edx);
882 emitJumpSlowCaseIfNotImm(X86::edx, i);
883 emitFastArithImmToInt(X86::edx);
884 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
885 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
886 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
887 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
889 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
890 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
891 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
892 X86Assembler::JmpSrc inFastVector = m_jit.emitUnlinkedJa();
893 // No; oh well, check if the access is within the vector - if so, we may still be okay.
894 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
895 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
897 // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
898 // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff.
899 m_jit.cmpl_i8m(0, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
900 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
902 // All good - put the value into the array.
903 m_jit.link(inFastVector, m_jit.label());
904 emitGetArg(instruction[i + 3].u.operand, X86::eax);
905 m_jit.movl_rm(X86::eax, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
909 CTI_COMPILE_BINARY_OP(op_lesseq)
910 case op_loop_if_true: {
911 emitSlowScriptCheck(i);
913 unsigned target = instruction[i + 2].u.operand;
914 emitGetArg(instruction[i + 1].u.operand, X86::eax);
916 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
917 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
918 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
919 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
921 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
922 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
923 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
924 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
926 m_jit.link(isZero, m_jit.label());
930 case op_resolve_base: {
931 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
932 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
933 emitCall(i, Machine::cti_op_resolve_base);
934 emitPutResult(instruction[i + 1].u.operand);
939 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
940 emitCall(i, Machine::cti_op_negate);
941 emitPutResult(instruction[i + 1].u.operand);
945 case op_resolve_skip: {
946 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
947 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
948 emitPutArgConstant(instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain, 4);
949 emitCall(i, Machine::cti_op_resolve_skip);
950 emitPutResult(instruction[i + 1].u.operand);
954 CTI_COMPILE_BINARY_OP(op_div)
956 int srcDst = instruction[i + 1].u.operand;
957 emitGetArg(srcDst, X86::eax);
958 emitJumpSlowCaseIfNotImm(X86::eax, i);
959 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
960 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
961 emitPutResult(srcDst, X86::eax);
966 unsigned target = instruction[i + 3].u.operand;
967 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
969 emitGetArg(instruction[i + 1].u.operand, X86::edx);
970 emitJumpSlowCaseIfNotImm(X86::edx, i);
971 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
972 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
974 emitGetArg(instruction[i + 1].u.operand, X86::eax);
975 emitGetArg(instruction[i + 2].u.operand, X86::edx);
976 emitJumpSlowCaseIfNotImm(X86::eax, i);
977 emitJumpSlowCaseIfNotImm(X86::edx, i);
978 m_jit.cmpl_rr(X86::edx, X86::eax);
979 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
985 emitGetArg(instruction[i + 2].u.operand, X86::eax);
986 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
987 m_jit.testl_i32r(JSImmediate::FullTagTypeMask, X86::eax); // i8?
988 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
989 m_jit.xorl_i8r((JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue), X86::eax);
990 emitPutResult(instruction[i + 1].u.operand);
995 unsigned target = instruction[i + 2].u.operand;
996 emitGetArg(instruction[i + 1].u.operand, X86::eax);
998 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
999 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1000 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1001 X86Assembler::JmpSrc isNonZero = m_jit.emitUnlinkedJne();
1003 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1004 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1005 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1006 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1008 m_jit.link(isNonZero, m_jit.label());
1013 int srcDst = instruction[i + 2].u.operand;
1014 emitGetArg(srcDst, X86::eax);
1015 m_jit.movl_rr(X86::eax, X86::edx);
1016 emitJumpSlowCaseIfNotImm(X86::eax, i);
1017 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1018 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1019 emitPutResult(srcDst, X86::edx);
1020 emitPutResult(instruction[i + 1].u.operand);
1024 case op_unexpected_load: {
1025 JSValue* v = m_codeBlock->unexpectedConstants[instruction[i + 2].u.operand];
1026 m_jit.movl_i32r(reinterpret_cast<unsigned>(v), X86::eax);
1027 emitPutResult(instruction[i + 1].u.operand);
1032 int retAddrDst = instruction[i + 1].u.operand;
1033 int target = instruction[i + 2].u.operand;
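// Store a placeholder (0) for the return address; the real address (sretTarget, recorded in the JSRInfo below) is patched in once code generation is complete.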
1034 m_jit.movl_i32m(0, sizeof(Register) * retAddrDst, X86::edi);
1035 X86Assembler::JmpDst addrPosition = m_jit.label();
1036 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1037 X86Assembler::JmpDst sretTarget = m_jit.label();
1038 m_jsrSites.append(JSRInfo(addrPosition, sretTarget));
1043 m_jit.jmp_m(sizeof(Register) * instruction[i + 1].u.operand, X86::edi);
1047 CTI_COMPILE_BINARY_OP(op_eq)
1049 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1050 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1051 emitJumpSlowCaseIfNotImm(X86::eax, i);
1052 emitJumpSlowCaseIfNotImm(X86::ecx, i);
1053 emitFastArithImmToInt(X86::eax);
1054 emitFastArithImmToInt(X86::ecx);
1055 m_jit.shll_CLr(X86::eax);
1056 emitFastArithIntToImmOrSlowCase(X86::eax, i);
1057 emitPutResult(instruction[i + 1].u.operand);
1062 unsigned src1 = instruction[i + 2].u.operand;
1063 unsigned src2 = instruction[i + 3].u.operand;
1064 unsigned dst = instruction[i + 1].u.operand;
1065 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
1066 emitGetArg(src2, X86::eax);
1067 emitJumpSlowCaseIfNotImm(X86::eax, i);
1068 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax); // FIXME: make it more obvious this is relying on the format of JSImmediate
1070 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
1071 emitGetArg(src1, X86::eax);
1072 emitJumpSlowCaseIfNotImm(X86::eax, i);
1073 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax);
1076 emitGetArg(src1, X86::eax);
1077 emitGetArg(src2, X86::edx);
1078 m_jit.andl_rr(X86::edx, X86::eax);
1079 emitJumpSlowCaseIfNotImm(X86::eax, i);
1086 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1087 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1088 emitJumpSlowCaseIfNotImm(X86::eax, i);
1089 emitJumpSlowCaseIfNotImm(X86::ecx, i);
1090 emitFastArithImmToInt(X86::ecx);
1091 m_jit.sarl_CLr(X86::eax);
1092 emitFastArithPotentiallyReTagImmediate(X86::eax);
1093 emitPutResult(instruction[i + 1].u.operand);
1098 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1099 emitJumpSlowCaseIfNotImm(X86::eax, i);
1100 m_jit.xorl_i8r(~JSImmediate::TagBitTypeInteger, X86::eax);
1101 emitPutResult(instruction[i + 1].u.operand);
1105 case op_resolve_with_base: {
1106 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1107 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1108 emitCall(i, Machine::cti_op_resolve_with_base);
1109 emitPutResult(instruction[i + 1].u.operand);
1110 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
1111 emitPutResult(instruction[i + 2].u.operand);
1115 case op_new_func_exp: {
1116 FuncExprNode* func = (m_codeBlock->functionExpressions[instruction[i + 2].u.operand]).get();
1117 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
1118 emitCall(i, Machine::cti_op_new_func_exp);
1119 emitPutResult(instruction[i + 1].u.operand);
1124 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1125 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1126 emitJumpSlowCaseIfNotImm(X86::eax, i);
1127 emitJumpSlowCaseIfNotImm(X86::ecx, i);
1128 emitFastArithDeTagImmediate(X86::eax);
1129 emitFastArithDeTagImmediate(X86::ecx);
1130 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i)); // This is checking if the last detag resulted in a value 0.
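// idiv leaves the quotient in eax and the remainder in edx; op_mod wants the remainder, which is re-tagged below.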
1132 m_jit.idivl_r(X86::ecx);
1133 emitFastArithReTagImmediate(X86::edx);
1134 m_jit.movl_rr(X86::edx, X86::eax);
1135 emitPutResult(instruction[i + 1].u.operand);
1140 unsigned target = instruction[i + 2].u.operand;
1141 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1143 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1144 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
1145 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1146 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
1148 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1149 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1150 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1151 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1153 m_jit.link(isZero, m_jit.label());
1157 CTI_COMPILE_BINARY_OP(op_less)
1158 CTI_COMPILE_BINARY_OP(op_neq)
1160 int srcDst = instruction[i + 2].u.operand;
1161 emitGetArg(srcDst, X86::eax);
1162 m_jit.movl_rr(X86::eax, X86::edx);
1163 emitJumpSlowCaseIfNotImm(X86::eax, i);
1164 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1165 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1166 emitPutResult(srcDst, X86::edx);
1167 emitPutResult(instruction[i + 1].u.operand);
1171 CTI_COMPILE_BINARY_OP(op_urshift)
1173 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1174 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1175 emitJumpSlowCaseIfNotImms(X86::eax, X86::edx, i);
1176 m_jit.xorl_rr(X86::edx, X86::eax);
1177 emitFastArithReTagImmediate(X86::eax);
1178 emitPutResult(instruction[i + 1].u.operand);
1182 case op_new_regexp: {
1183 RegExp* regExp = m_codeBlock->regexps[instruction[i + 2].u.operand].get();
1184 emitPutArgConstant(reinterpret_cast<unsigned>(regExp), 0);
1185 emitCall(i, Machine::cti_op_new_regexp);
1186 emitPutResult(instruction[i + 1].u.operand);
1191 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1192 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1193 emitJumpSlowCaseIfNotImms(X86::eax, X86::edx, i);
1194 m_jit.orl_rr(X86::edx, X86::eax);
1195 emitPutResult(instruction[i + 1].u.operand);
1199 case op_call_eval: {
1200 compileOpCall(instruction, i, OpCallEval);
1205 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1206 emitCall(i, Machine::cti_op_throw);
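// Undo the 0x24-byte stack adjustment made in ctiTrampoline and restore edi/esi before leaving the generated code.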
1207 m_jit.addl_i8r(0x24, X86::esp);
1208 m_jit.popl_r(X86::edi);
1209 m_jit.popl_r(X86::esi);
1214 case op_get_pnames: {
1215 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1216 emitCall(i, Machine::cti_op_get_pnames);
1217 emitPutResult(instruction[i + 1].u.operand);
1221 case op_next_pname: {
1222 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1223 unsigned target = instruction[i + 3].u.operand;
1224 emitCall(i, Machine::cti_op_next_pname);
1225 m_jit.testl_rr(X86::eax, X86::eax);
1226 X86Assembler::JmpSrc endOfIter = m_jit.emitUnlinkedJe();
1227 emitPutResult(instruction[i + 1].u.operand);
1228 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 3 + target));
1229 m_jit.link(endOfIter, m_jit.label());
1233 case op_push_scope: {
1234 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1235 emitCall(i, Machine::cti_op_push_scope);
1239 case op_pop_scope: {
1240 emitCall(i, Machine::cti_op_pop_scope);
1244 CTI_COMPILE_UNARY_OP(op_typeof)
1245 CTI_COMPILE_UNARY_OP(op_is_undefined)
1246 CTI_COMPILE_UNARY_OP(op_is_boolean)
1247 CTI_COMPILE_UNARY_OP(op_is_number)
1248 CTI_COMPILE_UNARY_OP(op_is_string)
1249 CTI_COMPILE_UNARY_OP(op_is_object)
1250 CTI_COMPILE_UNARY_OP(op_is_function)
1251 CTI_COMPILE_BINARY_OP(op_stricteq)
1252 CTI_COMPILE_BINARY_OP(op_nstricteq)
1253 case op_to_jsnumber: {
1254 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1255 emitCall(i, Machine::cti_op_to_jsnumber);
1256 emitPutResult(instruction[i + 1].u.operand);
1261 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1262 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1263 emitCall(i, Machine::cti_op_in);
1264 emitPutResult(instruction[i + 1].u.operand);
1268 case op_push_new_scope: {
1269 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1270 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1271 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1272 emitCall(i, Machine::cti_op_push_new_scope);
1273 emitPutResult(instruction[i + 1].u.operand);
1278 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
1279 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
1280 m_jit.movl_mr(OBJECT_OFFSET(ExecState, m_exception), X86::ecx, X86::eax);
1281 m_jit.movl_i32m(0, OBJECT_OFFSET(ExecState, m_exception), X86::ecx);
1282 emitPutResult(instruction[i + 1].u.operand);
1286 case op_jmp_scopes: {
1287 unsigned count = instruction[i + 1].u.operand;
1288 emitPutArgConstant(count, 0);
1289 emitCall(i, Machine::cti_op_jmp_scopes);
1290 unsigned target = instruction[i + 2].u.operand;
1291 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1295 case op_put_by_index: {
1296 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1297 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1298 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1299 emitCall(i, Machine::cti_op_put_by_index);
1303 case op_switch_imm: {
1304 unsigned tableIndex = instruction[i + 1].u.operand;
1305 unsigned defaultOffset = instruction[i + 2].u.operand;
1306 unsigned scrutinee = instruction[i + 3].u.operand;
1308 // create jump table for switch destinations, track this switch statement.
1309 SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTables[tableIndex];
1310 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Immediate));
1311 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
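// ctiOffsets mirrors branchOffsets; the machine-code targets are filled in when the SwitchRecord is resolved, and the helper call below returns the destination address in eax.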
1313 emitGetPutArg(scrutinee, 0, X86::ecx);
1314 emitPutArgConstant(tableIndex, 4);
1315 emitCall(i, Machine::cti_op_switch_imm);
1316 m_jit.jmp_r(X86::eax);
1320 case op_switch_char: {
1321 unsigned tableIndex = instruction[i + 1].u.operand;
1322 unsigned defaultOffset = instruction[i + 2].u.operand;
1323 unsigned scrutinee = instruction[i + 3].u.operand;
1325 // create jump table for switch destinations, track this switch statement.
1326 SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTables[tableIndex];
1327 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Character));
1328 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
1330 emitGetPutArg(scrutinee, 0, X86::ecx);
1331 emitPutArgConstant(tableIndex, 4);
1332 emitCall(i, Machine::cti_op_switch_char);
1333 m_jit.jmp_r(X86::eax);
1337 case op_switch_string: {
1338 unsigned tableIndex = instruction[i + 1].u.operand;
1339 unsigned defaultOffset = instruction[i + 2].u.operand;
1340 unsigned scrutinee = instruction[i + 3].u.operand;
1342 // create jump table for switch destinations, track this switch statement.
1343 StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTables[tableIndex];
1344 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset));
1346 emitGetPutArg(scrutinee, 0, X86::ecx);
1347 emitPutArgConstant(tableIndex, 4);
1348 emitCall(i, Machine::cti_op_switch_string);
1349 m_jit.jmp_r(X86::eax);
1353 case op_del_by_val: {
1354 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1355 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1356 emitCall(i, Machine::cti_op_del_by_val);
1357 emitPutResult(instruction[i + 1].u.operand);
1361 case op_put_getter: {
1362 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1363 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1364 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1365 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1366 emitCall(i, Machine::cti_op_put_getter);
1370 case op_put_setter: {
1371 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1372 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1373 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1374 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1375 emitCall(i, Machine::cti_op_put_setter);
1379 case op_new_error: {
1380 JSValue* message = m_codeBlock->unexpectedConstants[instruction[i + 3].u.operand];
1381 emitPutArgConstant(instruction[i + 2].u.operand, 0);
1382 emitPutArgConstant(reinterpret_cast<unsigned>(message), 4);
1383 emitPutArgConstant(m_codeBlock->lineNumberForVPC(&instruction[i]), 8);
1384 emitCall(i, Machine::cti_op_new_error);
1385 emitPutResult(instruction[i + 1].u.operand);
1390 emitPutArgConstant(instruction[i + 1].u.operand, 0);
1391 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1392 emitPutArgConstant(instruction[i + 3].u.operand, 8);
1393 emitCall(i, Machine::cti_op_debug);
1398 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1400 // go to a slow case either if this is not an immediate, or if the immediate is not undefined/null.
1401 emitJumpSlowCaseIfIsJSCell(X86::edx, i);
1402 m_jit.andl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::edx);
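// Masking off ExtendedTagBitUndefined folds the undefined tag onto the null tag, so a single compare against FullTagTypeNull matches both values.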
1403 m_jit.cmpl_i32r(JSImmediate::FullTagTypeNull, X86::edx);
1404 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1406 m_jit.movl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1407 emitPutResult(instruction[i + 1].u.operand);
1413 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1415 // go to a slow case either if this is not an immediate, or if the immediate is not undefined/null.
1416 emitJumpSlowCaseIfIsJSCell(X86::edx, i);
1417 m_jit.andl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::edx);
1418 m_jit.cmpl_i32r(JSImmediate::FullTagTypeNull, X86::edx);
1419 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1421 m_jit.movl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1422 emitPutResult(instruction[i + 1].u.operand);
1427 case op_get_array_length:
1428 case op_get_by_id_chain:
1429 case op_get_by_id_generic:
1430 case op_get_by_id_proto:
1431 case op_get_by_id_self:
1432 case op_get_string_length:
1433 case op_put_by_id_generic:
1434 case op_put_by_id_replace:
1435 case op_put_by_id_transition:
1436 ASSERT_NOT_REACHED();
1440 ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
1444 void CTI::privateCompileLinkPass()
1446 unsigned jmpTableCount = m_jmpTable.size();
1447 for (unsigned i = 0; i < jmpTableCount; ++i)
1448 m_jit.link(m_jmpTable[i].from, m_labels[m_jmpTable[i].to]);
1452 #define CTI_COMPILE_BINARY_OP_SLOW_CASE(name) \
1454 m_jit.link(iter->from, m_jit.label()); \
1455 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
1456 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
1457 emitCall(i, Machine::cti_##name); \
1458 emitPutResult(instruction[i + 1].u.operand); \
1463 void CTI::privateCompileSlowCases()
1465 unsigned structureIDInstructionIndex = 0;
1467 Instruction* instruction = m_codeBlock->instructions.begin();
1468 for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end(); ++iter) {
1469 unsigned i = iter->to;
1470 m_jit.emitRestoreArgumentReference();
1471 switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
1473 unsigned dst = instruction[i + 1].u.operand;
1474 unsigned src2 = instruction[i + 3].u.operand;
1475 if (src2 < m_codeBlock->constantRegisters.size()) {
1476 JSValue* value = m_codeBlock->constantRegisters[src2].jsValue(m_exec);
1477 if (JSImmediate::isNumber(value)) {
1478 X86Assembler::JmpSrc notImm = iter->from;
1479 m_jit.link((++iter)->from, m_jit.label());
1480 m_jit.subl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
1481 m_jit.link(notImm, m_jit.label());
1482 emitPutArg(X86::eax, 0);
1483 emitGetPutArg(src2, 4, X86::ecx);
1484 emitCall(i, Machine::cti_op_add);
1491 ASSERT(!(static_cast<unsigned>(instruction[i + 2].u.operand) < m_codeBlock->constantRegisters.size()));
1493 X86Assembler::JmpSrc notImm = iter->from;
1494 m_jit.link((++iter)->from, m_jit.label());
1495 m_jit.subl_rr(X86::edx, X86::eax);
1496 emitFastArithReTagImmediate(X86::eax);
1497 m_jit.link(notImm, m_jit.label());
1498 emitPutArg(X86::eax, 0);
1499 emitPutArg(X86::edx, 4);
1500 emitCall(i, Machine::cti_op_add);
1505 case op_get_by_val: {
1506 // The slow case that handles accesses to arrays (below) may jump back up to here.
1507 X86Assembler::JmpDst beginGetByValSlow = m_jit.label();
1509 X86Assembler::JmpSrc notImm = iter->from;
1510 m_jit.link((++iter)->from, m_jit.label());
1511 m_jit.link((++iter)->from, m_jit.label());
1512 emitFastArithIntToImmNoCheck(X86::edx);
1513 m_jit.link(notImm, m_jit.label());
1514 emitPutArg(X86::eax, 0);
1515 emitPutArg(X86::edx, 4);
1516 emitCall(i, Machine::cti_op_get_by_val);
1517 emitPutResult(instruction[i + 1].u.operand);
1518 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
1520 // This is the slow case that handles accesses to arrays above the fast cut-off.
1521 // First, check if this is an access to the vector
1522 m_jit.link((++iter)->from, m_jit.label());
1523 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
1524 m_jit.link(m_jit.emitUnlinkedJbe(), beginGetByValSlow);
1526 // okay, missed the fast region, but it is still in the vector. Get the value.
1527 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::ecx);
1528 // Check whether the value loaded is zero; if so we need to return undefined.
1529 m_jit.testl_rr(X86::ecx, X86::ecx);
1530 m_jit.link(m_jit.emitUnlinkedJe(), beginGetByValSlow);
1531 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
1537 X86Assembler::JmpSrc notImm = iter->from;
1538 m_jit.link((++iter)->from, m_jit.label());
1539 m_jit.addl_rr(X86::edx, X86::eax);
1540 m_jit.link(notImm, m_jit.label());
1541 emitPutArg(X86::eax, 0);
1542 emitPutArg(X86::edx, 4);
1543 emitCall(i, Machine::cti_op_sub);
1544 emitPutResult(instruction[i + 1].u.operand);
1549 m_jit.link(iter->from, m_jit.label());
1550 m_jit.link((++iter)->from, m_jit.label());
1551 emitPutArg(X86::eax, 0);
1552 emitPutArg(X86::ecx, 4);
1553 emitCall(i, Machine::cti_op_rshift);
1554 emitPutResult(instruction[i + 1].u.operand);
1559 X86Assembler::JmpSrc notImm1 = iter->from;
1560 X86Assembler::JmpSrc notImm2 = (++iter)->from;
1561 m_jit.link((++iter)->from, m_jit.label());
1562 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1563 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1564 m_jit.link(notImm1, m_jit.label());
1565 m_jit.link(notImm2, m_jit.label());
1566 emitPutArg(X86::eax, 0);
1567 emitPutArg(X86::ecx, 4);
1568 emitCall(i, Machine::cti_op_lshift);
1569 emitPutResult(instruction[i + 1].u.operand);
1573 case op_loop_if_less: {
1574 emitSlowScriptCheck(i);
1576 unsigned target = instruction[i + 3].u.operand;
1577 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1579 m_jit.link(iter->from, m_jit.label());
1580 emitPutArg(X86::edx, 0);
1581 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
1582 emitCall(i, Machine::cti_op_loop_if_less);
1583 m_jit.testl_rr(X86::eax, X86::eax);
1584 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
1586 m_jit.link(iter->from, m_jit.label());
1587 m_jit.link((++iter)->from, m_jit.label());
1588 emitPutArg(X86::eax, 0);
1589 emitPutArg(X86::edx, 4);
1590 emitCall(i, Machine::cti_op_loop_if_less);
1591 m_jit.testl_rr(X86::eax, X86::eax);
1592 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
1597 case op_put_by_id: {
1598 m_jit.link(iter->from, m_jit.label());
1599 m_jit.link((++iter)->from, m_jit.label());
1601 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1602 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1603 emitPutArg(X86::eax, 0);
1604 emitPutArg(X86::edx, 8);
1605 X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_put_by_id);
1607 // Track the location of the call; this will be used to recover repatch information.
1608 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
1609 m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
1610 ++structureIDInstructionIndex;
1615 case op_get_by_id: {
1616 // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
1617 // so that we only need to track one pointer into the slow case code - we track a pointer to the location
1618 // of the call (which we can use to look up the repatch information), but should an array-length or
1619 // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
1620 // the distance from the call to the head of the slow case.
1622 m_jit.link(iter->from, m_jit.label());
1623 m_jit.link((++iter)->from, m_jit.label());
1626 X86Assembler::JmpDst coldPathBegin = m_jit.label();
1628 emitPutArg(X86::eax, 0);
1629 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1630 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1631 X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_get_by_id);
1632 ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
1633 emitPutResult(instruction[i + 1].u.operand);
1635 // Track the location of the call; this will be used to recover repatch information.
1636 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
1637 m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
1638 ++structureIDInstructionIndex;
1643 case op_loop_if_lesseq: {
1644 emitSlowScriptCheck(i);
1646 unsigned target = instruction[i + 3].u.operand;
1647 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1649 m_jit.link(iter->from, m_jit.label());
1650 emitPutArg(X86::edx, 0);
1651 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
1652 emitCall(i, Machine::cti_op_loop_if_lesseq);
1653 m_jit.testl_rr(X86::eax, X86::eax);
1654 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
1656 m_jit.link(iter->from, m_jit.label());
1657 m_jit.link((++iter)->from, m_jit.label());
1658 emitPutArg(X86::eax, 0);
1659 emitPutArg(X86::edx, 4);
1660 emitCall(i, Machine::cti_op_loop_if_lesseq);
1661 m_jit.testl_rr(X86::eax, X86::eax);
1662 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
1668 unsigned srcDst = instruction[i + 1].u.operand;
1669 X86Assembler::JmpSrc notImm = iter->from;
1670 m_jit.link((++iter)->from, m_jit.label());
1671 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
1672 m_jit.link(notImm, m_jit.label());
1673 emitPutArg(X86::eax, 0);
1674 emitCall(i, Machine::cti_op_pre_inc);
1675 emitPutResult(srcDst);
1679 case op_put_by_val: {
1680 // Normal slow cases - either the subscript is not an immediate int, or the base is not an array.
1681 X86Assembler::JmpSrc notImm = iter->from;
1682 m_jit.link((++iter)->from, m_jit.label());
1683 m_jit.link((++iter)->from, m_jit.label());
1684 emitFastArithIntToImmNoCheck(X86::edx);
1685 m_jit.link(notImm, m_jit.label());
1686 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1687 emitPutArg(X86::eax, 0);
1688 emitPutArg(X86::edx, 4);
1689 emitPutArg(X86::ecx, 8);
1690 emitCall(i, Machine::cti_op_put_by_val);
1691 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
1693 // slow cases for immediate int accesses to arrays
1694 m_jit.link((++iter)->from, m_jit.label());
1695 m_jit.link((++iter)->from, m_jit.label());
1696 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1697 emitPutArg(X86::eax, 0);
1698 emitPutArg(X86::edx, 4);
1699 emitPutArg(X86::ecx, 8);
1700 emitCall(i, Machine::cti_op_put_by_val_array);
1705 case op_loop_if_true: {
1706 emitSlowScriptCheck(i);
1708 m_jit.link(iter->from, m_jit.label());
1709 emitPutArg(X86::eax, 0);
1710 emitCall(i, Machine::cti_op_jtrue);
1711 m_jit.testl_rr(X86::eax, X86::eax);
1712 unsigned target = instruction[i + 2].u.operand;
1713 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
1718 unsigned srcDst = instruction[i + 1].u.operand;
1719 X86Assembler::JmpSrc notImm = iter->from;
1720 m_jit.link((++iter)->from, m_jit.label());
1721 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
1722 m_jit.link(notImm, m_jit.label());
1723 emitPutArg(X86::eax, 0);
1724 emitCall(i, Machine::cti_op_pre_dec);
1725 emitPutResult(srcDst);
1730 unsigned target = instruction[i + 3].u.operand;
1731 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1733 m_jit.link(iter->from, m_jit.label());
1734 emitPutArg(X86::edx, 0);
1735 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
1736 emitCall(i, Machine::cti_op_jless);
1737 m_jit.testl_rr(X86::eax, X86::eax);
1738 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
1740 m_jit.link(iter->from, m_jit.label());
1741 m_jit.link((++iter)->from, m_jit.label());
1742 emitPutArg(X86::eax, 0);
1743 emitPutArg(X86::edx, 4);
1744 emitCall(i, Machine::cti_op_jless);
1745 m_jit.testl_rr(X86::eax, X86::eax);
1746 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
1752 m_jit.link(iter->from, m_jit.label());
1753 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
1754 emitPutArg(X86::eax, 0);
1755 emitCall(i, Machine::cti_op_not);
1756 emitPutResult(instruction[i + 1].u.operand);
1761 m_jit.link(iter->from, m_jit.label());
1762 emitPutArg(X86::eax, 0);
1763 emitCall(i, Machine::cti_op_jtrue);
1764 m_jit.testl_rr(X86::eax, X86::eax);
1765 unsigned target = instruction[i + 2].u.operand;
1766 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 2 + target]); // inverted!
1771 unsigned srcDst = instruction[i + 2].u.operand;
1772 m_jit.link(iter->from, m_jit.label());
1773 m_jit.link((++iter)->from, m_jit.label());
1774 emitPutArg(X86::eax, 0);
1775 emitCall(i, Machine::cti_op_post_inc);
1776 emitPutResult(instruction[i + 1].u.operand);
1777 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
1778 emitPutResult(srcDst);
1783 m_jit.link(iter->from, m_jit.label());
1784 emitPutArg(X86::eax, 0);
1785 emitCall(i, Machine::cti_op_bitnot);
1786 emitPutResult(instruction[i + 1].u.operand);
1791 unsigned src1 = instruction[i + 2].u.operand;
1792 unsigned src2 = instruction[i + 3].u.operand;
1793 unsigned dst = instruction[i + 1].u.operand;
1794 if (getConstantImmediateNumericArg(src1)) {
1795 m_jit.link(iter->from, m_jit.label());
1796 emitGetPutArg(src1, 0, X86::ecx);
1797 emitPutArg(X86::eax, 4);
1798 emitCall(i, Machine::cti_op_bitand);
1800 } else if (getConstantImmediateNumericArg(src2)) {
1801 m_jit.link(iter->from, m_jit.label());
1802 emitPutArg(X86::eax, 0);
1803 emitGetPutArg(src2, 4, X86::ecx);
1804 emitCall(i, Machine::cti_op_bitand);
1807 m_jit.link(iter->from, m_jit.label());
1808 emitGetPutArg(src1, 0, X86::ecx);
1809 emitPutArg(X86::edx, 4);
1810 emitCall(i, Machine::cti_op_bitand);
1817 m_jit.link(iter->from, m_jit.label());
1818 emitPutArg(X86::eax, 0);
1819 emitCall(i, Machine::cti_op_jtrue);
1820 m_jit.testl_rr(X86::eax, X86::eax);
1821 unsigned target = instruction[i + 2].u.operand;
1822 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
1827 unsigned srcDst = instruction[i + 2].u.operand;
1828 m_jit.link(iter->from, m_jit.label());
1829 m_jit.link((++iter)->from, m_jit.label());
1830 emitPutArg(X86::eax, 0);
1831 emitCall(i, Machine::cti_op_post_dec);
1832 emitPutResult(instruction[i + 1].u.operand);
1833 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
1834 emitPutResult(srcDst);
1839 m_jit.link(iter->from, m_jit.label());
1840 emitPutArg(X86::eax, 0);
1841 emitPutArg(X86::edx, 4);
1842 emitCall(i, Machine::cti_op_bitxor);
1843 emitPutResult(instruction[i + 1].u.operand);
1848 m_jit.link(iter->from, m_jit.label());
1849 emitPutArg(X86::eax, 0);
1850 emitPutArg(X86::edx, 4);
1851 emitCall(i, Machine::cti_op_bitor);
1852 emitPutResult(instruction[i + 1].u.operand);
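// Slow cases for op_mod: the path that bailed out after de-tagging re-tags both operands here, while the
// not-immediate paths skip straight to the call to cti_op_mod.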
1857 X86Assembler::JmpSrc notImm1 = iter->from;
1858 X86Assembler::JmpSrc notImm2 = (++iter)->from;
1859 m_jit.link((++iter)->from, m_jit.label());
1860 emitFastArithReTagImmediate(X86::eax);
1861 emitFastArithReTagImmediate(X86::ecx);
1862 m_jit.link(notImm1, m_jit.label());
1863 m_jit.link(notImm2, m_jit.label());
1864 emitPutArg(X86::eax, 0);
1865 emitPutArg(X86::ecx, 4);
1866 emitCall(i, Machine::cti_op_mod);
1867 emitPutResult(instruction[i + 1].u.operand);
1871 CTI_COMPILE_BINARY_OP_SLOW_CASE(op_mul);
1873 m_jit.link(iter->from, m_jit.label());
1875 // Value is a JSCell - speculate false, check for StringObjectThatMasqueradesAsUndefined.
1876 m_jit.movl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1877 emitPutResult(instruction[i + 1].u.operand);
1878 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsStringObjectThatMasqueradesAsUndefinedVptr), X86::edx);
1879 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3]);
1881 // Value is a StringObjectThatMasqueradesAsUndefined
1882 m_jit.movl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1883 emitPutResult(instruction[i + 1].u.operand);
1884 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 3]);
1886 // Value is an immediate other than undefined/null
1887 m_jit.link((++iter)->from, m_jit.label());
1888 m_jit.movl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1889 emitPutResult(instruction[i + 1].u.operand);
1895 m_jit.link(iter->from, m_jit.label());
1897 // Value is a JSCell - speculate true, check for StringObjectThatMasqueradesAsUndefined.
1898 m_jit.movl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1899 emitPutResult(instruction[i + 1].u.operand);
1900 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsStringObjectThatMasqueradesAsUndefinedVptr), X86::edx);
1901 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3]);
1903 // Value is a StringObjectThatMasqueradesAsUndefined
1904 m_jit.movl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1905 emitPutResult(instruction[i + 1].u.operand);
1906 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 3]);
1908 // Value is an immediate other than undefined/null
1909 m_jit.link((++iter)->from, m_jit.label());
1910 m_jit.movl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1911 emitPutResult(instruction[i + 1].u.operand);
1917 ASSERT_NOT_REACHED();
1921 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i]);
1924 ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
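// privateCompile drives the whole translation: run the main, link, and slow-case passes, copy the generated
// code into place, then fix up switch tables, exception handlers, call targets, jsr addresses, and the
// property-access stub information recorded during compilation.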
1927 void CTI::privateCompile()
1929 // Could use a popl_m, but would need to offset the following instruction if so.
1930 m_jit.popl_r(X86::ecx);
1931 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
1932 emitPutToCallFrameHeader(X86::ecx, RegisterFile::CTIReturnEIP);
1934 privateCompileMainPass();
1935 privateCompileLinkPass();
1936 privateCompileSlowCases();
1938 ASSERT(m_jmpTable.isEmpty());
1940 void* code = m_jit.copy();
1943 // Translate vPC offsets into addresses in JIT generated code, for switch tables.
1944 for (unsigned i = 0; i < m_switches.size(); ++i) {
1945 SwitchRecord record = m_switches[i];
1946 unsigned opcodeIndex = record.m_opcodeIndex;
1948 if (record.m_type != SwitchRecord::String) {
1949 ASSERT(record.m_type == SwitchRecord::Immediate || record.m_type == SwitchRecord::Character);
1950 ASSERT(record.m_jumpTable.m_simpleJumpTable->branchOffsets.size() == record.m_jumpTable.m_simpleJumpTable->ctiOffsets.size());
1952 record.m_jumpTable.m_simpleJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
1954 for (unsigned j = 0; j < record.m_jumpTable.m_simpleJumpTable->branchOffsets.size(); ++j) {
1955 unsigned offset = record.m_jumpTable.m_simpleJumpTable->branchOffsets[j];
1956 record.m_jumpTable.m_simpleJumpTable->ctiOffsets[j] = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_simpleJumpTable->ctiDefault;
1959 ASSERT(record.m_type == SwitchRecord::String);
1961 record.m_jumpTable.m_stringJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
1963 StringJumpTable::StringOffsetTable::iterator end = record.m_jumpTable.m_stringJumpTable->offsetTable.end();
1964 for (StringJumpTable::StringOffsetTable::iterator it = record.m_jumpTable.m_stringJumpTable->offsetTable.begin(); it != end; ++it) {
1965 unsigned offset = it->second.branchOffset;
1966 it->second.ctiOffset = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_stringJumpTable->ctiDefault;
1971 for (Vector<HandlerInfo>::iterator iter = m_codeBlock->exceptionHandlers.begin(); iter != m_codeBlock->exceptionHandlers.end(); ++iter)
1972 iter->nativeCode = m_jit.getRelocatedAddress(code, m_labels[iter->target]);
1974 // FIXME: There doesn't seem to be a way to hint to a hashmap that it should make a certain capacity available;
1975 // could be faster if we could do something like this:
1976 // m_codeBlock->ctiReturnAddressVPCMap.grow(m_calls.size());
1977 for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
1978 X86Assembler::link(code, iter->from, iter->to);
1979 m_codeBlock->ctiReturnAddressVPCMap.add(m_jit.getRelocatedAddress(code, iter->from), iter->opcodeIndex);
1982 // Link absolute addresses for jsr
1983 for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
1984 X86Assembler::linkAbsoluteAddress(code, iter->addrPosition, iter->target);
1986 for (unsigned i = 0; i < m_codeBlock->structureIDInstructions.size(); ++i) {
1987 StructureStubInfo& info = m_codeBlock->structureIDInstructions[i];
1988 info.callReturnLocation = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].callReturnLocation);
1989 info.hotPathBegin = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].hotPathBegin);
1993 m_codeBlock->ctiCode = code;
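// Generate a stub for a get_by_id hitting a property stored directly on the base object: verify the value is
// a cell with the expected StructureID, then load the result straight out of the property storage.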
1996 void CTI::privateCompileGetByIdSelf(StructureID* structureID, size_t cachedOffset, void* returnAddress)
1998 // Check eax is an object of the right StructureID.
1999 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2000 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2001 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2002 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2004 // Checks out okay! - getDirectOffset
2005 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2006 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
2009 void* code = m_jit.copy();
2012 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2013 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2015 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2017 ctiRepatchCallByReturnAddress(returnAddress, code);
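// Generate a stub for a get_by_id hitting a property on the direct prototype: verify the base and prototype
// StructureIDs, then load the result from the prototype's property storage.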
2020 void CTI::privateCompileGetByIdProto(StructureID* structureID, StructureID* prototypeStructureID, size_t cachedOffset, void* returnAddress)
2022 #if USE(CTI_REPATCH_PIC)
2023 StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
2025 // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
2026 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2028 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
2029 // referencing the prototype object - let's speculatively load its table nice and early!)
2030 JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
2031 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2032 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2034 // check eax is an object of the right StructureID.
2035 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2036 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2037 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2038 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2040 // Check that the prototype object's StructureID has not changed.
2041 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2042 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
2043 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
2045 // Checks out okay! - getDirectOffset
2046 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::ecx);
2048 X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
2050 void* code = m_jit.copy();
2053 // Use the repatch information to link the failure cases back to the original slow case routine.
2054 void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
2055 X86Assembler::link(code, failureCases1, slowCaseBegin);
2056 X86Assembler::link(code, failureCases2, slowCaseBegin);
2057 X86Assembler::link(code, failureCases3, slowCaseBegin);
2059 // On success, return back to the hot patch code, at a point where it will perform the store to dest for us.
2060 intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
2061 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
2063 // Track the stub we have created so that it will be deleted later.
2064 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2066 // Finally repatch the jump to the slow case back in the hot path to jump here instead.
2067 // FIXME: should revert this repatching, on failure.
2068 intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
2069 X86Assembler::repatchBranchOffset(jmpLocation, code);
2071 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
2072 // referencing the prototype object - let's speculatively load its table nice and early!)
2073 JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
2074 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2075 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2077 // check eax is an object of the right StructureID.
2078 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2079 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2080 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2081 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2083 // Check that the prototype object's StructureID has not changed.
2084 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2085 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
2086 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
2088 // Checks out okay! - getDirectOffset
2089 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
2093 void* code = m_jit.copy();
2096 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2097 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2098 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2100 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2102 ctiRepatchCallByReturnAddress(returnAddress, code);
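// Generate a stub for a get_by_id hitting a property further up the prototype chain: verify the base's
// StructureID and each StructureID along the chain, then load the result from the final prototype's storage.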
2106 void CTI::privateCompileGetByIdChain(StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset, void* returnAddress)
2110 Vector<X86Assembler::JmpSrc> bucketsOfFail;
2112 // Check eax is an object of the right StructureID.
2113 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2114 bucketsOfFail.append(m_jit.emitUnlinkedJne());
2115 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2116 bucketsOfFail.append(m_jit.emitUnlinkedJne());
2118 StructureID* currStructureID = structureID;
2119 RefPtr<StructureID>* chainEntries = chain->head();
2120 JSObject* protoObject = 0;
2121 for (unsigned i = 0; i<count; ++i) {
2122 protoObject = static_cast<JSObject*>(currStructureID->prototypeForLookup(m_exec));
2123 currStructureID = chainEntries[i].get();
2125 // Check that the prototype object's StructureID has not changed.
2126 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2127 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(currStructureID), static_cast<void*>(protoStructureIDAddress));
2128 bucketsOfFail.append(m_jit.emitUnlinkedJne());
2130 ASSERT(protoObject);
2132 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2133 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2134 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
2137 bucketsOfFail.append(m_jit.emitUnlinkedJmp());
2139 void* code = m_jit.copy();
2142 for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
2143 X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2145 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2147 ctiRepatchCallByReturnAddress(returnAddress, code);
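// Generate a stub for a put_by_id that overwrites an existing property: verify the StructureID, then store
// the value into the existing property storage slot.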
2150 void CTI::privateCompilePutByIdReplace(StructureID* structureID, size_t cachedOffset, void* returnAddress)
2152 // check eax is an object of the right StructureID.
2153 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2154 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2155 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2156 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2158 // checks out okay! - putDirectOffset
2159 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2160 m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
2163 void* code = m_jit.copy();
2166 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
2167 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
2169 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2171 ctiRepatchCallByReturnAddress(returnAddress, code);
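// C++ helper called from the put_by_id transition stub when the transition also requires the property
// storage to be reallocated: perform the StructureID transition, grow the storage, and store the value.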
2176 static JSValue* SFX_CALL transitionObject(StructureID* newStructureID, size_t cachedOffset, JSObject* baseObject, JSValue* value)
2178 StructureID* oldStructureID = newStructureID->previousID();
2180 baseObject->transitionTo(newStructureID);
2182 if (oldStructureID->propertyMap().storageSize() == JSObject::inlineStorageCapacity)
2183 baseObject->allocatePropertyStorage(oldStructureID->propertyMap().storageSize(), oldStructureID->propertyMap().size());
2185 baseObject->putDirectOffset(cachedOffset, value);
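// Decide whether a transition from oldStructureID to newStructureID will require the object's property
// storage to be (re)allocated - if it will, the transition stub must call out to transitionObject above.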
2191 static inline bool transitionWillNeedStorageRealloc(StructureID* oldStructureID, StructureID* newStructureID)
2193 if (oldStructureID->propertyMap().storageSize() == JSObject::inlineStorageCapacity)
2196 if (oldStructureID->propertyMap().storageSize() < JSObject::inlineStorageCapacity)
2199 if (oldStructureID->propertyMap().size() != newStructureID->propertyMap().size())
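// Generate a stub for a put_by_id that adds a property via a StructureID transition: verify the old
// StructureID and that the prototype chain is unchanged, then either update the StructureID and store
// inline (no realloc needed) or call out to transitionObject.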
2205 void CTI::privateCompilePutByIdTransition(StructureID* oldStructureID, StructureID* newStructureID, size_t cachedOffset, StructureIDChain* sIDC, void* returnAddress)
2207 Vector<X86Assembler::JmpSrc, 16> failureCases;
2208 // check eax is an object of the right StructureID.
2209 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2210 failureCases.append(m_jit.emitUnlinkedJne());
2211 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(oldStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2212 failureCases.append(m_jit.emitUnlinkedJne());
2213 Vector<X86Assembler::JmpSrc> successCases;
2216 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
2217 // proto(ecx) = baseObject->structureID()->prototype()
2218 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_type), X86::ecx);
2219 failureCases.append(m_jit.emitUnlinkedJne());
2220 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
2222 // ecx = baseObject->m_structureID
2223 for (RefPtr<StructureID>* it = sIDC->head(); *it; ++it) {
2224 // null check the prototype
2225 m_jit.cmpl_i32r(reinterpret_cast<intptr_t> (jsNull()), X86::ecx);
2226 successCases.append(m_jit.emitUnlinkedJe());
2228 // Check the structure id
2229 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(it->get()), OBJECT_OFFSET(JSCell, m_structureID), X86::ecx);
2230 failureCases.append(m_jit.emitUnlinkedJne());
2232 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::ecx, X86::ecx);
2233 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_type), X86::ecx);
2234 failureCases.append(m_jit.emitUnlinkedJne());
2235 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
2238 failureCases.append(m_jit.emitUnlinkedJne());
2239 for (unsigned i = 0; i < successCases.size(); ++i)
2240 m_jit.link(successCases[i], m_jit.label());
2242 X86Assembler::JmpSrc callTarget;
2243 // Fast case, don't need to do any heavy lifting, so don't bother making a call.
2244 if (!transitionWillNeedStorageRealloc(oldStructureID, newStructureID)) {
2245 // Assumes m_refCount can be decremented in place; the refcount decrement is safe as
2246 // the codeblock should ensure oldStructureID->m_refCount > 0
2247 m_jit.subl_i8m(1, reinterpret_cast<void*>(oldStructureID));
2248 m_jit.addl_i8m(1, reinterpret_cast<void*>(newStructureID));
2249 m_jit.movl_i32m(reinterpret_cast<uint32_t>(newStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2252 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2253 m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
2255 // Slow case transition -- we're going to need to do quite a bit of work,
2256 // so just make a call
2257 m_jit.pushl_r(X86::edx);
2258 m_jit.pushl_r(X86::eax);
2259 m_jit.movl_i32r(cachedOffset, X86::eax);
2260 m_jit.pushl_r(X86::eax);
2261 m_jit.movl_i32r(reinterpret_cast<uint32_t>(newStructureID), X86::eax);
2262 m_jit.pushl_r(X86::eax);
2263 callTarget = m_jit.emitCall();
2264 m_jit.addl_i32r(4 * sizeof(void*), X86::esp);
2267 void* code = m_jit.copy();
2270 for (unsigned i = 0; i < failureCases.size(); ++i)
2271 X86Assembler::link(code, failureCases[i], reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
2273 if (transitionWillNeedStorageRealloc(oldStructureID, newStructureID))
2274 X86Assembler::link(code, callTarget, reinterpret_cast<void*>(transitionObject));
2276 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2278 ctiRepatchCallByReturnAddress(returnAddress, code);
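// Shared trampoline for get_by_id of 'length' on a JSArray: check the vptr, load the length from the array
// storage, and box it as an immediate number.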
2281 void* CTI::privateCompileArrayLengthTrampoline()
2283 // Check eax is an array
2284 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2285 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2286 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
2287 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2289 // Checks out okay! - get the length from the storage
2290 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::eax);
2291 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::eax, X86::eax);
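// Box the length as an immediate number: double it (shift left by one) and set the low tag bit, bailing
// out if the doubling overflows the immediate range.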
2293 m_jit.addl_rr(X86::eax, X86::eax);
2294 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
2295 m_jit.addl_i8r(1, X86::eax);
2299 void* code = m_jit.copy();
2302 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2303 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2304 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
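// Shared trampoline for get_by_id of 'length' on a JSString: check the vptr, load the length from the
// UString rep, and box it as an immediate number.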
2309 void* CTI::privateCompileStringLengthTrampoline()
2311 // Check eax is a string
2312 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2313 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2314 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsStringVptr), X86::eax);
2315 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2317 // Checks out okay! - get the length from the UString.
2318 m_jit.movl_mr(OBJECT_OFFSET(JSString, m_value) + OBJECT_OFFSET(UString, m_rep), X86::eax, X86::eax);
2319 m_jit.movl_mr(OBJECT_OFFSET(UString::Rep, len), X86::eax, X86::eax);
2321 m_jit.addl_rr(X86::eax, X86::eax);
2322 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
2323 m_jit.addl_i8r(1, X86::eax);
2327 void* code = m_jit.copy();
2330 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2331 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2332 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
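// Repatch an existing get_by_id hot path in place: point its slow-path call at the generic C++ helper, then
// rewrite the inline property offset and StructureID that the fast path checks against.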
2337 void CTI::patchGetByIdSelf(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
2339 StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
2341 // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
2342 // Should probably go to Machine::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
2343 ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_get_by_id_generic));
2345 // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
2346 X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
2347 X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdStructureID, reinterpret_cast<uint32_t>(structureID));
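// Repatch an existing put_by_id hot path in place, in the same way as patchGetByIdSelf above.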
2350 void CTI::patchPutByIdReplace(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
2352 StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
2354 // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
2355 // Should probably go to Machine::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
2356 ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_put_by_id_generic));
2358 // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
2359 X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
2360 X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdStructureID, reinterpret_cast<uint32_t>(structureID));
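// Build an array-length stub on top of an existing get_by_id site: on success jump back into the hot path
// at the point where it stores the result, on failure jump back to the original slow case.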
2363 void CTI::privateCompilePatchGetArrayLength(void* returnAddress)
2365 StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
2367 // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
2368 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2370 // Check eax is an array
2371 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2372 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2373 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
2374 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2376 // Checks out okay! - get the length from the storage
2377 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
2378 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::ecx, X86::ecx);
2380 m_jit.addl_rr(X86::ecx, X86::ecx);
2381 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
2382 m_jit.addl_i8r(1, X86::ecx);
2384 X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
2386 void* code = m_jit.copy();
2389 // Use the repatch information to link the failure cases back to the original slow case routine.
2390 void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
2391 X86Assembler::link(code, failureCases1, slowCaseBegin);
2392 X86Assembler::link(code, failureCases2, slowCaseBegin);
2393 X86Assembler::link(code, failureCases3, slowCaseBegin);
2395 // On success, return back to the hot patch code, at a point where it will perform the store to dest for us.
2396 intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
2397 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
2399 // Track the stub we have created so that it will be deleted later.
2400 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2402 // Finally repatch the jump to the slow case back in the hot path to jump here instead.
2403 // FIXME: should revert this repatching, on failure.
2404 intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
2405 X86Assembler::repatchBranchOffset(jmpLocation, code);
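// Load a register out of a JSVariableObject's register array: variableObject->d -> registers -> registers[index].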
2408 void CTI::emitGetVariableObjectRegister(X86Assembler::RegisterID variableObject, int index, X86Assembler::RegisterID dst)
2410 m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, dst);
2411 m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), dst, dst);
2412 m_jit.movl_mr(index * sizeof(Register), dst, dst);
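// Store a register into a JSVariableObject's register array (clobbers the variableObject register).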
2415 void CTI::emitPutVariableObjectRegister(X86Assembler::RegisterID src, X86Assembler::RegisterID variableObject, int index)
2417 m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, variableObject);
2418 m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), variableObject, variableObject);
2419 m_jit.movl_rm(src, index * sizeof(Register), variableObject);
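// WREC entry point: compile a regular expression pattern into native code. On success the subpattern count
// is reported through numSubpatterns_ptr; on failure an error message is reported through error_ptr.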
2424 void* CTI::compileRegExp(ExecState* exec, const UString& pattern, unsigned* numSubpatterns_ptr, const char** error_ptr, bool ignoreCase, bool multiline)
2426 // TODO: better error messages
2427 if (pattern.size() > MaxPatternSize) {
2428 *error_ptr = "regular expression too large";
2432 X86Assembler jit(exec->machine()->jitCodeBuffer());
2433 WRECParser parser(pattern, ignoreCase, multiline, jit);
2435 jit.emitConvertToFastCall();
2437 // Preserve regs & initialize outputRegister.
2438 jit.pushl_r(WRECGenerator::outputRegister);
2439 jit.pushl_r(WRECGenerator::currentValueRegister);
2440 // Push pos onto the stack, both to preserve it and to make it available as a parameter to parseDisjunction
2441 jit.pushl_r(WRECGenerator::currentPositionRegister);
2442 // load output pointer
2447 , X86::esp, WRECGenerator::outputRegister);
2449 // restart point on match fail.
2450 WRECGenerator::JmpDst nextLabel = jit.label();
2452 // (1) Parse Disjunction:
2454 // Parsing the disjunction should fully consume the pattern.
2455 JmpSrcVector failures;
2456 parser.parseDisjunction(failures);
2457 if (!parser.isEndOfPattern()) {
2458 parser.m_err = WRECParser::Error_malformedPattern;
2461 // TODO: better error messages
2462 *error_ptr = "TODO: better error messages";
2467 // Set return value & pop registers from the stack.
2469 jit.testl_rr(WRECGenerator::outputRegister, WRECGenerator::outputRegister);
2470 WRECGenerator::JmpSrc noOutput = jit.emitUnlinkedJe();
2472 jit.movl_rm(WRECGenerator::currentPositionRegister, 4, WRECGenerator::outputRegister);
2473 jit.popl_r(X86::eax);
2474 jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
2475 jit.popl_r(WRECGenerator::currentValueRegister);
2476 jit.popl_r(WRECGenerator::outputRegister);
2479 jit.link(noOutput, jit.label());
2481 jit.popl_r(X86::eax);
2482 jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
2483 jit.popl_r(WRECGenerator::currentValueRegister);
2484 jit.popl_r(WRECGenerator::outputRegister);
2488 // All failures link to here. Advance the start point and, if it is still within the input, loop.
2489 // Otherwise, return the fail value.
2490 WRECGenerator::JmpDst here = jit.label();
2491 for (unsigned i = 0; i < failures.size(); ++i)
2492 jit.link(failures[i], here);
2495 jit.movl_mr(X86::esp, WRECGenerator::currentPositionRegister);
2496 jit.addl_i8r(1, WRECGenerator::currentPositionRegister);
2497 jit.movl_rm(WRECGenerator::currentPositionRegister, X86::esp);
2498 jit.cmpl_rr(WRECGenerator::lengthRegister, WRECGenerator::currentPositionRegister);
2499 jit.link(jit.emitUnlinkedJle(), nextLabel);
2501 jit.addl_i8r(4, X86::esp);
2503 jit.movl_i32r(-1, X86::eax);
2504 jit.popl_r(WRECGenerator::currentValueRegister);
2505 jit.popl_r(WRECGenerator::outputRegister);
2508 *numSubpatterns_ptr = parser.m_numSubpatterns;
2510 void* code = jit.copy();
2515 #endif // ENABLE(WREC)
2519 #endif // ENABLE(CTI)