2 * Copyright (C) 2008 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "CodeBlock.h"
33 #include "JSFunction.h"
35 #include "wrec/WREC.h"
36 #include "ResultType.h"
39 #include <sys/sysctl.h>
48 static inline bool isSSE2Present()
50 return true; // All X86 Macs are guaranteed to support at least SSE2
55 static bool isSSE2Present()
57 static const int SSE2FeatureBit = 1 << 26;
64 mov eax, 1 // cpuid function 1 gives us the standard feature set
70 // FIXME: Add GCC code to do above asm
72 present = (flags & SSE2FeatureBit) != 0;
76 static SSE2Check check;
// These compile-time assertions pin the stack-slot indices of the CTI
// arguments; the hand-written assembly below hard-codes the corresponding
// byte offsets (index * 4).
82 COMPILE_ASSERT(CTI_ARGS_code == 0xC, CTI_ARGS_code_is_C);
83 COMPILE_ASSERT(CTI_ARGS_callFrame == 0xE, CTI_ARGS_callFrame_is_E);
85 #if COMPILER(GCC) && PLATFORM(X86)
// Darwin prefixes C symbols with an underscore; other ABIs do not.
88 #define SYMBOL_STRING(name) "_" #name
90 #define SYMBOL_STRING(name) #name
// ctiTrampoline: the entry point from C++ into JIT-generated code. It
// reserves the CTI argument area on the stack, loads the timeout tick counter
// into esi and the CallFrame into edi, then calls the generated code.
// NOTE(review): several asm lines (prologue pushes / epilogue pops / ret) were
// elided in this extract.
94 ".globl " SYMBOL_STRING(ctiTrampoline) "\n"
95 SYMBOL_STRING(ctiTrampoline) ":" "\n"
98 "subl $0x24, %esp" "\n"
99 "movl $512, %esi" "\n"
100 "movl 0x38(%esp), %edi" "\n" // 0x38 = 0x0E * 4, 0x0E = CTI_ARGS_callFrame (see assertion above)
101 "call *0x30(%esp)" "\n" // 0x30 = 0x0C * 4, 0x0C = CTI_ARGS_code (see assertion above)
102 "addl $0x24, %esp" "\n"
// ctiVMThrowTrampoline: generated code jumps here when an exception is being
// thrown; it forwards to Machine::cti_vm_throw with the appropriate
// argument-passing convention for the build configuration.
109 ".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
110 SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
111 #if USE(CTI_ARGUMENT)
112 #if USE(FAST_CALL_CTI_ARGUMENT)
113 "movl %esp, %ecx" "\n"
115 "movl %esp, 0(%esp)" "\n"
// The mangled names below are Machine::cti_vm_throw under the two signatures.
117 "call " SYMBOL_STRING(_ZN3JSC7Machine12cti_vm_throwEPPv) "\n"
119 "call " SYMBOL_STRING(_ZN3JSC7Machine12cti_vm_throwEPvz) "\n"
121 "addl $0x24, %esp" "\n"
// MSVC flavor of the trampolines: naked functions with inline asm.
// NOTE(review): most of the asm bodies (prologue/epilogue, esi setup, ret)
// were elided in this extract; only the argument loads survive.
131 __declspec(naked) JSValue* ctiTrampoline(void* code, RegisterFile*, CallFrame*, JSValue** exception, Profiler**, JSGlobalData*)
139 mov edi, [esp + 0x38];
140 call [esp + 0x30]; // 0x30 = 0x0C * 4, 0x0C = CTI_ARGS_code (see assertion above)
// Exception path: forward into the interpreter's throw helper.
148 __declspec(naked) void ctiVMThrowTrampoline()
152 call JSC::Machine::cti_vm_throw;
164 ALWAYS_INLINE bool CTI::isConstant(int src)
166 return src >= m_codeBlock->numVars && src < m_codeBlock->numVars + m_codeBlock->numConstants;
169 ALWAYS_INLINE JSValue* CTI::getConstant(CallFrame* callFrame, int src)
171 return m_codeBlock->constantRegisters[src - m_codeBlock->numVars].jsValue(callFrame);
174 // get arg puts an arg from the SF register array into a h/w register
175 ALWAYS_INLINE void CTI::emitGetArg(int src, X86Assembler::RegisterID dst)
177 // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
178 if (isConstant(src)) {
179 JSValue* js = getConstant(m_callFrame, src);
180 m_jit.movl_i32r(reinterpret_cast<unsigned>(js), dst);
182 m_jit.movl_mr(src * sizeof(Register), X86::edi, dst);
185 // get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
186 ALWAYS_INLINE void CTI::emitGetPutArg(unsigned src, unsigned offset, X86Assembler::RegisterID scratch)
188 if (isConstant(src)) {
189 JSValue* js = getConstant(m_callFrame, src);
190 m_jit.movl_i32m(reinterpret_cast<unsigned>(js), offset + sizeof(void*), X86::esp);
192 m_jit.movl_mr(src * sizeof(Register), X86::edi, scratch);
193 m_jit.movl_rm(scratch, offset + sizeof(void*), X86::esp);
197 // puts an arg onto the stack, as an arg to a context threaded function.
198 ALWAYS_INLINE void CTI::emitPutArg(X86Assembler::RegisterID src, unsigned offset)
200 m_jit.movl_rm(src, offset + sizeof(void*), X86::esp);
203 ALWAYS_INLINE void CTI::emitPutArgConstant(unsigned value, unsigned offset)
205 m_jit.movl_i32m(value, offset + sizeof(void*), X86::esp);
208 ALWAYS_INLINE JSValue* CTI::getConstantImmediateNumericArg(unsigned src)
210 if (isConstant(src)) {
211 JSValue* js = getConstant(m_callFrame, src);
212 return JSImmediate::isNumber(js) ? js : 0;
217 ALWAYS_INLINE void CTI::emitPutCTIParam(void* value, unsigned name)
219 m_jit.movl_i32m(reinterpret_cast<intptr_t>(value), name * sizeof(void*), X86::esp);
222 ALWAYS_INLINE void CTI::emitPutCTIParam(X86Assembler::RegisterID from, unsigned name)
224 m_jit.movl_rm(from, name * sizeof(void*), X86::esp);
227 ALWAYS_INLINE void CTI::emitGetCTIParam(unsigned name, X86Assembler::RegisterID to)
229 m_jit.movl_mr(name * sizeof(void*), X86::esp, to);
232 ALWAYS_INLINE void CTI::emitPutToCallFrameHeader(X86Assembler::RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
234 m_jit.movl_rm(from, entry * sizeof(Register), X86::edi);
237 ALWAYS_INLINE void CTI::emitGetFromCallFrameHeader(RegisterFile::CallFrameHeaderEntry entry, X86Assembler::RegisterID to)
239 m_jit.movl_mr(entry * sizeof(Register), X86::edi, to);
242 ALWAYS_INLINE void CTI::emitPutResult(unsigned dst, X86Assembler::RegisterID from)
244 m_jit.movl_rm(from, dst * sizeof(Register), X86::edi);
245 // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
248 ALWAYS_INLINE void CTI::emitInitRegister(unsigned dst)
250 m_jit.movl_i32m(reinterpret_cast<unsigned>(jsUndefined()), dst * sizeof(Register), X86::edi);
251 // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
254 #if ENABLE(SAMPLING_TOOL)
// Sampling-tool flag: nonzero while JIT-generated code is inside a C++
// helper (set to 1 / cleared to 0 around each helper call emitted by the
// emitCall overloads below).
255 unsigned inCalledCode = 0;
// Overwrites a return address slot ('where') with a new target ('what').
// Used to redirect a JIT call site's return.
void ctiSetReturnAddress(void** where, void* what)
{
    *where = what;
}
// Repatches the relative x86 CALL instruction whose return address is
// 'where' so that it targets 'what'. The 32-bit relative displacement sits in
// the 4 bytes immediately before the return address, and is encoded relative
// to the instruction following the call (i.e. 'where' itself).
void ctiRepatchCallByReturnAddress(void* where, void* what)
{
    (static_cast<void**>(where))[-1] = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(what) - reinterpret_cast<uintptr_t>(where));
}
// Debug helper: prints one-character type tags for the two operands of an
// opcode ('i' immediate number, 'b' boolean, 'u' undefined, 'n' null,
// 's' string, 'o' object, '?'/'*' unknown). Only constant operands can be
// classified at compile time.
// NOTE(review): the declarations of which1/which2 (and the '*' default for
// non-constant operands) were elided in this extract.
270 void CTI::printOpcodeOperandTypes(unsigned src1, unsigned src2)
273 if (isConstant(src1)) {
274 JSValue* js = getConstant(m_callFrame, src1);
276 JSImmediate::isImmediate(js) ?
277 (JSImmediate::isNumber(js) ? 'i' :
278 JSImmediate::isBoolean(js) ? 'b' :
279 js->isUndefined() ? 'u' :
280 js->isNull() ? 'n' : '?')
282 (js->isString() ? 's' :
283 js->isObject() ? 'o' :
// Same classification for the second operand.
287 if (isConstant(src2)) {
288 JSValue* js = getConstant(m_callFrame, src2);
290 JSImmediate::isImmediate(js) ?
291 (JSImmediate::isNumber(js) ? 'i' :
292 JSImmediate::isBoolean(js) ? 'b' :
293 js->isUndefined() ? 'u' :
294 js->isNull() ? 'n' : '?')
296 (js->isString() ? 's' :
297 js->isObject() ? 'o' :
// Bitwise '|' (not '||') — both operands are cheap; presumably intentional
// to avoid a short-circuit branch, but behaviorally identical here.
300 if ((which1 != '*') | (which2 != '*'))
301 fprintf(stderr, "Types %c %c\n", which1, which2);
306 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, X86::RegisterID r)
308 m_jit.emitRestoreArgumentReference();
309 X86Assembler::JmpSrc call = m_jit.emitCall(r);
310 m_calls.append(CallRecord(call, opcodeIndex));
315 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_j helper)
317 #if ENABLE(SAMPLING_TOOL)
318 m_jit.movl_i32m(1, &inCalledCode);
320 m_jit.emitRestoreArgumentReference();
321 X86Assembler::JmpSrc call = m_jit.emitCall();
322 m_calls.append(CallRecord(call, helper, opcodeIndex));
323 #if ENABLE(SAMPLING_TOOL)
324 m_jit.movl_i32m(0, &inCalledCode);
330 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_p helper)
332 #if ENABLE(SAMPLING_TOOL)
333 m_jit.movl_i32m(1, &inCalledCode);
335 m_jit.emitRestoreArgumentReference();
336 X86Assembler::JmpSrc call = m_jit.emitCall();
337 m_calls.append(CallRecord(call, helper, opcodeIndex));
338 #if ENABLE(SAMPLING_TOOL)
339 m_jit.movl_i32m(0, &inCalledCode);
345 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_b helper)
347 #if ENABLE(SAMPLING_TOOL)
348 m_jit.movl_i32m(1, &inCalledCode);
350 m_jit.emitRestoreArgumentReference();
351 X86Assembler::JmpSrc call = m_jit.emitCall();
352 m_calls.append(CallRecord(call, helper, opcodeIndex));
353 #if ENABLE(SAMPLING_TOOL)
354 m_jit.movl_i32m(0, &inCalledCode);
360 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_v helper)
362 #if ENABLE(SAMPLING_TOOL)
363 m_jit.movl_i32m(1, &inCalledCode);
365 m_jit.emitRestoreArgumentReference();
366 X86Assembler::JmpSrc call = m_jit.emitCall();
367 m_calls.append(CallRecord(call, helper, opcodeIndex));
368 #if ENABLE(SAMPLING_TOOL)
369 m_jit.movl_i32m(0, &inCalledCode);
375 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_s helper)
377 #if ENABLE(SAMPLING_TOOL)
378 m_jit.movl_i32m(1, &inCalledCode);
380 m_jit.emitRestoreArgumentReference();
381 X86Assembler::JmpSrc call = m_jit.emitCall();
382 m_calls.append(CallRecord(call, helper, opcodeIndex));
383 #if ENABLE(SAMPLING_TOOL)
384 m_jit.movl_i32m(0, &inCalledCode);
390 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_2 helper)
392 #if ENABLE(SAMPLING_TOOL)
393 m_jit.movl_i32m(1, &inCalledCode);
395 m_jit.emitRestoreArgumentReference();
396 X86Assembler::JmpSrc call = m_jit.emitCall();
397 m_calls.append(CallRecord(call, helper, opcodeIndex));
398 #if ENABLE(SAMPLING_TOOL)
399 m_jit.movl_i32m(0, &inCalledCode);
405 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotJSCell(X86Assembler::RegisterID reg, unsigned opcodeIndex)
407 m_jit.testl_i32r(JSImmediate::TagMask, reg);
408 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), opcodeIndex));
411 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImmNum(X86Assembler::RegisterID reg, unsigned opcodeIndex)
413 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, reg);
414 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), opcodeIndex));
417 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImmNums(X86Assembler::RegisterID reg1, X86Assembler::RegisterID reg2, unsigned opcodeIndex)
419 m_jit.movl_rr(reg1, X86::ecx);
420 m_jit.andl_rr(reg2, X86::ecx);
421 emitJumpSlowCaseIfNotImmNum(X86::ecx, opcodeIndex);
424 ALWAYS_INLINE unsigned CTI::getDeTaggedConstantImmediate(JSValue* imm)
426 ASSERT(JSImmediate::isNumber(imm));
427 return reinterpret_cast<unsigned>(imm) & ~JSImmediate::TagBitTypeInteger;
430 ALWAYS_INLINE void CTI::emitFastArithDeTagImmediate(X86Assembler::RegisterID reg)
432 m_jit.subl_i8r(JSImmediate::TagBitTypeInteger, reg);
435 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitFastArithDeTagImmediateJumpIfZero(X86Assembler::RegisterID reg)
437 m_jit.subl_i8r(JSImmediate::TagBitTypeInteger, reg);
438 return m_jit.emitUnlinkedJe();
441 ALWAYS_INLINE void CTI::emitFastArithReTagImmediate(X86Assembler::RegisterID reg)
443 m_jit.addl_i8r(JSImmediate::TagBitTypeInteger, reg);
446 ALWAYS_INLINE void CTI::emitFastArithPotentiallyReTagImmediate(X86Assembler::RegisterID reg)
448 m_jit.orl_i32r(JSImmediate::TagBitTypeInteger, reg);
451 ALWAYS_INLINE void CTI::emitFastArithImmToInt(X86Assembler::RegisterID reg)
453 m_jit.sarl_i8r(1, reg);
456 ALWAYS_INLINE void CTI::emitFastArithIntToImmOrSlowCase(X86Assembler::RegisterID reg, unsigned opcodeIndex)
458 m_jit.addl_rr(reg, reg);
459 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), opcodeIndex));
460 emitFastArithReTagImmediate(reg);
463 ALWAYS_INLINE void CTI::emitFastArithIntToImmNoCheck(X86Assembler::RegisterID reg)
465 m_jit.addl_rr(reg, reg);
466 emitFastArithReTagImmediate(reg);
469 ALWAYS_INLINE void CTI::emitTagAsBoolImmediate(X86Assembler::RegisterID reg)
471 m_jit.shl_i8r(JSImmediate::ExtendedPayloadShift, reg);
472 m_jit.orl_i32r(JSImmediate::FullTagTypeBool, reg);
475 CTI::CTI(Machine* machine, CallFrame* callFrame, CodeBlock* codeBlock)
476 : m_jit(machine->jitCodeBuffer())
478 , m_callFrame(callFrame)
479 , m_codeBlock(codeBlock)
480 , m_labels(codeBlock ? codeBlock->instructions.size() : 0)
481 , m_structureStubCompilationInfo(codeBlock ? codeBlock->structureIDInstructions.size() : 0)
// Case-body macro for a generic binary opcode: push the two operands as
// helper arguments, call the cti_<name> helper, store its result.
// NOTE(review): the surrounding "case op_<name>: { ... break; }" lines were
// elided in this extract.
485 #define CTI_COMPILE_BINARY_OP(name) \
487 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
488 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
489 emitCall(i, Machine::cti_##name); \
490 emitPutResult(instruction[i + 1].u.operand); \
// Same, for unary opcodes (single operand at offset 0).
495 #define CTI_COMPILE_UNARY_OP(name) \
497 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
498 emitCall(i, Machine::cti_##name); \
499 emitPutResult(instruction[i + 1].u.operand); \
504 #if ENABLE(SAMPLING_TOOL)
// Sampling-tool hook: the opcode currently executing in JIT-generated code
// (-1 when none); the generated code writes it in privateCompileMainPass.
505 OpcodeID currentOpcodeID = static_cast<OpcodeID>(-1);
// Fills in the header of the callee's new CallFrame (frame base in edx).
// Precondition: the JS-function call helper just returned the callee's
// CodeBlock in eax. Stores CodeBlock, a null OptionalCalleeArguments, the
// callee itself, its scope chain, the argument count, and the caller frame.
508 void CTI::compileOpCallInitializeCallFrame(unsigned callee, unsigned argCount)
510 emitGetArg(callee, X86::ecx); // Load callee JSFunction into ecx
511 m_jit.movl_rm(X86::eax, RegisterFile::CodeBlock * static_cast<int>(sizeof(Register)), X86::edx); // callee CodeBlock was returned in eax
512 m_jit.movl_i32m(reinterpret_cast<unsigned>(nullJSValue), RegisterFile::OptionalCalleeArguments * static_cast<int>(sizeof(Register)), X86::edx);
513 m_jit.movl_rm(X86::ecx, RegisterFile::Callee * static_cast<int>(sizeof(Register)), X86::edx);
515 m_jit.movl_mr(OBJECT_OFFSET(JSFunction, m_scopeChain) + OBJECT_OFFSET(ScopeChain, m_node), X86::ecx, X86::ecx); // newScopeChain
516 m_jit.movl_i32m(argCount, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register)), X86::edx);
517 m_jit.movl_rm(X86::edi, RegisterFile::CallerFrame * static_cast<int>(sizeof(Register)), X86::edx)
518 m_jit.movl_rm(X86::ecx, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register)), X86::edx);
// Compiles op_call / op_call_eval / op_construct. Layout of the generated
// code: set up helper arguments; resolve 'this' (global object when the
// marker says it is missing); for eval, try cti_op_call_eval first and skip
// everything if it handled the call; otherwise do a fast vptr check to
// distinguish JSFunction from host function, initialize the new CallFrame,
// and either call the callee's generated ctiCode or fall to a slow case if
// it has not been compiled yet.
// NOTE(review): several lines (else branches, closing braces, the final
// emitPutResult(dst)) were elided in this extract.
521 void CTI::compileOpCall(Instruction* instruction, unsigned i, CompileOpCallType type)
523 int dst = instruction[i + 1].u.operand;
524 int callee = instruction[i + 2].u.operand;
525 int firstArg = instruction[i + 4].u.operand;
526 int argCount = instruction[i + 5].u.operand;
527 int registerOffset = instruction[i + 6].u.operand;
529 if (type == OpCallEval)
530 emitGetPutArg(instruction[i + 3].u.operand, 16, X86::ecx);
// Argument layout differs between construct (5 args) and call (3 args).
532 if (type == OpConstruct) {
533 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 20);
534 emitPutArgConstant(argCount, 16);
535 emitPutArgConstant(registerOffset, 12);
536 emitPutArgConstant(firstArg, 8);
537 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
539 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 12);
540 emitPutArgConstant(argCount, 8);
541 emitPutArgConstant(registerOffset, 4);
// Resolve 'this': the marker means no receiver was provided, so use the
// global 'this' value; otherwise copy the provided receiver into firstArg.
543 int thisVal = instruction[i + 3].u.operand;
544 if (thisVal == missingThisObjectMarker()) {
545 // FIXME: should this be loaded dynamically off m_callFrame?
546 m_jit.movl_i32m(reinterpret_cast<unsigned>(m_callFrame->globalThisValue()), firstArg * sizeof(Register), X86::edi);
548 emitGetArg(thisVal, X86::ecx);
549 emitPutResult(firstArg, X86::ecx);
553 X86Assembler::JmpSrc wasEval;
554 if (type == OpCallEval) {
555 emitGetPutArg(callee, 0, X86::ecx);
556 emitCall(i, Machine::cti_op_call_eval);
// impossibleValue() is the sentinel for "was not actually an eval call".
558 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(JSImmediate::impossibleValue()), X86::eax);
559 wasEval = m_jit.emitUnlinkedJne();
561 // this sets up the first arg to op_cti_call (func), and explicitly leaves the value in ecx (checked just below).
562 emitGetArg(callee, X86::ecx);
564 // this sets up the first arg to op_cti_call (func), and explicitly leaves the value in ecx (checked just below).
565 emitGetPutArg(callee, 0, X86::ecx);
568 // Fast check for JS function.
569 m_jit.testl_i32r(JSImmediate::TagMask, X86::ecx);
570 X86Assembler::JmpSrc isNotObject = m_jit.emitUnlinkedJne();
571 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsFunctionVptr), X86::ecx);
572 X86Assembler::JmpSrc isJSFunction = m_jit.emitUnlinkedJe();
573 m_jit.link(isNotObject, m_jit.label());
575 // This handles host functions
576 emitCall(i, ((type == OpConstruct) ? Machine::cti_op_construct_NotJSConstruct : Machine::cti_op_call_NotJSFunction));
578 X86Assembler::JmpSrc wasNotJSFunction = m_jit.emitUnlinkedJmp();
579 m_jit.link(isJSFunction, m_jit.label());
581 // This handles JSFunctions
582 emitCall(i, (type == OpConstruct) ? Machine::cti_op_construct_JSConstruct : Machine::cti_op_call_JSFunction);
584 compileOpCallInitializeCallFrame(callee, argCount);
586 // load ctiCode from the new codeBlock.
587 m_jit.movl_mr(OBJECT_OFFSET(CodeBlock, ctiCode), X86::eax, X86::eax);
589 // Put the new value of 'callFrame' into edi and onto the stack, too.
590 emitPutCTIParam(X86::edx, CTI_ARGS_callFrame);
591 m_jit.movl_rr(X86::edx, X86::edi);
593 // Check the ctiCode has been generated - if not, this is handled in a slow case.
594 m_jit.testl_rr(X86::eax, X86::eax);
595 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
596 emitCall(i, X86::eax);
// All paths (eval, host function, JS function) converge here.
598 X86Assembler::JmpDst end = m_jit.label();
599 m_jit.link(wasNotJSFunction, end);
600 if (type == OpCallEval)
601 m_jit.link(wasEval, end);
603 // Put the return value in dst. In the interpreter, op_ret does this.
// Compiles op_stricteq / op_nstricteq. Fast paths:
//  - both operands immediate: strict equality is raw 32-bit comparison;
//  - exactly one operand immediate and not the zero immediate: values cannot
//    be strictly equal, so the answer is a constant.
// Any case involving cells (or the ambiguous zero immediate) goes to the
// slow case. NOTE(review): some lines (if/else around setne/sete, result
// stores, closing braces) were elided in this extract.
607 void CTI::compileOpStrictEq(Instruction* instruction, unsigned i, CompileOpStrictEqType type)
609 bool negated = (type == OpNStrictEq);
611 unsigned dst = instruction[i + 1].u.operand;
612 unsigned src1 = instruction[i + 2].u.operand;
613 unsigned src2 = instruction[i + 3].u.operand;
615 emitGetArg(src1, X86::eax);
616 emitGetArg(src2, X86::edx);
// Branch out if either operand is a cell (tag bits clear).
618 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
619 X86Assembler::JmpSrc firstNotImmediate = m_jit.emitUnlinkedJe();
620 m_jit.testl_i32r(JSImmediate::TagMask, X86::edx);
621 X86Assembler::JmpSrc secondNotImmediate = m_jit.emitUnlinkedJe();
// Both immediate: compare raw bits; setne for nstricteq, sete for stricteq.
623 m_jit.cmpl_rr(X86::edx, X86::eax);
625 m_jit.setne_r(X86::eax);
627 m_jit.sete_r(X86::eax);
628 m_jit.movzbl_rr(X86::eax, X86::eax);
629 emitTagAsBoolImmediate(X86::eax);
631 X86Assembler::JmpSrc bothWereImmediates = m_jit.emitUnlinkedJmp();
633 m_jit.link(firstNotImmediate, m_jit.label());
635 // check that edx is immediate but not the zero immediate
636 m_jit.testl_i32r(JSImmediate::TagMask, X86::edx);
637 m_jit.setz_r(X86::ecx);
638 m_jit.movzbl_rr(X86::ecx, X86::ecx); // ecx is now 1 if edx was nonimmediate
639 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::edx);
640 m_jit.sete_r(X86::edx);
641 m_jit.movzbl_rr(X86::edx, X86::edx); // edx is now 1 if edx was the 0 immediate
642 m_jit.orl_rr(X86::ecx, X86::edx);
644 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJnz(), i));
// One cell, one (nonzero) immediate: strictly unequal — constant answer.
646 m_jit.movl_i32r(reinterpret_cast<uint32_t>(jsBoolean(negated)), X86::eax);
648 X86Assembler::JmpSrc firstWasNotImmediate = m_jit.emitUnlinkedJmp();
650 m_jit.link(secondNotImmediate, m_jit.label());
651 // check that eax is not the zero immediate (we know it must be immediate)
652 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
653 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
655 m_jit.movl_i32r(reinterpret_cast<uint32_t>(jsBoolean(negated)), X86::eax);
// All fast paths converge; result (in eax) is stored to dst (store elided).
657 m_jit.link(bothWereImmediates, m_jit.label());
658 m_jit.link(firstWasNotImmediate, m_jit.label());
663 void CTI::emitSlowScriptCheck(unsigned opcodeIndex)
665 m_jit.subl_i8r(1, X86::esi);
666 X86Assembler::JmpSrc skipTimeout = m_jit.emitUnlinkedJne();
667 emitCall(opcodeIndex, Machine::cti_timeout_check);
669 emitGetCTIParam(CTI_ARGS_globalData, X86::ecx);
670 m_jit.movl_mr(OBJECT_OFFSET(JSGlobalData, machine), X86::ecx, X86::ecx);
671 m_jit.movl_mr(OBJECT_OFFSET(Machine, m_ticksUntilNextTimeoutCheck), X86::ecx, X86::esi);
672 m_jit.link(skipTimeout, m_jit.label());
676 This is required since number representation is canonical - values representable as a JSImmediate should not be stored in a JSNumberCell.
678 In the common case, the double value from 'xmmSource' is written to the reusable JSNumberCell pointed to by 'jsNumberCell', then 'jsNumberCell'
679 is written to the output SF Register 'dst', and then a jump is planted (stored into *wroteJSNumberCell).
681 However if the value from xmmSource is representable as a JSImmediate, then the JSImmediate value will be written to the output, and flow
682 control will fall through from the code planted.
// Stores the double in xmmSource to virtual register dst, either as a tagged
// JSImmediate (when exactly representable, and not -0 or NaN) or into the
// reusable JSNumberCell pointed to by jsNumberCell (jumping via
// *wroteJSNumberCell). See the prose comment above for the contract.
684 void CTI::putDoubleResultToJSNumberCellOrJSImmediate(X86::XMMRegisterID xmmSource, X86::RegisterID jsNumberCell, unsigned dst, X86Assembler::JmpSrc* wroteJSNumberCell, X86::XMMRegisterID tempXmm, X86::RegisterID tempReg1, X86::RegisterID tempReg2)
686 // convert (double -> JSImmediate -> double), and check if the value is unchanged - in which case the value is representable as a JSImmediate.
687 m_jit.cvttsd2si_rr(xmmSource, tempReg1);
// add+sar mimics the immediate tag round-trip (shift left then arithmetic
// shift right), truncating the value to the immediate's payload range.
688 m_jit.addl_rr(tempReg1, tempReg1);
689 m_jit.sarl_i8r(1, tempReg1);
690 m_jit.cvtsi2sd_rr(tempReg1, tempXmm);
691 // Compare & branch if immediate.
692 m_jit.ucomis_rr(tempXmm, xmmSource);
693 X86Assembler::JmpSrc resultIsImm = m_jit.emitUnlinkedJe();
694 X86Assembler::JmpDst resultLookedLikeImmButActuallyIsnt = m_jit.label();
696 // Store the result to the JSNumberCell and jump.
697 m_jit.movsd_rm(xmmSource, OBJECT_OFFSET(JSNumberCell, m_value), jsNumberCell);
698 emitPutResult(dst, jsNumberCell);
699 *wroteJSNumberCell = m_jit.emitUnlinkedJmp();
701 m_jit.link(resultIsImm, m_jit.label());
702 // value == (double)(JSImmediate)value... or at least, it looks that way...
703 // ucomi will report that (0 == -0), and will report true if either input in NaN (result is unordered).
704 m_jit.link(m_jit.emitUnlinkedJp(), resultLookedLikeImmButActuallyIsnt); // Actually was a NaN
// Detect -0: the top word of the double has only the sign bit set.
705 m_jit.pextrw_irr(3, xmmSource, tempReg2);
706 m_jit.cmpl_i32r(0x8000, tempReg2);
707 m_jit.link(m_jit.emitUnlinkedJe(), resultLookedLikeImmButActuallyIsnt); // Actually was -0
708 // Yes it really really really is representable as a JSImmediate.
709 emitFastArithIntToImmNoCheck(tempReg1)
// NOTE(review): this stores ecx rather than tempReg1 — looks correct only
// because both call sites in this file pass tempReg1 == X86::ecx; confirm
// before reusing with a different register.
710 emitPutResult(dst, X86::ecx);
// Compiles the fast paths for op_add / op_sub / op_mul. Three strategies:
//  (A) src2 is a reusable JSNumberCell and SSE2 is available: do the math in
//      xmm0 and write the result back into src2's cell (or an immediate);
//  (B) src1 is reusable (and src2 was not): symmetric, reusing src1's cell;
//  (C) otherwise: immediate-integer arithmetic with overflow (and, for mul,
//      -0) checks, bailing to the slow case on failure.
// NOTE(review): closing braces / else lines were elided in this extract.
713 void CTI::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types, unsigned i)
715 StructureID* numberStructureID = m_callFrame->globalData().numberStructureID.get();
716 X86Assembler::JmpSrc wasJSNumberCell1, wasJSNumberCell1b, wasJSNumberCell2, wasJSNumberCell2b;
718 emitGetArg(src1, X86::eax);
719 emitGetArg(src2, X86::edx);
// --- Strategy (A): reuse src2's number cell ---
721 if (types.second().isReusable() && isSSE2Present()) {
722 ASSERT(types.second().mightBeNumber());
724 // Check op2 is a number
725 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::edx);
726 X86Assembler::JmpSrc op2imm = m_jit.emitUnlinkedJne();
727 if (!types.second().definitelyIsNumber()) {
728 emitJumpSlowCaseIfNotJSCell(X86::edx, i);
729 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::edx);
730 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
733 // (1) In this case src2 is a reusable number cell.
734 //     Slow case if src1 is not a number type.
735 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
736 X86Assembler::JmpSrc op1imm = m_jit.emitUnlinkedJne();
737 if (!types.first().definitelyIsNumber()) {
738 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
739 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
740 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
743 // (1a) if we get here, src1 is also a number cell
744 m_jit.movsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
745 X86Assembler::JmpSrc loadedDouble = m_jit.emitUnlinkedJmp();
746 // (1b) if we get here, src1 is an immediate
747 m_jit.link(op1imm, m_jit.label());
748 emitFastArithImmToInt(X86::eax);
749 m_jit.cvtsi2sd_rr(X86::eax, X86::xmm0);
751 m_jit.link(loadedDouble, m_jit.label());
// The second operand is consumed directly from src2's cell in memory.
752 if (opcodeID == op_add)
753 m_jit.addsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
754 else if (opcodeID == op_sub)
755 m_jit.subsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
757 ASSERT(opcodeID == op_mul);
758 m_jit.mulsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
761 putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::edx, dst, &wasJSNumberCell2, X86::xmm1, X86::ecx, X86::eax);
762 wasJSNumberCell2b = m_jit.emitUnlinkedJmp();
764 // (2) This handles cases where src2 is an immediate number.
765 //     Two slow cases - either src1 isn't an immediate, or the subtract overflows.
766 m_jit.link(op2imm, m_jit.label());
767 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
// --- Strategy (B): reuse src1's number cell ---
768 } else if (types.first().isReusable() && isSSE2Present()) {
769 ASSERT(types.first().mightBeNumber());
771 // Check op1 is a number
772 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
773 X86Assembler::JmpSrc op1imm = m_jit.emitUnlinkedJne();
774 if (!types.first().definitelyIsNumber()) {
775 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
776 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
777 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
780 // (1) In this case src1 is a reusable number cell.
781 //     Slow case if src2 is not a number type.
782 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::edx);
783 X86Assembler::JmpSrc op2imm = m_jit.emitUnlinkedJne();
784 if (!types.second().definitelyIsNumber()) {
785 emitJumpSlowCaseIfNotJSCell(X86::edx, i);
786 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::edx);
787 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
790 // (1a) if we get here, src2 is also a number cell
791 m_jit.movsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm1);
792 X86Assembler::JmpSrc loadedDouble = m_jit.emitUnlinkedJmp();
793 // (1b) if we get here, src2 is an immediate
794 m_jit.link(op2imm, m_jit.label());
795 emitFastArithImmToInt(X86::edx);
796 m_jit.cvtsi2sd_rr(X86::edx, X86::xmm1);
798 m_jit.link(loadedDouble, m_jit.label());
799 m_jit.movsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
800 if (opcodeID == op_add)
801 m_jit.addsd_rr(X86::xmm1, X86::xmm0);
802 else if (opcodeID == op_sub)
803 m_jit.subsd_rr(X86::xmm1, X86::xmm0);
805 ASSERT(opcodeID == op_mul);
806 m_jit.mulsd_rr(X86::xmm1, X86::xmm0);
808 m_jit.movsd_rm(X86::xmm0, OBJECT_OFFSET(JSNumberCell, m_value), X86::eax);
811 putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::eax, dst, &wasJSNumberCell1, X86::xmm1, X86::ecx, X86::edx);
812 wasJSNumberCell1b = m_jit.emitUnlinkedJmp();
814 // (2) This handles cases where src1 is an immediate number.
815 //     Two slow cases - either src2 isn't an immediate, or the subtract overflows.
816 m_jit.link(op1imm, m_jit.label());
817 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
// --- Strategy (C): plain immediate-integer fast path ---
819 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
821 if (opcodeID == op_add) {
822 emitFastArithDeTagImmediate(X86::eax);
823 m_jit.addl_rr(X86::edx, X86::eax);
824 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
825 } else if (opcodeID == op_sub) {
826 m_jit.subl_rr(X86::edx, X86::eax);
827 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
828 emitFastArithReTagImmediate(X86::eax);
830 ASSERT(opcodeID == op_mul);
831 // convert eax & edx from JSImmediates to ints, and check if either are zero
832 emitFastArithImmToInt(X86::edx);
833 X86Assembler::JmpSrc op1Zero = emitFastArithDeTagImmediateJumpIfZero(X86::eax);
834 m_jit.testl_rr(X86::edx, X86::edx);
835 X86Assembler::JmpSrc op2NonZero = m_jit.emitUnlinkedJne();
836 m_jit.link(op1Zero, m_jit.label());
837 // if either input is zero, add the two together, and check if the result is < 0.
838 // If it is, we have a problem (N < 0), (N * 0) == -0, not representable as a JSImmediate.
839 m_jit.movl_rr(X86::eax, X86::ecx);
840 m_jit.addl_rr(X86::edx, X86::ecx);
841 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJs(), i));
842 // Skip the above check if neither input is zero
843 m_jit.link(op2NonZero, m_jit.label());
844 m_jit.imull_rr(X86::edx, X86::eax);
845 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
846 emitFastArithReTagImmediate(X86::eax);
// Link the "wrote result into a number cell" exits back to the join point.
850 if (types.second().isReusable() && isSSE2Present()) {
851 m_jit.link(wasJSNumberCell2, m_jit.label());
852 m_jit.link(wasJSNumberCell2b, m_jit.label());
854 else if (types.first().isReusable() && isSSE2Present()) {
855 m_jit.link(wasJSNumberCell1, m_jit.label());
856 m_jit.link(wasJSNumberCell1b, m_jit.label());
// Slow-case stub for compileBinaryArithOp: links every SlowCaseEntry that
// the fast path appended (the (++iter)->from sequence must mirror, in order,
// exactly the appends made above for the same operand-type combination),
// then calls the generic cti_op_add/sub/mul helper.
860 void CTI::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types, unsigned i)
862 X86Assembler::JmpDst here = m_jit.label();
863 m_jit.link(iter->from, here);
864 if (types.second().isReusable() && isSSE2Present()) {
865 if (!types.first().definitelyIsNumber()) {
866 m_jit.link((++iter)->from, here);
867 m_jit.link((++iter)->from, here);
869 if (!types.second().definitelyIsNumber()) {
870 m_jit.link((++iter)->from, here);
871 m_jit.link((++iter)->from, here);
873 m_jit.link((++iter)->from, here);
874 } else if (types.first().isReusable() && isSSE2Present()) {
875 if (!types.first().definitelyIsNumber()) {
876 m_jit.link((++iter)->from, here);
877 m_jit.link((++iter)->from, here);
879 if (!types.second().definitelyIsNumber()) {
880 m_jit.link((++iter)->from, here);
881 m_jit.link((++iter)->from, here);
883 m_jit.link((++iter)->from, here);
885 m_jit.link((++iter)->from, here);
887 // additional entry point to handle -0 cases.
888 if (opcodeID == op_mul)
889 m_jit.link((++iter)->from, here);
// Fall back to the generic C++ helper (result store elided in this extract).
891 emitGetPutArg(src1, 0, X86::ecx);
892 emitGetPutArg(src2, 4, X86::ecx);
893 if (opcodeID == op_add)
894 emitCall(i, Machine::cti_op_add);
895 else if (opcodeID == op_sub)
896 emitCall(i, Machine::cti_op_sub);
898 ASSERT(opcodeID == op_mul);
899 emitCall(i, Machine::cti_op_mul);
904 void CTI::privateCompileMainPass()
906 Instruction* instruction = m_codeBlock->instructions.begin();
907 unsigned instructionCount = m_codeBlock->instructions.size();
909 unsigned structureIDInstructionIndex = 0;
911 for (unsigned i = 0; i < instructionCount; ) {
912 m_labels[i] = m_jit.label();
914 #if ENABLE(SAMPLING_TOOL)
915 m_jit.movl_i32m(m_machine->getOpcodeID(instruction[i].u.opcode), ¤tOpcodeID);
918 ASSERT_WITH_MESSAGE(m_machine->isOpcode(instruction[i].u.opcode), "privateCompileMainPass gone bad @ %d", i);
919 switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
921 unsigned src = instruction[i + 2].u.operand;
923 m_jit.movl_i32r(reinterpret_cast<unsigned>(getConstant(m_callFrame, src)), X86::edx);
925 emitGetArg(src, X86::edx);
926 emitPutResult(instruction[i + 1].u.operand, X86::edx);
931 unsigned dst = instruction[i + 1].u.operand;
932 unsigned src1 = instruction[i + 2].u.operand;
933 unsigned src2 = instruction[i + 3].u.operand;
935 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
936 emitGetArg(src2, X86::edx);
937 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
938 m_jit.addl_i32r(getDeTaggedConstantImmediate(value), X86::edx);
939 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
940 emitPutResult(dst, X86::edx);
941 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
942 emitGetArg(src1, X86::eax);
943 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
944 m_jit.addl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
945 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
948 OperandTypes types = OperandTypes::fromInt(instruction[i + 4].u.operand);
949 if (types.first().mightBeNumber() && types.second().mightBeNumber())
950 compileBinaryArithOp(op_add, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
952 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
953 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
954 emitCall(i, Machine::cti_op_add);
955 emitPutResult(instruction[i + 1].u.operand);
963 if (m_codeBlock->needsFullScopeChain)
964 emitCall(i, Machine::cti_op_end);
965 emitGetArg(instruction[i + 1].u.operand, X86::eax);
966 #if ENABLE(SAMPLING_TOOL)
967 m_jit.movl_i32m(-1, &currentOpcodeID);
969 m_jit.pushl_m(RegisterFile::ReturnPC * static_cast<int>(sizeof(Register)), X86::edi);
975 unsigned target = instruction[i + 1].u.operand;
976 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
981 int srcDst = instruction[i + 1].u.operand;
982 emitGetArg(srcDst, X86::eax);
983 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
984 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
985 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
986 emitPutResult(srcDst, X86::eax);
991 emitSlowScriptCheck(i);
993 unsigned target = instruction[i + 1].u.operand;
994 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
998 case op_loop_if_less: {
999 emitSlowScriptCheck(i);
1001 unsigned target = instruction[i + 3].u.operand;
1002 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1004 emitGetArg(instruction[i + 1].u.operand, X86::edx);
1005 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1006 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
1007 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
1009 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1010 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1011 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1012 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1013 m_jit.cmpl_rr(X86::edx, X86::eax);
1014 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
1019 case op_loop_if_lesseq: {
1020 emitSlowScriptCheck(i);
1022 unsigned target = instruction[i + 3].u.operand;
1023 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1025 emitGetArg(instruction[i + 1].u.operand, X86::edx);
1026 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1027 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
1028 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
1030 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1031 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1032 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1033 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1034 m_jit.cmpl_rr(X86::edx, X86::eax);
1035 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
1040 case op_new_object: {
1041 emitCall(i, Machine::cti_op_new_object);
1042 emitPutResult(instruction[i + 1].u.operand);
1046 case op_put_by_id: {
1047 // In order to be able to repatch both the StructureID, and the object offset, we store one pointer,
1048 // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
1049 // such that the StructureID & offset are always at the same distance from this.
1051 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1052 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1054 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
1055 X86Assembler::JmpDst hotPathBegin = m_jit.label();
1056 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
1057 ++structureIDInstructionIndex;
1059 // Jump to a slow case if either the base object is an immediate, or if the StructureID does not match.
1060 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
1061 // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
1062 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1063 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdStructureID);
1064 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1066 // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
1067 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
1068 m_jit.movl_rm(X86::edx, repatchGetByIdDefaultOffset, X86::eax);
1069 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdPropertyMapOffset);
1074 case op_get_by_id: {
1075 // As for put_by_id, get_by_id requires the offset of the StructureID and the offset of the access to be repatched.
1076 // Additionally, for get_by_id we need to repatch the offset of the branch to the slow case (we repatch this to jump
1077 // to array-length / prototype access trampolines), and finally we also use the property-map access offset as a label
1078 // to jump back to if one of these trampolines finds a match.
1080 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1082 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
1084 X86Assembler::JmpDst hotPathBegin = m_jit.label();
1085 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
1086 ++structureIDInstructionIndex;
1088 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
1089 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1090 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdStructureID);
1091 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1092 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdBranchToSlowCase);
1094 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
1095 m_jit.movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::ecx);
1096 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdPropertyMapOffset);
1097 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
1102 case op_instanceof: {
1103 emitGetArg(instruction[i + 2].u.operand, X86::eax); // value
1104 emitGetArg(instruction[i + 3].u.operand, X86::ecx); // baseVal
1105 emitGetArg(instruction[i + 4].u.operand, X86::edx); // proto
1107 // check if any are immediates
1108 m_jit.orl_rr(X86::eax, X86::ecx);
1109 m_jit.orl_rr(X86::edx, X86::ecx);
1110 m_jit.testl_i32r(JSImmediate::TagMask, X86::ecx);
1112 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJnz(), i));
1114 // check that all are object type - this is a bit of a bithack to avoid excess branching;
1115 // we check that the sum of the three type codes from StructureIDs is exactly 3 * ObjectType,
1116 // this works because NumberType and StringType are smaller
1117 m_jit.movl_i32r(3 * ObjectType, X86::ecx);
1118 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::eax);
1119 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::edx, X86::edx);
1120 m_jit.subl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::eax, X86::ecx);
1121 m_jit.subl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::edx, X86::ecx);
1122 emitGetArg(instruction[i + 3].u.operand, X86::edx); // reload baseVal
1123 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::edx, X86::edx);
1124 m_jit.cmpl_rm(X86::ecx, OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::edx);
1126 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1128 // check that baseVal's flags include ImplementsHasInstance but not OverridesHasInstance
1129 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::edx, X86::ecx);
1130 m_jit.andl_i32r(ImplementsHasInstance | OverridesHasInstance, X86::ecx);
1131 m_jit.cmpl_i32r(ImplementsHasInstance, X86::ecx);
1133 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1135 emitGetArg(instruction[i + 2].u.operand, X86::ecx); // reload value
1136 emitGetArg(instruction[i + 4].u.operand, X86::edx); // reload proto
1138 // optimistically load true result
1139 m_jit.movl_i32r(reinterpret_cast<int32_t>(jsBoolean(true)), X86::eax);
1141 X86Assembler::JmpDst loop = m_jit.label();
1143 // load value's prototype
1144 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::ecx, X86::ecx);
1145 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
1147 m_jit.cmpl_rr(X86::ecx, X86::edx);
1148 X86Assembler::JmpSrc exit = m_jit.emitUnlinkedJe();
1150 m_jit.cmpl_i32r(reinterpret_cast<int32_t>(jsNull()), X86::ecx);
1151 X86Assembler::JmpSrc goToLoop = m_jit.emitUnlinkedJne();
1152 m_jit.link(goToLoop, loop);
1154 m_jit.movl_i32r(reinterpret_cast<int32_t>(jsBoolean(false)), X86::eax);
1156 m_jit.link(exit, m_jit.label());
1158 emitPutResult(instruction[i + 1].u.operand);
1163 case op_del_by_id: {
1164 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1165 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1166 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1167 emitCall(i, Machine::cti_op_del_by_id);
1168 emitPutResult(instruction[i + 1].u.operand);
1173 unsigned dst = instruction[i + 1].u.operand;
1174 unsigned src1 = instruction[i + 2].u.operand;
1175 unsigned src2 = instruction[i + 3].u.operand;
1177 // For now, only plant a fast int case if the constant operand is greater than zero.
1178 JSValue* src1Value = getConstantImmediateNumericArg(src1);
1179 JSValue* src2Value = getConstantImmediateNumericArg(src2);
1181 if (src1Value && ((value = JSImmediate::intValue(src1Value)) > 0)) {
1182 emitGetArg(src2, X86::eax);
1183 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1184 emitFastArithDeTagImmediate(X86::eax);
1185 m_jit.imull_i32r(X86::eax, value, X86::eax);
1186 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1187 emitFastArithReTagImmediate(X86::eax);
1189 } else if (src2Value && ((value = JSImmediate::intValue(src2Value)) > 0)) {
1190 emitGetArg(src1, X86::eax);
1191 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1192 emitFastArithDeTagImmediate(X86::eax);
1193 m_jit.imull_i32r(X86::eax, value, X86::eax);
1194 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1195 emitFastArithReTagImmediate(X86::eax);
1198 compileBinaryArithOp(op_mul, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
1204 FuncDeclNode* func = (m_codeBlock->functions[instruction[i + 2].u.operand]).get();
1205 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
1206 emitCall(i, Machine::cti_op_new_func);
1207 emitPutResult(instruction[i + 1].u.operand);
1212 compileOpCall(instruction, i);
1216 case op_get_global_var: {
1217 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 2].u.jsCell);
1218 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
1219 emitGetVariableObjectRegister(X86::eax, instruction[i + 3].u.operand, X86::eax);
1220 emitPutResult(instruction[i + 1].u.operand, X86::eax);
1224 case op_put_global_var: {
1225 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 1].u.jsCell);
1226 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
1227 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1228 emitPutVariableObjectRegister(X86::edx, X86::eax, instruction[i + 2].u.operand);
1232 case op_get_scoped_var: {
1233 int skip = instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain;
1235 emitGetArg(RegisterFile::ScopeChain, X86::eax);
1237 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::eax, X86::eax);
1239 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::eax, X86::eax);
1240 emitGetVariableObjectRegister(X86::eax, instruction[i + 2].u.operand, X86::eax);
1241 emitPutResult(instruction[i + 1].u.operand);
1245 case op_put_scoped_var: {
1246 int skip = instruction[i + 2].u.operand + m_codeBlock->needsFullScopeChain;
1248 emitGetArg(RegisterFile::ScopeChain, X86::edx);
1249 emitGetArg(instruction[i + 3].u.operand, X86::eax);
1251 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::edx, X86::edx);
1253 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::edx, X86::edx);
1254 emitPutVariableObjectRegister(X86::eax, X86::edx, instruction[i + 1].u.operand);
1258 case op_tear_off_activation: {
1259 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1260 emitCall(i, Machine::cti_op_tear_off_activation);
1264 case op_tear_off_arguments: {
1265 emitCall(i, Machine::cti_op_tear_off_arguments);
1270 // Check for a profiler - if there is one, jump to the hook below.
1271 emitGetCTIParam(CTI_ARGS_profilerReference, X86::eax);
1272 m_jit.cmpl_i32m(0, X86::eax);
1273 X86Assembler::JmpSrc profile = m_jit.emitUnlinkedJne();
1274 X86Assembler::JmpDst profiled = m_jit.label();
1276 // We could JIT generate the deref, only calling out to C when the refcount hits zero.
1277 if (m_codeBlock->needsFullScopeChain)
1278 emitCall(i, Machine::cti_op_ret_scopeChain);
1280 // Return the result in %eax.
1281 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1283 // Grab the return address.
1284 emitGetArg(RegisterFile::ReturnPC, X86::edx);
1286 // Restore our caller's "r".
1287 emitGetArg(RegisterFile::CallerFrame, X86::edi);
1288 emitPutCTIParam(X86::edi, CTI_ARGS_callFrame);
1291 m_jit.pushl_r(X86::edx);
1295 m_jit.link(profile, m_jit.label());
1296 emitCall(i, Machine::cti_op_ret_profiler);
1297 m_jit.link(m_jit.emitUnlinkedJmp(), profiled);
1302 case op_new_array: {
1303 m_jit.leal_mr(sizeof(Register) * instruction[i + 2].u.operand, X86::edi, X86::edx);
1304 emitPutArg(X86::edx, 0);
1305 emitPutArgConstant(instruction[i + 3].u.operand, 4);
1306 emitCall(i, Machine::cti_op_new_array);
1307 emitPutResult(instruction[i + 1].u.operand);
1312 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1313 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1314 emitCall(i, Machine::cti_op_resolve);
1315 emitPutResult(instruction[i + 1].u.operand);
1319 case op_construct: {
1320 compileOpCall(instruction, i, OpConstruct);
1324 case op_construct_verify: {
1325 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1327 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1328 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJne();
1329 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1330 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
1331 X86Assembler::JmpSrc isObject = m_jit.emitUnlinkedJe();
1333 m_jit.link(isImmediate, m_jit.label());
1334 emitGetArg(instruction[i + 2].u.operand, X86::ecx);
1335 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
1336 m_jit.link(isObject, m_jit.label());
1341 case op_get_by_val: {
1342 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1343 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1344 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1345 emitFastArithImmToInt(X86::edx);
1346 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1347 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1348 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
1349 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1351 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
1352 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
1353 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
1354 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
1356 // Get the value from the vector
1357 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::eax);
1358 emitPutResult(instruction[i + 1].u.operand);
1362 case op_resolve_func: {
1363 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1364 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1365 emitCall(i, Machine::cti_op_resolve_func);
1366 emitPutResult(instruction[i + 1].u.operand);
1367 emitPutResult(instruction[i + 2].u.operand, X86::edx);
1372 compileBinaryArithOp(op_sub, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
1376 case op_put_by_val: {
1377 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1378 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1379 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1380 emitFastArithImmToInt(X86::edx);
1381 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1382 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1383 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
1384 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1386 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
1387 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
1388 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
1389 X86Assembler::JmpSrc inFastVector = m_jit.emitUnlinkedJa();
1390 // No; oh well, check if the access is within the vector - if so, we may still be okay.
1391 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
1392 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
1394 // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
1395 // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff.
1396 m_jit.cmpl_i8m(0, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
1397 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
1399 // All good - put the value into the array.
1400 m_jit.link(inFastVector, m_jit.label());
1401 emitGetArg(instruction[i + 3].u.operand, X86::eax);
1402 m_jit.movl_rm(X86::eax, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
1406 CTI_COMPILE_BINARY_OP(op_lesseq)
1407 case op_loop_if_true: {
1408 emitSlowScriptCheck(i);
1410 unsigned target = instruction[i + 2].u.operand;
1411 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1413 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1414 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
1415 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1416 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
1418 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1419 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1420 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1421 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1423 m_jit.link(isZero, m_jit.label());
1427 case op_resolve_base: {
1428 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1429 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1430 emitCall(i, Machine::cti_op_resolve_base);
1431 emitPutResult(instruction[i + 1].u.operand);
1436 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1437 emitCall(i, Machine::cti_op_negate);
1438 emitPutResult(instruction[i + 1].u.operand);
1442 case op_resolve_skip: {
1443 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1444 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1445 emitPutArgConstant(instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain, 4);
1446 emitCall(i, Machine::cti_op_resolve_skip);
1447 emitPutResult(instruction[i + 1].u.operand);
1451 case op_resolve_global: {
1453 unsigned globalObject = reinterpret_cast<unsigned>(instruction[i + 2].u.jsCell);
1454 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1455 void* structureIDAddr = reinterpret_cast<void*>(instruction + i + 4);
1456 void* offsetAddr = reinterpret_cast<void*>(instruction + i + 5);
1458 // Check StructureID of global object
1459 m_jit.movl_i32r(globalObject, X86::eax);
1460 m_jit.movl_mr(structureIDAddr, X86::edx);
1461 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1462 X86Assembler::JmpSrc slowCase = m_jit.emitUnlinkedJne(); // StructureIDs don't match
1463 m_slowCases.append(SlowCaseEntry(slowCase, i));
1465 // Load cached property
1466 m_jit.movl_mr(OBJECT_OFFSET(JSGlobalObject, m_propertyStorage), X86::eax, X86::eax);
1467 m_jit.movl_mr(offsetAddr, X86::edx);
1468 m_jit.movl_mr(0, X86::eax, X86::edx, sizeof(JSValue*), X86::eax);
1469 emitPutResult(instruction[i + 1].u.operand);
1470 X86Assembler::JmpSrc end = m_jit.emitUnlinkedJmp();
1473 m_jit.link(slowCase, m_jit.label());
1474 emitPutArgConstant(globalObject, 0);
1475 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1476 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 8);
1477 emitCall(i, Machine::cti_op_resolve_global);
1478 emitPutResult(instruction[i + 1].u.operand);
1479 m_jit.link(end, m_jit.label());
1481 ++structureIDInstructionIndex;
1484 CTI_COMPILE_BINARY_OP(op_div)
1486 int srcDst = instruction[i + 1].u.operand;
1487 emitGetArg(srcDst, X86::eax);
1488 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1489 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
1490 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1491 emitPutResult(srcDst, X86::eax);
1496 unsigned target = instruction[i + 3].u.operand;
1497 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1499 emitGetArg(instruction[i + 1].u.operand, X86::edx);
1500 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1501 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
1502 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
1504 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1505 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1506 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1507 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1508 m_jit.cmpl_rr(X86::edx, X86::eax);
1509 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
1515 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1516 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
1517 m_jit.testl_i32r(JSImmediate::FullTagTypeMask, X86::eax); // i8?
1518 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1519 m_jit.xorl_i8r((JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue), X86::eax);
1520 emitPutResult(instruction[i + 1].u.operand);
1525 unsigned target = instruction[i + 2].u.operand;
1526 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1528 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1529 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1530 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1531 X86Assembler::JmpSrc isNonZero = m_jit.emitUnlinkedJne();
1533 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1534 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1535 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1536 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1538 m_jit.link(isNonZero, m_jit.label());
1543 int srcDst = instruction[i + 2].u.operand;
1544 emitGetArg(srcDst, X86::eax);
1545 m_jit.movl_rr(X86::eax, X86::edx);
1546 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1547 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1548 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1549 emitPutResult(srcDst, X86::edx);
1550 emitPutResult(instruction[i + 1].u.operand);
1554 case op_unexpected_load: {
1555 JSValue* v = m_codeBlock->unexpectedConstants[instruction[i + 2].u.operand];
1556 m_jit.movl_i32r(reinterpret_cast<unsigned>(v), X86::eax);
1557 emitPutResult(instruction[i + 1].u.operand);
1562 int retAddrDst = instruction[i + 1].u.operand;
1563 int target = instruction[i + 2].u.operand;
1564 m_jit.movl_i32m(0, sizeof(Register) * retAddrDst, X86::edi);
1565 X86Assembler::JmpDst addrPosition = m_jit.label();
1566 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1567 X86Assembler::JmpDst sretTarget = m_jit.label();
1568 m_jsrSites.append(JSRInfo(addrPosition, sretTarget));
1573 m_jit.jmp_m(sizeof(Register) * instruction[i + 1].u.operand, X86::edi);
1578 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1579 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1580 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1581 m_jit.cmpl_rr(X86::edx, X86::eax);
1582 m_jit.sete_r(X86::eax);
1583 m_jit.movzbl_rr(X86::eax, X86::eax);
1584 emitTagAsBoolImmediate(X86::eax);
1585 emitPutResult(instruction[i + 1].u.operand);
1590 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1591 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1592 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1593 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1594 emitFastArithImmToInt(X86::eax);
1595 emitFastArithImmToInt(X86::ecx);
1596 m_jit.shll_CLr(X86::eax);
1597 emitFastArithIntToImmOrSlowCase(X86::eax, i);
1598 emitPutResult(instruction[i + 1].u.operand);
1603 unsigned src1 = instruction[i + 2].u.operand;
1604 unsigned src2 = instruction[i + 3].u.operand;
1605 unsigned dst = instruction[i + 1].u.operand;
1606 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
1607 emitGetArg(src2, X86::eax);
1608 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1609 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax); // FIXME: make it more obvious this is relying on the format of JSImmediate
1611 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
1612 emitGetArg(src1, X86::eax);
1613 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1614 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax);
1617 emitGetArg(src1, X86::eax);
1618 emitGetArg(src2, X86::edx);
1619 m_jit.andl_rr(X86::edx, X86::eax);
1620 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1627 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1628 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1629 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1630 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1631 emitFastArithImmToInt(X86::ecx);
1632 m_jit.sarl_CLr(X86::eax);
1633 emitFastArithPotentiallyReTagImmediate(X86::eax);
1634 emitPutResult(instruction[i + 1].u.operand);
1639 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1640 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1641 m_jit.xorl_i8r(~JSImmediate::TagBitTypeInteger, X86::eax);
1642 emitPutResult(instruction[i + 1].u.operand);
1646 case op_resolve_with_base: {
1647 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1648 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1649 emitCall(i, Machine::cti_op_resolve_with_base);
1650 emitPutResult(instruction[i + 1].u.operand);
1651 emitPutResult(instruction[i + 2].u.operand, X86::edx);
1655 case op_new_func_exp: {
1656 FuncExprNode* func = (m_codeBlock->functionExpressions[instruction[i + 2].u.operand]).get();
1657 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
1658 emitCall(i, Machine::cti_op_new_func_exp);
1659 emitPutResult(instruction[i + 1].u.operand);
1664 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1665 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1666 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1667 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1668 emitFastArithDeTagImmediate(X86::eax);
1669 m_slowCases.append(SlowCaseEntry(emitFastArithDeTagImmediateJumpIfZero(X86::ecx), i));
1671 m_jit.idivl_r(X86::ecx);
1672 emitFastArithReTagImmediate(X86::edx);
1673 m_jit.movl_rr(X86::edx, X86::eax);
1674 emitPutResult(instruction[i + 1].u.operand);
1679 unsigned target = instruction[i + 2].u.operand;
1680 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1682 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1683 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
1684 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1685 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
1687 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1688 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1689 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1690 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1692 m_jit.link(isZero, m_jit.label());
1696 CTI_COMPILE_BINARY_OP(op_less)
1698 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1699 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1700 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1701 m_jit.cmpl_rr(X86::eax, X86::edx);
1703 m_jit.setne_r(X86::eax);
1704 m_jit.movzbl_rr(X86::eax, X86::eax);
1705 emitTagAsBoolImmediate(X86::eax);
1707 emitPutResult(instruction[i + 1].u.operand);
1713 int srcDst = instruction[i + 2].u.operand;
1714 emitGetArg(srcDst, X86::eax);
1715 m_jit.movl_rr(X86::eax, X86::edx);
1716 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1717 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1718 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1719 emitPutResult(srcDst, X86::edx);
1720 emitPutResult(instruction[i + 1].u.operand);
1724 CTI_COMPILE_BINARY_OP(op_urshift)
1726 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1727 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1728 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1729 m_jit.xorl_rr(X86::edx, X86::eax);
1730 emitFastArithReTagImmediate(X86::eax);
1731 emitPutResult(instruction[i + 1].u.operand);
1735 case op_new_regexp: {
1736 RegExp* regExp = m_codeBlock->regexps[instruction[i + 2].u.operand].get();
1737 emitPutArgConstant(reinterpret_cast<unsigned>(regExp), 0);
1738 emitCall(i, Machine::cti_op_new_regexp);
1739 emitPutResult(instruction[i + 1].u.operand);
1744 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1745 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1746 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1747 m_jit.orl_rr(X86::edx, X86::eax);
1748 emitPutResult(instruction[i + 1].u.operand);
1752 case op_call_eval: {
1753 compileOpCall(instruction, i, OpCallEval);
1758 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1759 emitCall(i, Machine::cti_op_throw);
1760 m_jit.addl_i8r(0x24, X86::esp);
1761 m_jit.popl_r(X86::edi);
1762 m_jit.popl_r(X86::esi);
1767 case op_get_pnames: {
1768 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1769 emitCall(i, Machine::cti_op_get_pnames);
1770 emitPutResult(instruction[i + 1].u.operand);
1774 case op_next_pname: {
1775 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1776 unsigned target = instruction[i + 3].u.operand;
1777 emitCall(i, Machine::cti_op_next_pname);
1778 m_jit.testl_rr(X86::eax, X86::eax);
1779 X86Assembler::JmpSrc endOfIter = m_jit.emitUnlinkedJe();
1780 emitPutResult(instruction[i + 1].u.operand);
1781 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 3 + target));
1782 m_jit.link(endOfIter, m_jit.label());
1786 case op_push_scope: {
1787 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1788 emitCall(i, Machine::cti_op_push_scope);
1792 case op_pop_scope: {
1793 emitCall(i, Machine::cti_op_pop_scope);
1797 CTI_COMPILE_UNARY_OP(op_typeof)
1798 CTI_COMPILE_UNARY_OP(op_is_undefined)
1799 CTI_COMPILE_UNARY_OP(op_is_boolean)
1800 CTI_COMPILE_UNARY_OP(op_is_number)
1801 CTI_COMPILE_UNARY_OP(op_is_string)
1802 CTI_COMPILE_UNARY_OP(op_is_object)
1803 CTI_COMPILE_UNARY_OP(op_is_function)
1805 compileOpStrictEq(instruction, i, OpStrictEq);
1809 case op_nstricteq: {
1810 compileOpStrictEq(instruction, i, OpNStrictEq);
1814 case op_to_jsnumber: {
1815 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1817 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1818 X86Assembler::JmpSrc wasImmediate = m_jit.emitUnlinkedJnz();
1820 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
1822 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1823 m_jit.cmpl_i32m(NumberType, OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::ecx);
1825 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1827 m_jit.link(wasImmediate, m_jit.label());
1829 emitPutResult(instruction[i + 1].u.operand);
1834 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1835 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1836 emitCall(i, Machine::cti_op_in);
1837 emitPutResult(instruction[i + 1].u.operand);
1841 case op_push_new_scope: {
1842 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1843 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1844 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1845 emitCall(i, Machine::cti_op_push_new_scope);
1846 emitPutResult(instruction[i + 1].u.operand);
1851 emitGetCTIParam(CTI_ARGS_callFrame, X86::edi); // edi := r
1852 emitPutResult(instruction[i + 1].u.operand);
1856 case op_jmp_scopes: {
1857 unsigned count = instruction[i + 1].u.operand;
1858 emitPutArgConstant(count, 0);
1859 emitCall(i, Machine::cti_op_jmp_scopes);
1860 unsigned target = instruction[i + 2].u.operand;
1861 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1865 case op_put_by_index: {
1866 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1867 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1868 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1869 emitCall(i, Machine::cti_op_put_by_index);
1873 case op_switch_imm: {
1874 unsigned tableIndex = instruction[i + 1].u.operand;
1875 unsigned defaultOffset = instruction[i + 2].u.operand;
1876 unsigned scrutinee = instruction[i + 3].u.operand;
1878 // create jump table for switch destinations, track this switch statement.
1879 SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTables[tableIndex];
1880 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Immediate));
1881 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
1883 emitGetPutArg(scrutinee, 0, X86::ecx);
1884 emitPutArgConstant(tableIndex, 4);
1885 emitCall(i, Machine::cti_op_switch_imm);
1886 m_jit.jmp_r(X86::eax);
1890 case op_switch_char: {
1891 unsigned tableIndex = instruction[i + 1].u.operand;
1892 unsigned defaultOffset = instruction[i + 2].u.operand;
1893 unsigned scrutinee = instruction[i + 3].u.operand;
1895 // create jump table for switch destinations, track this switch statement.
1896 SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTables[tableIndex];
1897 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Character));
1898 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
1900 emitGetPutArg(scrutinee, 0, X86::ecx);
1901 emitPutArgConstant(tableIndex, 4);
1902 emitCall(i, Machine::cti_op_switch_char);
1903 m_jit.jmp_r(X86::eax);
1907 case op_switch_string: {
1908 unsigned tableIndex = instruction[i + 1].u.operand;
1909 unsigned defaultOffset = instruction[i + 2].u.operand;
1910 unsigned scrutinee = instruction[i + 3].u.operand;
1912 // create jump table for switch destinations, track this switch statement.
1913 StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTables[tableIndex];
1914 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset));
1916 emitGetPutArg(scrutinee, 0, X86::ecx);
1917 emitPutArgConstant(tableIndex, 4);
1918 emitCall(i, Machine::cti_op_switch_string);
1919 m_jit.jmp_r(X86::eax);
1923 case op_del_by_val: {
1924 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1925 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1926 emitCall(i, Machine::cti_op_del_by_val);
1927 emitPutResult(instruction[i + 1].u.operand);
1931 case op_put_getter: {
1932 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1933 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1934 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1935 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1936 emitCall(i, Machine::cti_op_put_getter);
1940 case op_put_setter: {
1941 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1942 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1943 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1944 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1945 emitCall(i, Machine::cti_op_put_setter);
1949 case op_new_error: {
1950 JSValue* message = m_codeBlock->unexpectedConstants[instruction[i + 3].u.operand];
1951 emitPutArgConstant(instruction[i + 2].u.operand, 0);
1952 emitPutArgConstant(reinterpret_cast<unsigned>(message), 4);
1953 emitPutArgConstant(m_codeBlock->lineNumberForVPC(&instruction[i]), 8);
1954 emitCall(i, Machine::cti_op_new_error);
1955 emitPutResult(instruction[i + 1].u.operand);
1960 emitPutArgConstant(instruction[i + 1].u.operand, 0);
1961 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1962 emitPutArgConstant(instruction[i + 3].u.operand, 8);
1963 emitCall(i, Machine::cti_op_debug);
1968 unsigned dst = instruction[i + 1].u.operand;
1969 unsigned src1 = instruction[i + 2].u.operand;
1971 emitGetArg(src1, X86::eax);
1972 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1973 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJnz();
1975 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1976 m_jit.testl_i32m(MasqueradesAsUndefined, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::ecx);
1977 m_jit.setnz_r(X86::eax);
1979 X86Assembler::JmpSrc wasNotImmediate = m_jit.emitUnlinkedJmp();
1981 m_jit.link(isImmediate, m_jit.label());
1983 m_jit.movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
1984 m_jit.andl_rr(X86::eax, X86::ecx);
1985 m_jit.cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
1986 m_jit.sete_r(X86::eax);
1988 m_jit.link(wasNotImmediate, m_jit.label());
1990 m_jit.movzbl_rr(X86::eax, X86::eax);
1991 emitTagAsBoolImmediate(X86::eax);
1998 unsigned dst = instruction[i + 1].u.operand;
1999 unsigned src1 = instruction[i + 2].u.operand;
2001 emitGetArg(src1, X86::eax);
2002 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2003 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJnz();
2005 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
2006 m_jit.testl_i32m(MasqueradesAsUndefined, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::ecx);
2007 m_jit.setz_r(X86::eax);
2009 X86Assembler::JmpSrc wasNotImmediate = m_jit.emitUnlinkedJmp();
2011 m_jit.link(isImmediate, m_jit.label());
2013 m_jit.movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
2014 m_jit.andl_rr(X86::eax, X86::ecx);
2015 m_jit.cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
2016 m_jit.setne_r(X86::eax);
2018 m_jit.link(wasNotImmediate, m_jit.label());
2020 m_jit.movzbl_rr(X86::eax, X86::eax);
2021 emitTagAsBoolImmediate(X86::eax);
2028 // Even though CTI doesn't use them, we initialize our constant
2029 // registers to zap stale pointers, to avoid unnecessarily prolonging
2030 // object lifetime and increasing GC pressure.
2031 size_t count = m_codeBlock->numVars + m_codeBlock->constantRegisters.size();
2032 for (size_t j = 0; j < count; ++j)
2033 emitInitRegister(j);
2038 case op_enter_with_activation: {
2039 // Even though CTI doesn't use them, we initialize our constant
2040 // registers to zap stale pointers, to avoid unnecessarily prolonging
2041 // object lifetime and increasing GC pressure.
2042 size_t count = m_codeBlock->numVars + m_codeBlock->constantRegisters.size();
2043 for (size_t j = 0; j < count; ++j)
2044 emitInitRegister(j);
2046 emitCall(i, Machine::cti_op_push_activation);
2047 emitPutResult(instruction[i + 1].u.operand);
2052 case op_create_arguments: {
2053 emitCall(i, Machine::cti_op_create_arguments);
2057 case op_convert_this: {
2058 emitGetArg(instruction[i + 1].u.operand, X86::eax);
2060 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
2061 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::edx);
2062 m_jit.testl_i32m(NeedsThisConversion, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::edx);
2063 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJnz(), i));
2068 case op_get_array_length:
2069 case op_get_by_id_chain:
2070 case op_get_by_id_generic:
2071 case op_get_by_id_proto:
2072 case op_get_by_id_self:
2073 case op_get_string_length:
2074 case op_put_by_id_generic:
2075 case op_put_by_id_replace:
2076 case op_put_by_id_transition:
2077 ASSERT_NOT_REACHED();
2081 ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
// Second compilation pass: resolve the intra-block jumps recorded during the
// main pass.  Each JmpTable entry pairs an unlinked jump (.from) with the
// bytecode index it targets (.to); m_labels maps bytecode indices to the
// corresponding positions in the generated code.
2085 void CTI::privateCompileLinkPass()
2087 unsigned jmpTableCount = m_jmpTable.size();
2088 for (unsigned i = 0; i < jmpTableCount; ++i)
2089 m_jit.link(m_jmpTable[i].from, m_labels[m_jmpTable[i].to]);
// Shared slow-path template for simple binary operators (used below for
// op_stricteq / op_nstricteq): link the single slow-case jump registered by
// the hot path, marshal both operands into the CTI argument area, call the
// C++ helper Machine::cti_<name>, and store the result register.
// NOTE(review): comments must not be interleaved with the continuation lines
// below — a '\' inside a '//' comment would splice the next line into it.
2093 #define CTI_COMPILE_BINARY_OP_SLOW_CASE(name) \
2095 m_jit.link(iter->from, m_jit.label()); \
2096 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
2097 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
2098 emitCall(i, Machine::cti_##name); \
2099 emitPutResult(instruction[i + 1].u.operand); \
// Third compilation pass: emit the out-of-line slow paths for every
// SlowCaseEntry recorded by the main pass.  Entries are consumed strictly in
// emission order, so each opcode's handler must advance `iter` exactly as
// many times as the hot path appended entries for that opcode — a mismatch
// desynchronizes the remainder of the walk.  Each slow path marshals its
// operands into the CTI argument area, calls the C++ helper, stores the
// result, and finally jumps back to the instruction after the hot path.
2104 void CTI::privateCompileSlowCases()
2106 unsigned structureIDInstructionIndex = 0;
2108 Instruction* instruction = m_codeBlock->instructions.begin();
2109 for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end(); ++iter) {
2110 unsigned i = iter->to;
2111 switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
2112 case op_convert_this: {
// Two slow-case entries were appended by the hot path (not-a-cell check and
// the NeedsThisConversion test); consume both before calling the helper.
2113 m_jit.link(iter->from, m_jit.label());
2114 m_jit.link((++iter)->from, m_jit.label());
2115 emitPutArg(X86::eax, 0);
2116 emitCall(i, Machine::cti_op_convert_this);
2117 emitPutResult(instruction[i + 1].u.operand);
// --- slow path calling cti_op_add (case label outside the visible lines) ---
// Three shapes: constant-immediate on either side (undo the speculative
// de-tagged add before re-dispatching), else the generic binary-arith helper.
2122 unsigned dst = instruction[i + 1].u.operand;
2123 unsigned src1 = instruction[i + 2].u.operand;
2124 unsigned src2 = instruction[i + 3].u.operand;
2125 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
2126 X86Assembler::JmpSrc notImm = iter->from;
2127 m_jit.link((++iter)->from, m_jit.label());
// Overflow path: the hot path already added the de-tagged constant; subtract
// it back out so the helper sees the original operand value.
2128 m_jit.subl_i32r(getDeTaggedConstantImmediate(value), X86::edx);
2129 m_jit.link(notImm, m_jit.label());
2130 emitGetPutArg(src1, 0, X86::ecx);
2131 emitPutArg(X86::edx, 4);
2132 emitCall(i, Machine::cti_op_add);
2134 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
2135 X86Assembler::JmpSrc notImm = iter->from;
2136 m_jit.link((++iter)->from, m_jit.label());
2137 m_jit.subl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
2138 m_jit.link(notImm, m_jit.label());
2139 emitPutArg(X86::eax, 0);
2140 emitGetPutArg(src2, 4, X86::ecx);
2141 emitCall(i, Machine::cti_op_add);
2144 OperandTypes types = OperandTypes::fromInt(instruction[i + 4].u.operand);
2145 if (types.first().mightBeNumber() && types.second().mightBeNumber())
2146 compileBinaryArithOpSlowCase(op_add, iter, dst, src1, src2, types, i);
2148 ASSERT_NOT_REACHED();
2154 case op_get_by_val: {
2155 // The slow case that handles accesses to arrays (below) may jump back up to here.
2156 X86Assembler::JmpDst beginGetByValSlow = m_jit.label();
2158 X86Assembler::JmpSrc notImm = iter->from;
2159 m_jit.link((++iter)->from, m_jit.label());
2160 m_jit.link((++iter)->from, m_jit.label());
2161 emitFastArithIntToImmNoCheck(X86::edx);
2162 m_jit.link(notImm, m_jit.label());
2163 emitPutArg(X86::eax, 0);
2164 emitPutArg(X86::edx, 4);
2165 emitCall(i, Machine::cti_op_get_by_val);
2166 emitPutResult(instruction[i + 1].u.operand);
2167 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
2169 // This is slow case that handles accesses to arrays above the fast cut-off.
2170 // First, check if this is an access to the vector
2171 m_jit.link((++iter)->from, m_jit.label());
2172 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
2173 m_jit.link(m_jit.emitUnlinkedJbe(), beginGetByValSlow);
2175 // okay, missed the fast region, but it is still in the vector. Get the value.
2176 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::ecx);
2177 // Check whether the value loaded is zero; if so we need to return undefined.
2178 m_jit.testl_rr(X86::ecx, X86::ecx);
2179 m_jit.link(m_jit.emitUnlinkedJe(), beginGetByValSlow);
2180 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
// --- op_sub: defer entirely to the generic binary-arith slow case ---
2186 compileBinaryArithOpSlowCase(op_sub, iter, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
// --- slow path calling cti_op_rshift ---
2191 m_jit.link(iter->from, m_jit.label());
2192 m_jit.link((++iter)->from, m_jit.label());
2193 emitPutArg(X86::eax, 0);
2194 emitPutArg(X86::ecx, 4);
2195 emitCall(i, Machine::cti_op_rshift);
2196 emitPutResult(instruction[i + 1].u.operand);
// --- slow path calling cti_op_lshift: reload both operands (the hot path
// clobbered them), then fall through to the shared call sequence ---
2201 X86Assembler::JmpSrc notImm1 = iter->from;
2202 X86Assembler::JmpSrc notImm2 = (++iter)->from;
2203 m_jit.link((++iter)->from, m_jit.label());
2204 emitGetArg(instruction[i + 2].u.operand, X86::eax);
2205 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
2206 m_jit.link(notImm1, m_jit.label());
2207 m_jit.link(notImm2, m_jit.label());
2208 emitPutArg(X86::eax, 0);
2209 emitPutArg(X86::ecx, 4);
2210 emitCall(i, Machine::cti_op_lshift);
2211 emitPutResult(instruction[i + 1].u.operand);
2215 case op_loop_if_less: {
// Loop back-edge: also emit the slow-script (timeout) check.
2216 emitSlowScriptCheck(i);
2218 unsigned target = instruction[i + 3].u.operand;
2219 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
2221 m_jit.link(iter->from, m_jit.label());
2222 emitPutArg(X86::edx, 0);
2223 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
2224 emitCall(i, Machine::cti_op_loop_if_less);
2225 m_jit.testl_rr(X86::eax, X86::eax);
2226 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
2228 m_jit.link(iter->from, m_jit.label());
2229 m_jit.link((++iter)->from, m_jit.label());
2230 emitPutArg(X86::eax, 0);
2231 emitPutArg(X86::edx, 4);
2232 emitCall(i, Machine::cti_op_loop_if_less);
2233 m_jit.testl_rr(X86::eax, X86::eax);
2234 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
2239 case op_put_by_id: {
2240 m_jit.link(iter->from, m_jit.label());
2241 m_jit.link((++iter)->from, m_jit.label());
2243 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
2244 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
2245 emitPutArg(X86::eax, 0);
2246 emitPutArg(X86::edx, 8);
2247 X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_put_by_id);
2249 // Track the location of the call; this will be used to recover repatch information.
2250 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
2251 m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
2252 ++structureIDInstructionIndex;
2257 case op_get_by_id: {
2258 // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
2259 // so that we only need track one pointer into the slow case code - we track a pointer to the location
2260 // of the call (which we can use to look up the repatch information), but should a array-length or
2261 // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
2262 // the distance from the call to the head of the slow case.
2264 m_jit.link(iter->from, m_jit.label());
2265 m_jit.link((++iter)->from, m_jit.label());
2268 X86Assembler::JmpDst coldPathBegin = m_jit.label();
2270 emitPutArg(X86::eax, 0);
2271 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
2272 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
2273 X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_get_by_id);
2274 ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
2275 emitPutResult(instruction[i + 1].u.operand);
2277 // Track the location of the call; this will be used to recover repatch information.
2278 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
2279 m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
2280 ++structureIDInstructionIndex;
2285 case op_resolve_global: {
// No code emitted here, but the opcode owns a structure-stub slot that must
// still be accounted for to keep the index in step with the hot pass.
2286 ++structureIDInstructionIndex;
2290 case op_loop_if_lesseq: {
2291 emitSlowScriptCheck(i);
2293 unsigned target = instruction[i + 3].u.operand;
2294 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
2296 m_jit.link(iter->from, m_jit.label());
2297 emitPutArg(X86::edx, 0);
2298 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
2299 emitCall(i, Machine::cti_op_loop_if_lesseq);
2300 m_jit.testl_rr(X86::eax, X86::eax);
2301 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
2303 m_jit.link(iter->from, m_jit.label());
2304 m_jit.link((++iter)->from, m_jit.label());
2305 emitPutArg(X86::eax, 0);
2306 emitPutArg(X86::edx, 4);
2307 emitCall(i, Machine::cti_op_loop_if_lesseq);
2308 m_jit.testl_rr(X86::eax, X86::eax);
2309 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
// --- slow path calling cti_op_pre_inc: undo the speculative de-tagged add
// on overflow before re-dispatching ---
2315 unsigned srcDst = instruction[i + 1].u.operand;
2316 X86Assembler::JmpSrc notImm = iter->from;
2317 m_jit.link((++iter)->from, m_jit.label());
2318 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
2319 m_jit.link(notImm, m_jit.label());
2320 emitPutArg(X86::eax, 0);
2321 emitCall(i, Machine::cti_op_pre_inc);
2322 emitPutResult(srcDst);
2326 case op_put_by_val: {
2327 // Normal slow cases - either is not an immediate imm, or is an array.
2328 X86Assembler::JmpSrc notImm = iter->from;
2329 m_jit.link((++iter)->from, m_jit.label());
2330 m_jit.link((++iter)->from, m_jit.label());
2331 emitFastArithIntToImmNoCheck(X86::edx);
2332 m_jit.link(notImm, m_jit.label());
2333 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
2334 emitPutArg(X86::eax, 0);
2335 emitPutArg(X86::edx, 4);
2336 emitPutArg(X86::ecx, 8);
2337 emitCall(i, Machine::cti_op_put_by_val);
2338 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
2340 // slow cases for immediate int accesses to arrays
2341 m_jit.link((++iter)->from, m_jit.label());
2342 m_jit.link((++iter)->from, m_jit.label());
2343 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
2344 emitPutArg(X86::eax, 0);
2345 emitPutArg(X86::edx, 4);
2346 emitPutArg(X86::ecx, 8);
2347 emitCall(i, Machine::cti_op_put_by_val_array);
2352 case op_loop_if_true: {
2353 emitSlowScriptCheck(i);
2355 m_jit.link(iter->from, m_jit.label());
2356 emitPutArg(X86::eax, 0);
2357 emitCall(i, Machine::cti_op_jtrue);
2358 m_jit.testl_rr(X86::eax, X86::eax);
2359 unsigned target = instruction[i + 2].u.operand;
2360 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
// --- slow path calling cti_op_pre_dec (mirror of pre_inc above) ---
2365 unsigned srcDst = instruction[i + 1].u.operand;
2366 X86Assembler::JmpSrc notImm = iter->from;
2367 m_jit.link((++iter)->from, m_jit.label());
2368 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
2369 m_jit.link(notImm, m_jit.label());
2370 emitPutArg(X86::eax, 0);
2371 emitCall(i, Machine::cti_op_pre_dec);
2372 emitPutResult(srcDst);
// --- slow path calling cti_op_jless; branch is taken when the helper
// returns false (Je on a zero result falls through to the jump target) ---
2377 unsigned target = instruction[i + 3].u.operand;
2378 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
2380 m_jit.link(iter->from, m_jit.label());
2381 emitPutArg(X86::edx, 0);
2382 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
2383 emitCall(i, Machine::cti_op_jless);
2384 m_jit.testl_rr(X86::eax, X86::eax);
2385 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
2387 m_jit.link(iter->from, m_jit.label());
2388 m_jit.link((++iter)->from, m_jit.label());
2389 emitPutArg(X86::eax, 0);
2390 emitPutArg(X86::edx, 4);
2391 emitCall(i, Machine::cti_op_jless);
2392 m_jit.testl_rr(X86::eax, X86::eax);
2393 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
// --- slow path calling cti_op_not: strip the bool tag before the call ---
2399 m_jit.link(iter->from, m_jit.label());
2400 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
2401 emitPutArg(X86::eax, 0);
2402 emitCall(i, Machine::cti_op_not);
2403 emitPutResult(instruction[i + 1].u.operand);
// --- jfalse-style slow path: uses cti_op_jtrue with the branch inverted ---
2408 m_jit.link(iter->from, m_jit.label());
2409 emitPutArg(X86::eax, 0);
2410 emitCall(i, Machine::cti_op_jtrue);
2411 m_jit.testl_rr(X86::eax, X86::eax);
2412 unsigned target = instruction[i + 2].u.operand;
2413 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 2 + target]); // inverted!
// --- slow path calling cti_op_post_inc: writes both dst and srcDst ---
2418 unsigned srcDst = instruction[i + 2].u.operand;
2419 m_jit.link(iter->from, m_jit.label());
2420 m_jit.link((++iter)->from, m_jit.label());
2421 emitPutArg(X86::eax, 0);
2422 emitCall(i, Machine::cti_op_post_inc);
2423 emitPutResult(instruction[i + 1].u.operand);
2424 emitPutResult(srcDst, X86::edx);
// --- slow path calling cti_op_bitnot ---
2429 m_jit.link(iter->from, m_jit.label());
2430 emitPutArg(X86::eax, 0);
2431 emitCall(i, Machine::cti_op_bitnot);
2432 emitPutResult(instruction[i + 1].u.operand);
// --- slow path calling cti_op_bitand: operand registers differ by which
// side (if either) was a constant immediate in the hot path ---
2437 unsigned src1 = instruction[i + 2].u.operand;
2438 unsigned src2 = instruction[i + 3].u.operand;
2439 unsigned dst = instruction[i + 1].u.operand;
2440 if (getConstantImmediateNumericArg(src1)) {
2441 m_jit.link(iter->from, m_jit.label());
2442 emitGetPutArg(src1, 0, X86::ecx);
2443 emitPutArg(X86::eax, 4);
2444 emitCall(i, Machine::cti_op_bitand);
2446 } else if (getConstantImmediateNumericArg(src2)) {
2447 m_jit.link(iter->from, m_jit.label());
2448 emitPutArg(X86::eax, 0);
2449 emitGetPutArg(src2, 4, X86::ecx);
2450 emitCall(i, Machine::cti_op_bitand);
2453 m_jit.link(iter->from, m_jit.label());
2454 emitGetPutArg(src1, 0, X86::ecx);
2455 emitPutArg(X86::edx, 4);
2456 emitCall(i, Machine::cti_op_bitand);
// --- jtrue-style slow path ---
2463 m_jit.link(iter->from, m_jit.label());
2464 emitPutArg(X86::eax, 0);
2465 emitCall(i, Machine::cti_op_jtrue);
2466 m_jit.testl_rr(X86::eax, X86::eax);
2467 unsigned target = instruction[i + 2].u.operand;
2468 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
// --- slow path calling cti_op_post_dec (mirror of post_inc above) ---
2473 unsigned srcDst = instruction[i + 2].u.operand;
2474 m_jit.link(iter->from, m_jit.label());
2475 m_jit.link((++iter)->from, m_jit.label());
2476 emitPutArg(X86::eax, 0);
2477 emitCall(i, Machine::cti_op_post_dec);
2478 emitPutResult(instruction[i + 1].u.operand);
2479 emitPutResult(srcDst, X86::edx);
// --- simple two-operand slow paths: bitxor / bitor / eq / neq ---
2484 m_jit.link(iter->from, m_jit.label());
2485 emitPutArg(X86::eax, 0);
2486 emitPutArg(X86::edx, 4);
2487 emitCall(i, Machine::cti_op_bitxor);
2488 emitPutResult(instruction[i + 1].u.operand);
2493 m_jit.link(iter->from, m_jit.label());
2494 emitPutArg(X86::eax, 0);
2495 emitPutArg(X86::edx, 4);
2496 emitCall(i, Machine::cti_op_bitor);
2497 emitPutResult(instruction[i + 1].u.operand);
2502 m_jit.link(iter->from, m_jit.label());
2503 emitPutArg(X86::eax, 0);
2504 emitPutArg(X86::edx, 4);
2505 emitCall(i, Machine::cti_op_eq);
2506 emitPutResult(instruction[i + 1].u.operand);
2511 m_jit.link(iter->from, m_jit.label());
2512 emitPutArg(X86::eax, 0);
2513 emitPutArg(X86::edx, 4);
2514 emitCall(i, Machine::cti_op_neq);
2515 emitPutResult(instruction[i + 1].u.operand);
2519 CTI_COMPILE_BINARY_OP_SLOW_CASE(op_stricteq);
2520 CTI_COMPILE_BINARY_OP_SLOW_CASE(op_nstricteq);
2521 case op_instanceof: {
2522 m_jit.link(iter->from, m_jit.label());
2523 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
2524 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
2525 emitGetPutArg(instruction[i + 4].u.operand, 8, X86::ecx);
2526 emitCall(i, Machine::cti_op_instanceof);
2527 emitPutResult(instruction[i + 1].u.operand);
// --- slow path calling cti_op_mod: re-tag both operands before the call ---
2532 X86Assembler::JmpSrc notImm1 = iter->from;
2533 X86Assembler::JmpSrc notImm2 = (++iter)->from;
2534 m_jit.link((++iter)->from, m_jit.label());
2535 emitFastArithReTagImmediate(X86::eax);
2536 emitFastArithReTagImmediate(X86::ecx);
2537 m_jit.link(notImm1, m_jit.label());
2538 m_jit.link(notImm2, m_jit.label());
2539 emitPutArg(X86::eax, 0);
2540 emitPutArg(X86::ecx, 4);
2541 emitCall(i, Machine::cti_op_mod);
2542 emitPutResult(instruction[i + 1].u.operand);
// --- slow path calling cti_op_mul: positive-constant operand shapes take a
// single-entry path; otherwise the generic binary-arith slow case ---
2547 int dst = instruction[i + 1].u.operand;
2548 int src1 = instruction[i + 2].u.operand;
2549 int src2 = instruction[i + 3].u.operand;
2550 JSValue* src1Value = getConstantImmediateNumericArg(src1);
2551 JSValue* src2Value = getConstantImmediateNumericArg(src2);
2553 if (src1Value && ((value = JSImmediate::intValue(src1Value)) > 0)) {
2554 m_jit.link(iter->from, m_jit.label());
2555 // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
2556 emitGetPutArg(src1, 0, X86::ecx);
2557 emitGetPutArg(src2, 4, X86::ecx);
2558 emitCall(i, Machine::cti_op_mul);
2560 } else if (src2Value && ((value = JSImmediate::intValue(src2Value)) > 0)) {
2561 m_jit.link(iter->from, m_jit.label());
2562 // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
2563 emitGetPutArg(src1, 0, X86::ecx);
2564 emitGetPutArg(src2, 4, X86::ecx);
2565 emitCall(i, Machine::cti_op_mul);
2568 compileBinaryArithOpSlowCase(op_mul, iter, dst, src1, src2, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
2575 case op_construct: {
2576 m_jit.link(iter->from, m_jit.label());
2578 // We jump to this slow case if the ctiCode for the codeBlock has not yet been generated; compile it now.
2579 emitCall(i, Machine::cti_vm_compile);
2580 emitCall(i, X86::eax);
2582 // Instead of checking for 0 we could initialize the CodeBlock::ctiCode to point to a trampoline that would trigger the translation.
2584 // Put the return value in dst. In the interpreter, op_ret does this.
2585 emitPutResult(instruction[i + 1].u.operand);
2589 case op_to_jsnumber: {
2590 m_jit.link(iter->from, m_jit.label());
// BUGFIX: the hot path for op_to_jsnumber appends TWO slow-case entries
// (the not-a-JSCell check and the NumberType Jne).  The second link must
// advance the iterator to consume the second entry — matching the pattern
// in op_convert_this above.  Previously this line re-linked iter->from,
// leaving the second entry unconsumed, so the outer loop revisited this
// opcode and emitted the slow case a second time.
2591 m_jit.link((++iter)->from, m_jit.label());
2593 emitPutArg(X86::eax, 0);
2594 emitCall(i, Machine::cti_op_to_jsnumber);
2596 emitPutResult(instruction[i + 1].u.operand);
// Every opcode that registers slow cases must be handled above.
2602 ASSERT_NOT_REACHED();
// Return from the slow path to the start of the next bytecode's hot path.
2606 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i]);
// All structure-stub slots must have been consumed in step with the hot pass.
2609 ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
// Top-level compilation driver.  Emits the prologue (spill the native return
// PC into the call frame, and — for function code — a register-file overflow
// check), runs the three passes (main, link, slow cases), copies the code
// buffer to its final location, then relocates everything that referenced
// provisional offsets: switch jump tables, exception handlers, outgoing
// calls, jsr absolute addresses, and the structure-stub repatch info.
2612 void CTI::privateCompile()
2614 // Could use a popl_m, but would need to offset the following instruction if so.
2615 m_jit.popl_r(X86::ecx);
2616 emitPutToCallFrameHeader(X86::ecx, RegisterFile::ReturnPC);
2618 X86Assembler::JmpSrc slowRegisterFileCheck;
2619 X86Assembler::JmpDst afterRegisterFileCheck;
// slowRegisterFileCheck/afterRegisterFileCheck are only initialized here;
// both later uses are guarded by the identical codeType test.
2620 if (m_codeBlock->codeType == FunctionCode) {
2621 emitGetCTIParam(CTI_ARGS_registerFile, X86::eax);
// edx := callFrame + space for this function's callee registers; overflow
// if that exceeds RegisterFile::m_end.
2622 m_jit.leal_mr(m_codeBlock->numCalleeRegisters * sizeof(Register), X86::edi, X86::edx);
2623 m_jit.cmpl_mr(OBJECT_OFFSET(RegisterFile, m_end), X86::eax, X86::edx);
2624 slowRegisterFileCheck = m_jit.emitUnlinkedJg();
2625 afterRegisterFileCheck = m_jit.label();
2628 privateCompileMainPass();
2629 privateCompileLinkPass();
2630 privateCompileSlowCases();
2632 if (m_codeBlock->codeType == FunctionCode) {
// Out-of-line overflow handler: grow/check the register file, then resume.
2633 m_jit.link(slowRegisterFileCheck, m_jit.label());
2634 emitCall(0, Machine::cti_register_file_check);
2635 X86Assembler::JmpSrc backToBody = m_jit.emitUnlinkedJmp();
2636 m_jit.link(backToBody, afterRegisterFileCheck);
// The link pass must have consumed every recorded jump.
2639 ASSERT(m_jmpTable.isEmpty());
2641 void* code = m_jit.copy();
2644 // Translate vPC offsets into addresses in JIT generated code, for switch tables.
2645 for (unsigned i = 0; i < m_switches.size(); ++i) {
2646 SwitchRecord record = m_switches[i];
2647 unsigned opcodeIndex = record.m_opcodeIndex;
2649 if (record.m_type != SwitchRecord::String) {
2650 ASSERT(record.m_type == SwitchRecord::Immediate || record.m_type == SwitchRecord::Character);
2651 ASSERT(record.m_jumpTable.m_simpleJumpTable->branchOffsets.size() == record.m_jumpTable.m_simpleJumpTable->ctiOffsets.size());
2653 record.m_jumpTable.m_simpleJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
2655 for (unsigned j = 0; j < record.m_jumpTable.m_simpleJumpTable->branchOffsets.size(); ++j) {
// A zero branch offset means "no case here": route to the default target.
2656 unsigned offset = record.m_jumpTable.m_simpleJumpTable->branchOffsets[j];
2657 record.m_jumpTable.m_simpleJumpTable->ctiOffsets[j] = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_simpleJumpTable->ctiDefault;
2660 ASSERT(record.m_type == SwitchRecord::String);
2662 record.m_jumpTable.m_stringJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
2664 StringJumpTable::StringOffsetTable::iterator end = record.m_jumpTable.m_stringJumpTable->offsetTable.end();
2665 for (StringJumpTable::StringOffsetTable::iterator it = record.m_jumpTable.m_stringJumpTable->offsetTable.begin(); it != end; ++it) {
2666 unsigned offset = it->second.branchOffset;
2667 it->second.ctiOffset = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_stringJumpTable->ctiDefault;
// Resolve exception handler targets to native code addresses.
2672 for (Vector<HandlerInfo>::iterator iter = m_codeBlock->exceptionHandlers.begin(); iter != m_codeBlock->exceptionHandlers.end(); ++iter)
2673 iter->nativeCode = m_jit.getRelocatedAddress(code, m_labels[iter->target]);
// Link all outgoing calls and record return-address -> vPC mappings so the
// runtime can recover the bytecode position from a native return address.
2675 for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
2677 X86Assembler::link(code, iter->from, iter->to);
2678 m_codeBlock->ctiReturnAddressVPCMap.add(m_jit.getRelocatedAddress(code, iter->from), iter->opcodeIndex);
2681 // Link absolute addresses for jsr
2682 for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
2683 X86Assembler::linkAbsoluteAddress(code, iter->addrPosition, iter->target);
// Publish repatch anchor points (call return location, hot path begin) for
// each property-access / resolve-global site.
2685 for (unsigned i = 0; i < m_codeBlock->structureIDInstructions.size(); ++i) {
2686 StructureStubInfo& info = m_codeBlock->structureIDInstructions[i];
2687 info.callReturnLocation = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].callReturnLocation);
2688 info.hotPathBegin = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].hotPathBegin);
2691 m_codeBlock->ctiCode = code;
// Compiles a specialized stub for a get_by_id that hit a direct ("self")
// property: verify the receiver in eax is a cell with the expected
// StructureID, then load the value straight out of its property storage at
// cachedOffset.  Either check failing tail-calls the generic failure helper.
// The finished stub is recorded on the CodeBlock (for later deletion) and the
// original call site is repatched to call it.
// NOTE: the reinterpret_cast of a pointer to uint32_t reflects this file's
// x86-32 target.
2694 void CTI::privateCompileGetByIdSelf(StructureID* structureID, size_t cachedOffset, void* returnAddress)
2696 // Check eax is an object of the right StructureID.
2697 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2698 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2699 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2700 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2702 // Checks out okay! - getDirectOffset
2703 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2704 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax)
2707 void* code = m_jit.copy();
// Both failure cases fall back to the generic C++ helper.
2710 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2711 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2713 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2715 ctiRepatchCallByReturnAddress(returnAddress, code);
// Generates a monomorphic stub for a get_by_id whose property lives directly
// on the prototype of the base object's StructureID.  Two build variants:
// with CTI_REPATCH_PIC the stub is spliced into the existing hot path and
// failures fall back to the original slow case; otherwise the stub is
// installed by repatching the slow-case call and failures jump to
// cti_op_get_by_id_fail.
2718 void CTI::privateCompileGetByIdProto(StructureID* structureID, StructureID* prototypeStructureID, size_t cachedOffset, void* returnAddress)
2720 #if USE(CTI_REPATCH_PIC)
2721 StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
2723 // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
2724 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2726 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
2727 // referencing the prototype object - let's speculatively load its table nice and early!)
// The prototype's storage pointer and StructureID slot are baked into the
// stub as absolute addresses (see the static_cast<void*> operands below).
2728 JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_callFrame));
2729 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2730 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2732 // check eax is an object of the right StructureID.
2733 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2734 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2735 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2736 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2738 // Check the prototype object's StructureID had not changed.
2739 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2740 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
2741 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
2743 // Checks out okay! - getDirectOffset
2744 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::ecx);
2746 X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
2748 void* code = m_jit.copy();
2751 // Use the repatch information to link the failure cases back to the original slow case routine.
2752 void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
2753 X86Assembler::link(code, failureCases1, slowCaseBegin);
2754 X86Assembler::link(code, failureCases2, slowCaseBegin);
2755 X86Assembler::link(code, failureCases3, slowCaseBegin);
2757 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
2758 intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
2759 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
2761 // Track the stub we have created so that it will be deleted later.
2762 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2764 // Finally repatch the jump to the slow case back in the hot path to jump here instead.
2765 // FIXME: should revert this repatching, on failure.
2766 intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
2767 X86Assembler::repatchBranchOffset(jmpLocation, code);
// Non-CTI_REPATCH_PIC variant below: same guards, but the result is loaded
// into eax and failed guards jump straight to the generic fail routine.
2769 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
2770 // referencing the prototype object - let's speculatively load its table nice and early!)
2771 JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_callFrame));
2772 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2773 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2775 // check eax is an object of the right StructureID.
2776 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2777 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2778 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2779 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2781 // Check the prototype object's StructureID had not changed.
2782 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2783 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
2784 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
2786 // Checks out okay! - getDirectOffset
2787 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
2791 void* code = m_jit.copy();
2794 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2795 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2796 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2798 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2800 ctiRepatchCallByReturnAddress(returnAddress, code);
// Generates a stub for a get_by_id whose property lives `count` hops up the
// prototype chain.  Emits a StructureID guard for the base object and for
// each prototype in `chain`; any failed guard jumps to cti_op_get_by_id_fail.
2804 void CTI::privateCompileGetByIdChain(StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset, void* returnAddress)
2808 Vector<X86Assembler::JmpSrc> bucketsOfFail;
2810 // Check eax is an object of the right StructureID.
2811 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2812 bucketsOfFail.append(m_jit.emitUnlinkedJne());
2813 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2814 bucketsOfFail.append(m_jit.emitUnlinkedJne());
// Walk the chain at compile time; each prototype's StructureID slot address
// is baked into the stub and guarded against the expected StructureID.
2816 StructureID* currStructureID = structureID;
2817 RefPtr<StructureID>* chainEntries = chain->head();
2818 JSObject* protoObject = 0;
2819 for (unsigned i = 0; i<count; ++i) {
2820 protoObject = static_cast<JSObject*>(currStructureID->prototypeForLookup(m_callFrame));
2821 currStructureID = chainEntries[i].get();
2823 // Check the prototype object's StructureID had not changed.
2824 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2825 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(currStructureID), static_cast<void*>(protoStructureIDAddress));
2826 bucketsOfFail.append(m_jit.emitUnlinkedJne());
2828 ASSERT(protoObject);
// Load the final prototype's property storage and read the cached slot into eax.
2830 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2831 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2832 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
// NOTE(review): this unconditional jmp is appended to the failure list and
// so is linked to cti_op_get_by_id_fail below — confirm the success path
// returns before this point (the intervening lines are not visible here).
2835 bucketsOfFail.append(m_jit.emitUnlinkedJmp());
2837 void* code = m_jit.copy();
// Route every guard failure to the generic fail entry point.
2840 for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
2841 X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2843 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2845 ctiRepatchCallByReturnAddress(returnAddress, code);
// Generates a stub for a put_by_id that overwrites an existing property on
// the base object (no StructureID transition).  edx holds the value to
// store; failed guards jump to cti_op_put_by_id_fail.
2848 void CTI::privateCompilePutByIdReplace(StructureID* structureID, size_t cachedOffset, void* returnAddress)
2850 // check eax is an object of the right StructureID.
2851 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2852 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2853 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2854 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2856 // checks out okay! - putDirectOffset
// Load the property storage pointer, then store edx into the cached slot.
2857 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2858 m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
2861 void* code = m_jit.copy();
2864 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
2865 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
2867 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2869 ctiRepatchCallByReturnAddress(returnAddress, code);
// Out-of-line helper called from the slow path of the put_by_id transition
// stub (see privateCompilePutByIdTransition): performs the StructureID
// transition on the base object, then stores the value at the cached slot.
// It is only called when transitionWillNeedStorageRealloc() was true, so
// presumably transitionTo() grows the property storage — verify.
// (The return statement is not visible in this excerpt.)
2874 static JSValue* transitionObject(StructureID* newStructureID, size_t cachedOffset, JSObject* baseObject, JSValue* value)
2876 baseObject->transitionTo(newStructureID);
2877 baseObject->putDirectOffset(cachedOffset, value);
// Returns true if transitioning from oldStructureID to newStructureID
// changes the property storage capacity — in that case the inline fast path
// cannot be used and the put must call out to transitionObject().
2883 static inline bool transitionWillNeedStorageRealloc(StructureID* oldStructureID, StructureID* newStructureID)
2885 return oldStructureID->propertyStorageCapacity() != newStructureID->propertyStorageCapacity();
// Generates a stub for a put_by_id that adds a new property, transitioning
// the base object from oldStructureID to newStructureID.  The stub guards
// the base object's StructureID, walks the prototype chain (sIDC) verifying
// each StructureID is unchanged and each link is still a plain object, then
// either performs the transition inline (when no property-storage
// reallocation is needed) or calls out to transitionObject().
2888 void CTI::privateCompilePutByIdTransition(StructureID* oldStructureID, StructureID* newStructureID, size_t cachedOffset, StructureIDChain* sIDC, void* returnAddress)
2890 Vector<X86Assembler::JmpSrc, 16> failureCases;
2891 // check eax is an object of the right StructureID.
2892 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2893 failureCases.append(m_jit.emitUnlinkedJne());
2894 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(oldStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2895 failureCases.append(m_jit.emitUnlinkedJne());
2896 Vector<X86Assembler::JmpSrc> successCases;
// ecx tracks the StructureID being checked; each iteration advances it to
// the next prototype's StructureID.
2899 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
2900 // proto(ecx) = baseObject->structureID()->prototype()
2901 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
2902 failureCases.append(m_jit.emitUnlinkedJne());
2903 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
2905 // ecx = baseObject->m_structureID
2906 for (RefPtr<StructureID>* it = sIDC->head(); *it; ++it) {
2907 // null check the prototype - reaching null means the chain checked out.
2908 m_jit.cmpl_i32r(reinterpret_cast<intptr_t> (jsNull()), X86::ecx);
2909 successCases.append(m_jit.emitUnlinkedJe());
2911 // Check the structure id
2912 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(it->get()), OBJECT_OFFSET(JSCell, m_structureID), X86::ecx);
2913 failureCases.append(m_jit.emitUnlinkedJne());
2915 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::ecx, X86::ecx);
2916 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
2917 failureCases.append(m_jit.emitUnlinkedJne());
2918 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
// NOTE(review): this guard pairs with a compare not visible in this excerpt
// (presumably a final jsNull() check that the chain terminated) — verify.
2921 failureCases.append(m_jit.emitUnlinkedJne());
2922 for (unsigned i = 0; i < successCases.size(); ++i)
2923 m_jit.link(successCases[i], m_jit.label());
2925 X86Assembler::JmpSrc callTarget;
2926 // Fast case, don't need to do any heavy lifting, so don't bother making a call.
2927 if (!transitionWillNeedStorageRealloc(oldStructureID, newStructureID)) {
2928 // Assumes m_refCount can be decremented easily, refcount decrement is safe as
2929 // codeblock should ensure oldStructureID->m_refCount > 0
2930 m_jit.subl_i8m(1, reinterpret_cast<void*>(oldStructureID));
2931 m_jit.addl_i8m(1, reinterpret_cast<void*>(newStructureID));
// Write the new StructureID into the cell, then store the value (edx) into
// the property slot.
2932 m_jit.movl_i32m(reinterpret_cast<uint32_t>(newStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2935 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2936 m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
2938 // Slow case transition -- we're going to need to do quite a bit of work,
2939 // so just make a call
// cdecl argument order for transitionObject(newStructureID, cachedOffset,
// baseObject, value): last argument (value in edx) pushed first.
2940 m_jit.pushl_r(X86::edx);
2941 m_jit.pushl_r(X86::eax);
2942 m_jit.movl_i32r(cachedOffset, X86::eax);
2943 m_jit.pushl_r(X86::eax);
2944 m_jit.movl_i32r(reinterpret_cast<uint32_t>(newStructureID), X86::eax);
2945 m_jit.pushl_r(X86::eax);
2946 callTarget = m_jit.emitCall();
// Pop the four pushed arguments after the call returns.
2947 m_jit.addl_i32r(4 * sizeof(void*), X86::esp);
// All guard failures funnel through one shared jump to the fail routine.
2951 X86Assembler::JmpSrc failureJump;
2952 if (failureCases.size()) {
2953 for (unsigned i = 0; i < failureCases.size(); ++i)
2954 m_jit.link(failureCases[i], m_jit.label());
2955 m_jit.emitRestoreArgumentReferenceForTrampoline();
2956 failureJump = m_jit.emitUnlinkedJmp();
2959 void* code = m_jit.copy();
2962 if (failureCases.size())
2963 X86Assembler::link(code, failureJump, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
2965 if (transitionWillNeedStorageRealloc(oldStructureID, newStructureID))
2966 X86Assembler::link(code, callTarget, reinterpret_cast<void*>(transitionObject));
2968 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2970 ctiRepatchCallByReturnAddress(returnAddress, code);
// Generates the shared trampoline that reads JSArray::length.  The array
// check is a vtable-pointer compare against the known JSArray vptr; the
// length is boxed as (length * 2) + 1 — the immediate integer encoding —
// and a signed overflow on the doubling bails to the slow case.
2973 void* CTI::privateCompileArrayLengthTrampoline()
2975 // Check eax is an array
2976 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2977 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2978 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
2979 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2981 // Checks out okay! - get the length from the storage
2982 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::eax);
2983 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::eax, X86::eax);
// Box the length: eax = length * 2 + 1; jo catches lengths too large to box.
2985 m_jit.addl_rr(X86::eax, X86::eax);
2986 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
2987 m_jit.addl_i8r(1, X86::eax);
2991 void* code = m_jit.copy();
2994 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2995 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2996 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
// Generates the shared trampoline that reads a JSString's length from its
// UString::Rep.  Mirrors the array-length trampoline: vptr check, then the
// length is boxed as (len * 2) + 1 with an overflow bail-out.
3001 void* CTI::privateCompileStringLengthTrampoline()
3003 // Check eax is a string
3004 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
3005 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
3006 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsStringVptr), X86::eax);
3007 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
3009 // Checks out okay! - get the length from the Ustring.
3010 m_jit.movl_mr(OBJECT_OFFSET(JSString, m_value) + OBJECT_OFFSET(UString, m_rep), X86::eax, X86::eax);
3011 m_jit.movl_mr(OBJECT_OFFSET(UString::Rep, len), X86::eax, X86::eax);
// Box the length: eax = len * 2 + 1; jo catches lengths too large to box.
3013 m_jit.addl_rr(X86::eax, X86::eax);
3014 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
3015 m_jit.addl_i8r(1, X86::eax);
3019 void* code = m_jit.copy();
3022 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
3023 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
3024 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
// Patches an existing get_by_id hot path in place for a self (own-property)
// hit: rewrites the inline StructureID immediate and the property-map
// displacement rather than compiling a new stub.
3029 void CTI::patchGetByIdSelf(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
3031 StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
3033 // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
3034 // Should probably go to Machine::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
3035 ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_get_by_id_generic));
3037 // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
3038 X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
3039 X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdStructureID, reinterpret_cast<uint32_t>(structureID));
// Patches an existing put_by_id hot path in place for a replace (no
// transition): rewrites the inline StructureID immediate and the
// property-map displacement rather than compiling a new stub.
3042 void CTI::patchPutByIdReplace(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
3044 StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
3046 // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
3047 // Should probably go to Machine::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
3048 ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_put_by_id_generic));
3050 // Repatch the offset into the property map to store to, then repatch the StructureID to look for.
3051 X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
3052 X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdStructureID, reinterpret_cast<uint32_t>(structureID));
// Compiles a one-off stub, patched into an existing get_by_id site, that
// returns a JSArray's length.  The length is computed in ecx and boxed as
// (length * 2) + 1; on success control jumps back into the hot path, which
// performs the store of ecx for us.
3055 void CTI::privateCompilePatchGetArrayLength(void* returnAddress)
3057 StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
3059 // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
3060 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
3062 // Check eax is an array
3063 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
3064 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
3065 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
3066 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
3068 // Checks out okay! - get the length from the storage
3069 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
3070 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::ecx, X86::ecx);
// Box the length: ecx = length * 2 + 1; jo catches lengths too large to box.
3072 m_jit.addl_rr(X86::ecx, X86::ecx);
3073 X86Assembler::JmpSrc failureClobberedECX = m_jit.emitUnlinkedJo();
3074 m_jit.addl_i8r(1, X86::ecx);
3076 X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
// ecx was clobbered by the doubling, so restore the argument reference
// before falling back to the slow case.
3078 m_jit.link(failureClobberedECX, m_jit.label());
3079 m_jit.emitRestoreArgumentReference();
3080 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJmp();
3082 void* code = m_jit.copy();
3085 // Use the repatch information to link the failure cases back to the original slow case routine.
3086 void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
3087 X86Assembler::link(code, failureCases1, slowCaseBegin);
3088 X86Assembler::link(code, failureCases2, slowCaseBegin);
3089 X86Assembler::link(code, failureCases3, slowCaseBegin);
3091 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
3092 intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
3093 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
3095 // Track the stub we have created so that it will be deleted later.
3096 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
3098 // Finally repatch the jump to the slow case back in the hot path to jump here instead.
3099 // FIXME: should revert this repatching, on failure.
3100 intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
3101 X86Assembler::repatchBranchOffset(jmpLocation, code);
// Emits code to load variable-object register `index` into dst:
// dst = variableObject->d->registers[index].
3104 void CTI::emitGetVariableObjectRegister(X86Assembler::RegisterID variableObject, int index, X86Assembler::RegisterID dst)
3106 m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, dst);
3107 m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), dst, dst);
3108 m_jit.movl_mr(index * sizeof(Register), dst, dst);
// Emits code to store src into variable-object register `index`:
// variableObject->d->registers[index] = src.
// Note: the variableObject register is clobbered by the pointer chasing.
3111 void CTI::emitPutVariableObjectRegister(X86Assembler::RegisterID src, X86Assembler::RegisterID variableObject, int index)
3113 m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, variableObject);
3114 m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), variableObject, variableObject);
3115 m_jit.movl_rm(src, index * sizeof(Register), variableObject);
// Compiles a regular expression to native code using WREC.  On success the
// compiled matcher is returned and the capture count is reported through
// numSubpatterns_ptr; on failure *error_ptr is set to a message.  The
// emitted matcher retries the pattern at successive start positions until
// it matches or the start position passes the subject length, in which case
// it returns -1 (no match).
3120 void* CTI::compileRegExp(Machine* machine, const UString& pattern, unsigned* numSubpatterns_ptr, const char** error_ptr, bool ignoreCase, bool multiline)
3122 // TODO: better error messages
3123 if (pattern.size() > MaxPatternSize) {
3124 *error_ptr = "regular expression too large";
3128 X86Assembler jit(machine->jitCodeBuffer());
3129 WRECParser parser(pattern, ignoreCase, multiline, jit);
3131 jit.emitConvertToFastCall();
3133 // Preserve regs & initialize outputRegister.
3134 jit.pushl_r(WRECGenerator::outputRegister);
3135 jit.pushl_r(WRECGenerator::currentValueRegister);
3136 // push pos onto the stack, both to preserve and as a parameter available to parseDisjunction
3137 jit.pushl_r(WRECGenerator::currentPositionRegister);
3138 // load output pointer
3143 , X86::esp, WRECGenerator::outputRegister);
3145 // restart point on match fail.
3146 WRECGenerator::JmpDst nextLabel = jit.label();
3148 // (1) Parse Disjunction:
3150 // Parsing the disjunction should fully consume the pattern.
3151 JmpSrcVector failures;
3152 parser.parseDisjunction(failures);
// The disjunction must consume the whole pattern; anything left over means
// the pattern is malformed.  (Condition negated: the error is flagged when
// parsing did NOT reach the end of the pattern.)
3153 if (!parser.isEndOfPattern()) {
3154 parser.m_err = WRECParser::Error_malformedPattern;
3157 // TODO: better error messages
3158 *error_ptr = "TODO: better error messages";
3163 // Set return value & pop registers from the stack.
// When no output vector was supplied, skip writing the match bounds.
3165 jit.testl_rr(WRECGenerator::outputRegister, WRECGenerator::outputRegister);
3166 WRECGenerator::JmpSrc noOutput = jit.emitUnlinkedJe();
// Store the current position at output+4 (presumably the match end — TODO
// confirm), then the saved start position (popped) at output+0.
3168 jit.movl_rm(WRECGenerator::currentPositionRegister, 4, WRECGenerator::outputRegister);
3169 jit.popl_r(X86::eax);
3170 jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
3171 jit.popl_r(WRECGenerator::currentValueRegister);
3172 jit.popl_r(WRECGenerator::outputRegister);
3175 jit.link(noOutput, jit.label());
3177 jit.popl_r(X86::eax);
3178 jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
3179 jit.popl_r(WRECGenerator::currentValueRegister);
3180 jit.popl_r(WRECGenerator::outputRegister);
3184 // All fails link to here. Progress the start point & if it is within scope, loop.
3185 // Otherwise, return fail value.
3186 WRECGenerator::JmpDst here = jit.label();
3187 for (unsigned i = 0; i < failures.size(); ++i)
3188 jit.link(failures[i], here);
// Bump the saved start position on the stack and retry while it is still
// within the subject length.
3191 jit.movl_mr(X86::esp, WRECGenerator::currentPositionRegister);
3192 jit.addl_i8r(1, WRECGenerator::currentPositionRegister);
3193 jit.movl_rm(WRECGenerator::currentPositionRegister, X86::esp);
3194 jit.cmpl_rr(WRECGenerator::lengthRegister, WRECGenerator::currentPositionRegister);
3195 jit.link(jit.emitUnlinkedJle(), nextLabel);
// Out of retries: discard the saved position, return -1 (no match), and
// restore the preserved registers.
3197 jit.addl_i8r(4, X86::esp);
3199 jit.movl_i32r(-1, X86::eax);
3200 jit.popl_r(WRECGenerator::currentValueRegister);
3201 jit.popl_r(WRECGenerator::outputRegister);
3204 *numSubpatterns_ptr = parser.m_numSubpatterns;
3206 void* code = jit.copy();
3211 #endif // ENABLE(WREC)
3215 #endif // ENABLE(CTI)