2 * Copyright (C) 2008 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "CodeBlock.h"
33 #include "JSFunction.h"
35 #include "wrec/WREC.h"
36 #include "ResultType.h"
38 #include <sys/sysctl.h>
// Fragments of the run-time SSE2 detection machinery (class SSE2Check / isSSE2Present()).
// NOTE(review): the enclosing declarations and preprocessor conditionals are not visible
// in this chunk; the lines below come from several scopes.
return true; // All X86 Macs are guaranteed to support at least SSE2
static const int SSE2FeatureBit = 1 << 26; // CPUID.01H:EDX bit 26 reports SSE2 support
mov eax, 1 // cpuid function 1 gives us the standard feature set
// FIXME: Add GCC code to do above asm
present = (flags & SSE2FeatureBit) != 0; // cache the CPUID probe result in the checker object
static SSE2Check check; // function-local static: the probe runs only once per process
#if COMPILER(GCC) && PLATFORM(X86)
// ctiTrampoline (GCC flavor): assembly entry stub from C++ into CTI-generated code.
// Establishes the JIT register conventions before tail-calling the generated code:
// esi holds the slow-script timeout counter, edi holds CTI_ARGS_r (the Register* frame).
// NOTE(review): the asm() wrapper lines are not visible in this chunk.
".globl _ctiTrampoline" "\n"
"_ctiTrampoline:" "\n"
"subl $0x24, %esp" "\n" // reserve outgoing-argument space for CTI helper calls
"movl $512, %esi" "\n" // initialize the timeout-check countdown (see emitSlowScriptCheck)
"movl 0x38(%esp), %edi" "\n" // Ox38 = 0x0E * 4, 0x0E = CTI_ARGS_r
"call *0x30(%esp)" "\n" // Ox30 = 0x0C * 4, 0x0C = CTI_ARGS_code
"addl $0x24, %esp" "\n" // unwind the argument area before returning to C++
// ctiVMThrowTrampoline: exception path — calls Machine::cti_vm_throw (mangled name),
// then pops the same 0x24 bytes the entry trampoline reserved.
".globl _ctiVMThrowTrampoline" "\n"
"_ctiVMThrowTrampoline:" "\n"
"call __ZN3JSC7Machine12cti_vm_throwEPv" "\n"
"addl $0x24, %esp" "\n"
// MSVC variants of the two trampolines. __declspec(naked) suppresses the compiler's
// prologue/epilogue; the bodies are inline asm, mostly not visible in this chunk.
__declspec(naked) JSValue* ctiTrampoline(void* code, RegisterFile*, Register*, JSValue** exception, Profiler**, JSGlobalData*)
mov edi, [esp + 0x38]; // load CTI_ARGS_r into edi — same convention as the GCC stub above
__declspec(naked) void ctiVMThrowTrampoline()
call JSC::Machine::cti_vm_throw; // hand control to the VM's exception handler
142 ALWAYS_INLINE bool CTI::isConstant(int src)
144 return src >= m_codeBlock->numVars && src < m_codeBlock->numVars + m_codeBlock->numConstants;
// Fetches the JSValue* for a constant register index (callers check isConstant() first).
// The index is biased by numVars because constants follow the variables in the register file.
ALWAYS_INLINE JSValue* CTI::getConstant(ExecState* exec, int src)
return m_codeBlock->constantRegisters[src - m_codeBlock->numVars].jsValue(exec);
// get arg puts an arg from the SF register array into a h/w register
ALWAYS_INLINE void CTI::emitGetArg(int src, X86Assembler::RegisterID dst)
// TODO: we want to reuse values that are already in registers if we can - add a register allocator!
if (isConstant(src)) {
JSValue* js = getConstant(m_exec, src);
m_jit.movl_i32r(reinterpret_cast<unsigned>(js), dst); // constants can be baked in as 32-bit immediates
// else branch (not visible here): load from the register file, edi = Register* base
m_jit.movl_mr(src * sizeof(Register), X86::edi, dst);

// get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
ALWAYS_INLINE void CTI::emitGetPutArg(unsigned src, unsigned offset, X86Assembler::RegisterID scratch)
if (isConstant(src)) {
JSValue* js = getConstant(m_exec, src);
// offset is biased by sizeof(void*) to skip the return-address slot on the stack
m_jit.movl_i32m(reinterpret_cast<unsigned>(js), offset + sizeof(void*), X86::esp);
// else (not visible here): bounce the value through 'scratch' into the outgoing-arg area
m_jit.movl_mr(src * sizeof(Register), X86::edi, scratch);
m_jit.movl_rm(scratch, offset + sizeof(void*), X86::esp);

// puts an arg onto the stack, as an arg to a context threaded function.
ALWAYS_INLINE void CTI::emitPutArg(X86Assembler::RegisterID src, unsigned offset)
m_jit.movl_rm(src, offset + sizeof(void*), X86::esp);

// Stores a compile-time-known 32-bit constant into an outgoing argument slot.
ALWAYS_INLINE void CTI::emitPutArgConstant(unsigned value, unsigned offset)
m_jit.movl_i32m(value, offset + sizeof(void*), X86::esp);
// If 'src' is a constant register holding an immediate number, returns that JSValue*;
// otherwise returns 0 (non-constant fall-through not visible in this chunk).
ALWAYS_INLINE JSValue* CTI::getConstantImmediateNumericArg(unsigned src)
if (isConstant(src)) {
JSValue* js = getConstant(m_exec, src);
return JSImmediate::isNumber(js) ? js : 0;
// The CTI "params" live in fixed slots in the reserved stack area (indexed by 'name',
// one void*-sized slot each); the call-frame header fields live at fixed Register
// offsets from edi. The four helpers below read/write those two areas.

// Store a compile-time pointer constant into CTI param slot 'name'.
ALWAYS_INLINE void CTI::emitPutCTIParam(void* value, unsigned name)
m_jit.movl_i32m(reinterpret_cast<intptr_t>(value), name * sizeof(void*), X86::esp);

// Store a register's value into CTI param slot 'name'.
ALWAYS_INLINE void CTI::emitPutCTIParam(X86Assembler::RegisterID from, unsigned name)
m_jit.movl_rm(from, name * sizeof(void*), X86::esp);

// Load CTI param slot 'name' into a register.
ALWAYS_INLINE void CTI::emitGetCTIParam(unsigned name, X86Assembler::RegisterID to)
m_jit.movl_mr(name * sizeof(void*), X86::esp, to);

// Store a register into a call-frame header entry (edi = current Register* frame).
ALWAYS_INLINE void CTI::emitPutToCallFrameHeader(X86Assembler::RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
m_jit.movl_rm(from, entry * sizeof(Register), X86::edi);

// Load a call-frame header entry into a register.
ALWAYS_INLINE void CTI::emitGetFromCallFrameHeader(RegisterFile::CallFrameHeaderEntry entry, X86Assembler::RegisterID to)
m_jit.movl_mr(entry * sizeof(Register), X86::edi, to);
// Writes the value in 'from' to destination virtual register 'dst' in the register file.
ALWAYS_INLINE void CTI::emitPutResult(unsigned dst, X86Assembler::RegisterID from)
m_jit.movl_rm(from, dst * sizeof(Register), X86::edi);
// FIXME: #ifndef NDEBUG, Write the correct m_type to the register.

// Initializes virtual register 'dst' to the undefined value (baked in as an immediate).
ALWAYS_INLINE void CTI::emitInitRegister(unsigned dst)
m_jit.movl_i32m(reinterpret_cast<unsigned>(jsUndefined()), dst * sizeof(Register), X86::edi);
// FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
#if ENABLE(SAMPLING_TOOL)
// Flag toggled around helper calls so the sampling profiler can tell whether
// execution is currently inside C++ helper code (1) or JIT code (0).
unsigned inCalledCode = 0;

// Overwrites the return address stored at 'where' (body not visible in this chunk).
void ctiSetReturnAddress(void** where, void* what)

// Repatches the call instruction that returns to 'where': the 32-bit relative call
// operand sits immediately before the return address, so write (what - where) there.
void ctiRepatchCallByReturnAddress(void* where, void* what)
(static_cast<void**>(where))[-1] = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(what) - reinterpret_cast<uintptr_t>(where));
// Debug aid: prints a one-character type classification for each of two operands
// ('i' immediate number, 'b' boolean, 'u' undefined, 'n' null, 's' string, 'o' object,
// '*' unknown/non-constant). Only constant operands can be classified at compile time.
// NOTE(review): the declarations of which1/which2 (presumably initialized to '*')
// are not visible in this chunk.
void CTI::printOpcodeOperandTypes(unsigned src1, unsigned src2)
if (isConstant(src1)) {
JSValue* js = getConstant(m_exec, src1);
JSImmediate::isImmediate(js) ?
(JSImmediate::isNumber(js) ? 'i' :
JSImmediate::isBoolean(js) ? 'b' :
js->isUndefined() ? 'u' :
js->isNull() ? 'n' : '?')
(js->isString() ? 's' :
js->isObject() ? 'o' :
if (isConstant(src2)) {
JSValue* js = getConstant(m_exec, src2);
JSImmediate::isImmediate(js) ?
(JSImmediate::isNumber(js) ? 'i' :
JSImmediate::isBoolean(js) ? 'b' :
js->isUndefined() ? 'u' :
js->isNull() ? 'n' : '?')
(js->isString() ? 's' :
js->isObject() ? 'o' :
// Non-short-circuit '|' is used deliberately or accidentally; harmless here since
// both comparisons are side-effect free. '||' would be the conventional spelling.
if ((which1 != '*') | (which2 != '*'))
fprintf(stderr, "Types %c %c\n", which1, which2);
// emitCall overload family. Each overload plants a call instruction and records it in
// m_calls so the link phase can bind it to the real helper address. The CTIHelper_*
// overloads differ only in the helper-function signature they record; all toggle the
// sampling-tool inCalledCode flag around the call when SAMPLING_TOOL is enabled.
// NOTE(review): the 'return call;' lines and closing braces are not visible in this chunk.

// Indirect call through register 'r' (used to enter a callee's generated code).
ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, X86::RegisterID r)
m_jit.emitRestoreArgumentReference();
X86Assembler::JmpSrc call = m_jit.emitCall(r);
m_calls.append(CallRecord(call, opcodeIndex));

// Call a helper returning a JSValue* (CTIHelper_j).
ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_j helper)
#if ENABLE(SAMPLING_TOOL)
m_jit.movl_i32m(1, &inCalledCode);
m_jit.emitRestoreArgumentReference();
X86Assembler::JmpSrc call = m_jit.emitCall();
m_calls.append(CallRecord(call, helper, opcodeIndex));
#if ENABLE(SAMPLING_TOOL)
m_jit.movl_i32m(0, &inCalledCode);

// Call a helper returning a pointer (CTIHelper_p).
ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_p helper)
#if ENABLE(SAMPLING_TOOL)
m_jit.movl_i32m(1, &inCalledCode);
m_jit.emitRestoreArgumentReference();
X86Assembler::JmpSrc call = m_jit.emitCall();
m_calls.append(CallRecord(call, helper, opcodeIndex));
#if ENABLE(SAMPLING_TOOL)
m_jit.movl_i32m(0, &inCalledCode);

// Call a helper returning a bool (CTIHelper_b).
ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_b helper)
#if ENABLE(SAMPLING_TOOL)
m_jit.movl_i32m(1, &inCalledCode);
m_jit.emitRestoreArgumentReference();
X86Assembler::JmpSrc call = m_jit.emitCall();
m_calls.append(CallRecord(call, helper, opcodeIndex));
#if ENABLE(SAMPLING_TOOL)
m_jit.movl_i32m(0, &inCalledCode);

// Call a void helper (CTIHelper_v).
ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_v helper)
#if ENABLE(SAMPLING_TOOL)
m_jit.movl_i32m(1, &inCalledCode);
m_jit.emitRestoreArgumentReference();
X86Assembler::JmpSrc call = m_jit.emitCall();
m_calls.append(CallRecord(call, helper, opcodeIndex));
#if ENABLE(SAMPLING_TOOL)
m_jit.movl_i32m(0, &inCalledCode);

// Call a helper returning a string-ish value (CTIHelper_s).
ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_s helper)
#if ENABLE(SAMPLING_TOOL)
m_jit.movl_i32m(1, &inCalledCode);
m_jit.emitRestoreArgumentReference();
X86Assembler::JmpSrc call = m_jit.emitCall();
m_calls.append(CallRecord(call, helper, opcodeIndex));
#if ENABLE(SAMPLING_TOOL)
m_jit.movl_i32m(0, &inCalledCode);

// Call a helper with the two-word return convention (CTIHelper_2).
ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_2 helper)
#if ENABLE(SAMPLING_TOOL)
m_jit.movl_i32m(1, &inCalledCode);
m_jit.emitRestoreArgumentReference();
X86Assembler::JmpSrc call = m_jit.emitCall();
m_calls.append(CallRecord(call, helper, opcodeIndex));
#if ENABLE(SAMPLING_TOOL)
m_jit.movl_i32m(0, &inCalledCode);
// Plants a branch to the slow case if 'reg' does not hold a JSCell pointer:
// any set tag bit means the value is an immediate, not a cell.
ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotJSCell(X86Assembler::RegisterID reg, unsigned opcodeIndex)
m_jit.testl_i32r(JSImmediate::TagMask, reg);
m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), opcodeIndex));

// Plants a branch to the slow case if 'reg' does not hold an immediate number
// (the integer tag bit must be set for immediate numbers).
ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImmNum(X86Assembler::RegisterID reg, unsigned opcodeIndex)
m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, reg);
m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), opcodeIndex));

// Checks both registers at once: AND the two values into ecx — the integer tag bit
// survives only if it is set in both — then reuse the single-register check.
// Clobbers ecx.
ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImmNums(X86Assembler::RegisterID reg1, X86Assembler::RegisterID reg2, unsigned opcodeIndex)
m_jit.movl_rr(reg1, X86::ecx);
m_jit.andl_rr(reg2, X86::ecx);
emitJumpSlowCaseIfNotImmNum(X86::ecx, opcodeIndex);
// Compile-time version of de-tagging: strips the integer tag bit from a constant
// immediate number so it can be used directly in tag-preserving arithmetic.
ALWAYS_INLINE unsigned CTI::getDeTaggedConstantImmediate(JSValue* imm)
ASSERT(JSImmediate::isNumber(imm));
return reinterpret_cast<unsigned>(imm) & ~JSImmediate::TagBitTypeInteger;

// Runtime de-tag of an immediate number held in 'reg'.
ALWAYS_INLINE void CTI::emitFastArithDeTagImmediate(X86Assembler::RegisterID reg)
// op_mod relies on this being a sub - setting zf if result is 0.
m_jit.subl_i8r(JSImmediate::TagBitTypeInteger, reg);

// Re-applies the integer tag bit; uses add (caller must know the bit is clear).
ALWAYS_INLINE void CTI::emitFastArithReTagImmediate(X86Assembler::RegisterID reg)
m_jit.addl_i8r(JSImmediate::TagBitTypeInteger, reg);

// Sets the tag bit with 'or', which is safe whether or not it is already set.
ALWAYS_INLINE void CTI::emitFastArithPotentiallyReTagImmediate(X86Assembler::RegisterID reg)
m_jit.orl_i32r(JSImmediate::TagBitTypeInteger, reg);

// Converts a tagged immediate to a machine int: arithmetic shift right by one
// drops the tag bit while preserving the sign.
ALWAYS_INLINE void CTI::emitFastArithImmToInt(X86Assembler::RegisterID reg)
m_jit.sarl_i8r(1, reg);

// Converts a machine int to a tagged immediate: shift left by one (via add reg,reg,
// which sets OF on overflow) then re-tag; overflow branches to the slow case.
ALWAYS_INLINE void CTI::emitFastArithIntToImmOrSlowCase(X86Assembler::RegisterID reg, unsigned opcodeIndex)
m_jit.addl_rr(reg, reg);
m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), opcodeIndex));
emitFastArithReTagImmediate(reg);

// Same conversion without the overflow check — caller guarantees the value fits.
ALWAYS_INLINE void CTI::emitFastArithIntToImmNoCheck(X86Assembler::RegisterID reg)
m_jit.addl_rr(reg, reg);
emitFastArithReTagImmediate(reg);

// Turns a 0/1 machine value into a tagged immediate boolean: shift the payload
// into place, then OR in the full boolean tag.
ALWAYS_INLINE void CTI::emitTagAsBoolImmediate(X86Assembler::RegisterID reg)
m_jit.shl_i8r(JSImmediate::ExtendedPayloadShift, reg);
m_jit.orl_i32r(JSImmediate::FullTagTypeBool, reg);
// CTI compiler constructor: captures the machine/exec/codeBlock context and
// pre-sizes the per-instruction label table and the property-access stub-info
// table to match the CodeBlock. A null codeBlock yields empty tables.
// NOTE(review): some initializer-list entries (e.g. m_machine, m_exec) are not
// visible in this chunk.
CTI::CTI(Machine* machine, ExecState* exec, CodeBlock* codeBlock)
: m_jit(machine->jitCodeBuffer())
, m_codeBlock(codeBlock)
, m_labels(codeBlock ? codeBlock->instructions.size() : 0)
, m_structureStubCompilationInfo(codeBlock ? codeBlock->structureIDInstructions.size() : 0)
// Boilerplate expanders used inside privateCompileMainPass's opcode switch:
// marshal the operand(s) into the outgoing-arg area, call the cti_<name> helper,
// and store the helper's result into the destination register.
// (Comments cannot be placed on the continued macro lines themselves — a '//'
// before the trailing backslash would break the continuation.)
#define CTI_COMPILE_BINARY_OP(name) \
emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
emitCall(i, Machine::cti_##name); \
emitPutResult(instruction[i + 1].u.operand); \

// Single-operand variant of the above.
#define CTI_COMPILE_UNARY_OP(name) \
emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
emitCall(i, Machine::cti_##name); \
emitPutResult(instruction[i + 1].u.operand); \

#if ENABLE(SAMPLING_TOOL)
// Opcode currently being executed, observed by the sampling profiler; -1 = none.
OpcodeID currentOpcodeID = static_cast<OpcodeID>(-1);
// Fills in the callee's call-frame header (edx = new Register* frame base) after a
// JS function call has been resolved: code block, callee, scope chain, argument
// count, and a back-pointer to the caller's registers.
void CTI::compileOpCallInitializeCallFrame(unsigned callee, unsigned argCount)
emitGetArg(callee, X86::ecx); // Load callee JSFunction into ecx
m_jit.movl_rm(X86::eax, RegisterFile::CodeBlock * static_cast<int>(sizeof(Register)), X86::edx); // callee CodeBlock was returned in eax
m_jit.movl_i32m(reinterpret_cast<unsigned>(nullJSValue), RegisterFile::OptionalCalleeArguments * static_cast<int>(sizeof(Register)), X86::edx);
m_jit.movl_rm(X86::ecx, RegisterFile::Callee * static_cast<int>(sizeof(Register)), X86::edx);
m_jit.movl_mr(OBJECT_OFFSET(JSFunction, m_scopeChain) + OBJECT_OFFSET(ScopeChain, m_node), X86::ecx, X86::ecx); // newScopeChain
m_jit.movl_i32m(argCount, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register)), X86::edx);
m_jit.movl_rm(X86::edi, RegisterFile::CallerRegisters * static_cast<int>(sizeof(Register)), X86::edx);
m_jit.movl_rm(X86::ecx, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register)), X86::edx);
// Compiles op_call / op_call_eval / op_construct. Strategy:
//   1. Marshal the call metadata (return point, argCount, registerOffset, ...) into
//      the outgoing-argument area; slot layout differs for OpConstruct.
//   2. For eval, try cti_op_call_eval first; a sentinel compare detects whether the
//      eval path actually handled the call.
//   3. Inline-check whether the callee is a JSFunction (vptr compare); host
//      functions/constructors go through the NotJS* helpers, JS functions get their
//      frame initialized inline and their generated code called directly.
// NOTE(review): several lines (closing braces, else keywords, the final
// emitPutResult) are not visible in this chunk.
void CTI::compileOpCall(Instruction* instruction, unsigned i, CompileOpCallType type)
int dst = instruction[i + 1].u.operand;
int callee = instruction[i + 2].u.operand;
int firstArg = instruction[i + 4].u.operand;
int argCount = instruction[i + 5].u.operand;
int registerOffset = instruction[i + 6].u.operand;
if (type == OpCallEval)
emitGetPutArg(instruction[i + 3].u.operand, 16, X86::ecx); // eval also passes the this/base operand
if (type == OpConstruct) {
// Construct helper takes an extra operand, so the slots shift up by one word.
emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 20);
emitPutArgConstant(argCount, 16);
emitPutArgConstant(registerOffset, 12);
emitPutArgConstant(firstArg, 8);
emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
// (else branch, braces not visible) plain-call slot layout:
emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 12);
emitPutArgConstant(argCount, 8);
emitPutArgConstant(registerOffset, 4);
int thisVal = instruction[i + 3].u.operand;
if (thisVal == missingThisObjectMarker()) {
// FIXME: should this be loaded dynamically off m_exec?
m_jit.movl_i32m(reinterpret_cast<unsigned>(m_exec->globalThisValue()), firstArg * sizeof(Register), X86::edi);
emitGetArg(thisVal, X86::ecx);
emitPutResult(firstArg, X86::ecx);
X86Assembler::JmpSrc wasEval;
if (type == OpCallEval) {
emitGetPutArg(callee, 0, X86::ecx);
emitCall(i, Machine::cti_op_call_eval);
// impossibleValue is the sentinel meaning "this was not actually an eval call".
m_jit.cmpl_i32r(reinterpret_cast<unsigned>(JSImmediate::impossibleValue()), X86::eax);
wasEval = m_jit.emitUnlinkedJne();
// this sets up the first arg to op_cti_call (func), and explicitly leaves the value in ecx (checked just below).
emitGetArg(callee, X86::ecx);
// this sets up the first arg to op_cti_call (func), and explicitly leaves the value in ecx (checked just below).
emitGetPutArg(callee, 0, X86::ecx);
// Fast check for JS function.
m_jit.testl_i32r(JSImmediate::TagMask, X86::ecx); // immediates cannot be functions
X86Assembler::JmpSrc isNotObject = m_jit.emitUnlinkedJne();
m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsFunctionVptr), X86::ecx); // vptr identifies JSFunction exactly
X86Assembler::JmpSrc isJSFunction = m_jit.emitUnlinkedJe();
m_jit.link(isNotObject, m_jit.label());
// This handles host functions
emitCall(i, ((type == OpConstruct) ? Machine::cti_op_construct_NotJSConstruct : Machine::cti_op_call_NotJSFunction));
X86Assembler::JmpSrc wasNotJSFunction = m_jit.emitUnlinkedJmp();
m_jit.link(isJSFunction, m_jit.label());
// This handles JSFunctions
emitCall(i, (type == OpConstruct) ? Machine::cti_op_construct_JSConstruct : Machine::cti_op_call_JSFunction);
compileOpCallInitializeCallFrame(callee, argCount);
// load ctiCode from the new codeBlock.
m_jit.movl_mr(OBJECT_OFFSET(CodeBlock, ctiCode), X86::eax, X86::eax);
// Setup the new value of 'r' in edi, and on the stack, too.
emitPutCTIParam(X86::edx, CTI_ARGS_r);
m_jit.movl_rr(X86::edx, X86::edi);
// Check the ctiCode has been generated - if not, this is handled in a slow case.
m_jit.testl_rr(X86::eax, X86::eax);
m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
emitCall(i, X86::eax);
// All paths (eval, host, JS) converge here with the result in eax.
X86Assembler::JmpDst end = m_jit.label();
m_jit.link(wasNotJSFunction, end);
if (type == OpCallEval)
m_jit.link(wasEval, end);
// Put the return value in dst. In the interpreter, op_ret does this.
// Compiles op_stricteq / op_nstricteq. Fast path: when both operands are immediates
// (and neither is the zero immediate, which is ambiguous w.r.t. double zero), strict
// equality is just a pointer compare. Mixed immediate/cell pairs with a non-zero
// immediate can never be strictly equal, so the answer is a constant. Everything
// else defers to the slow case.
// NOTE(review): braces and the negated/else split around setne/sete are not fully
// visible in this chunk.
void CTI::compileOpStrictEq(Instruction* instruction, unsigned i, CompileOpStrictEqType type)
bool negated = (type == OpNStrictEq);
unsigned dst = instruction[i + 1].u.operand;
unsigned src1 = instruction[i + 2].u.operand;
unsigned src2 = instruction[i + 3].u.operand;
emitGetArg(src1, X86::eax);
emitGetArg(src2, X86::edx);
m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
X86Assembler::JmpSrc firstNotImmediate = m_jit.emitUnlinkedJe();
m_jit.testl_i32r(JSImmediate::TagMask, X86::edx);
X86Assembler::JmpSrc secondNotImmediate = m_jit.emitUnlinkedJe();
// Both immediate: compare directly, materialize 0/1, tag as boolean.
m_jit.cmpl_rr(X86::edx, X86::eax);
m_jit.setne_r(X86::eax); // nstricteq variant
m_jit.sete_r(X86::eax); // stricteq variant
m_jit.movzbl_rr(X86::eax, X86::eax);
emitTagAsBoolImmediate(X86::eax);
X86Assembler::JmpSrc bothWereImmediates = m_jit.emitUnlinkedJmp();
m_jit.link(firstNotImmediate, m_jit.label());
// check that edx is immediate but not the zero immediate
m_jit.testl_i32r(JSImmediate::TagMask, X86::edx);
m_jit.setz_r(X86::ecx);
m_jit.movzbl_rr(X86::ecx, X86::ecx); // ecx is now 1 if edx was nonimmediate
m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::edx);
m_jit.sete_r(X86::edx);
m_jit.movzbl_rr(X86::edx, X86::edx); // edx is now 1 if edx was the 0 immediate
m_jit.orl_rr(X86::ecx, X86::edx);
m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJnz(), i));
// Cell vs (non-zero) immediate: statically not strictly equal.
m_jit.movl_i32r(reinterpret_cast<uint32_t>(jsBoolean(negated)), X86::eax);
X86Assembler::JmpSrc firstWasNotImmediate = m_jit.emitUnlinkedJmp();
m_jit.link(secondNotImmediate, m_jit.label());
// check that eax is not the zero immediate (we know it must be immediate)
m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
m_jit.movl_i32r(reinterpret_cast<uint32_t>(jsBoolean(negated)), X86::eax);
m_jit.link(bothWereImmediates, m_jit.label());
m_jit.link(firstWasNotImmediate, m_jit.label());
// Decrements the timeout counter in esi; when it hits zero, calls the timeout-check
// helper and reloads the counter from the Machine (reached via the JSGlobalData CTI
// param). Planted at loop back-edges to interrupt runaway scripts.
void CTI::emitSlowScriptCheck(unsigned opcodeIndex)
m_jit.subl_i8r(1, X86::esi);
X86Assembler::JmpSrc skipTimeout = m_jit.emitUnlinkedJne(); // common case: counter not yet zero
emitCall(opcodeIndex, Machine::cti_timeout_check);
emitGetCTIParam(CTI_ARGS_globalData, X86::ecx);
m_jit.movl_mr(OBJECT_OFFSET(JSGlobalData, machine), X86::ecx, X86::ecx);
m_jit.movl_mr(OBJECT_OFFSET(Machine, m_ticksUntilNextTimeoutCheck), X86::ecx, X86::esi);
m_jit.link(skipTimeout, m_jit.label());
This is required since number representation is canonical - values representable as a JSImmediate should not be stored in a JSNumberCell.
In the common case, the double value from 'xmmSource' is written to the reusable JSNumberCell pointed to by 'jsNumberCell', then 'jsNumberCell'
is written to the output SF Register 'dst', and then a jump is planted (stored into *wroteJSNumberCell).
However if the value from xmmSource is representable as a JSImmediate, then the JSImmediate value will be written to the output, and flow
control will fall through from the code planted.
void CTI::putDoubleResultToJSNumberCellOrJSImmediate(X86::XMMRegisterID xmmSource, X86::RegisterID jsNumberCell, unsigned dst, X86Assembler::JmpSrc* wroteJSNumberCell, X86::XMMRegisterID tempXmm, X86::RegisterID tempReg1, X86::RegisterID tempReg2)
// convert (double -> JSImmediate -> double), and check if the value is unchanged - in which case the value is representable as a JSImmediate.
m_jit.cvttsd2si_rr(xmmSource, tempReg1);
m_jit.addl_rr(tempReg1, tempReg1); // simulate the tag-shift round trip: <<1 then >>1
m_jit.sarl_i8r(1, tempReg1);
m_jit.cvtsi2sd_rr(tempReg1, tempXmm);
// Compare & branch if immediate.
m_jit.ucomis_rr(tempXmm, xmmSource);
X86Assembler::JmpSrc resultIsImm = m_jit.emitUnlinkedJe();
X86Assembler::JmpDst resultLookedLikeImmButActuallyIsnt = m_jit.label();
// Store the result to the JSNumberCell and jump.
m_jit.movsd_rm(xmmSource, OBJECT_OFFSET(JSNumberCell, m_value), jsNumberCell);
emitPutResult(dst, jsNumberCell);
*wroteJSNumberCell = m_jit.emitUnlinkedJmp();
m_jit.link(resultIsImm, m_jit.label());
// value == (double)(JSImmediate)value... or at least, it looks that way...
// ucomi will report that (0 == -0), and will report true if either input in NaN (result is unordered).
m_jit.link(m_jit.emitUnlinkedJp(), resultLookedLikeImmButActuallyIsnt); // Actually was a NaN
m_jit.pextrw_irr(3, xmmSource, tempReg2); // extract the top 16 bits (sign + high exponent)
m_jit.cmpl_i32r(0x8000, tempReg2);
m_jit.link(m_jit.emitUnlinkedJe(), resultLookedLikeImmButActuallyIsnt); // Actually was -0
// Yes it really really really is representable as a JSImmediate.
emitFastArithIntToImmNoCheck(tempReg1);
// NOTE(review): writes ecx rather than tempReg1 — this silently assumes every
// caller passes tempReg1 == X86::ecx (both visible call sites do). Confirm before
// adding a caller with a different tempReg1.
emitPutResult(dst, X86::ecx);
// Compiles op_add / op_sub / op_mul. Three fast paths:
//   (A) src2 is a reusable JSNumberCell and SSE2 is available: do the arithmetic in
//       xmm0 and write the result back through src2's cell (or as an immediate).
//   (B) symmetric case where src1 is the reusable cell.
//   (C) both operands immediate ints: tag-aware integer arithmetic with overflow
//       checks branching to the slow case.
// NOTE(review): many closing braces / else keywords are not visible in this chunk;
// the path structure below follows the visible statements.
void CTI::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types, unsigned i)
StructureID* numberStructureID = m_exec->globalData().numberStructureID.get();
X86Assembler::JmpSrc wasJSNumberCell1, wasJSNumberCell1b, wasJSNumberCell2, wasJSNumberCell2b;
emitGetArg(src1, X86::eax);
emitGetArg(src2, X86::edx);
// ---- Path (A): src2 is a reusable number cell ----
if (types.second().isReusable() && isSSE2Present()) {
ASSERT(types.second().mightBeNumber());
// Check op2 is a number
m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::edx);
X86Assembler::JmpSrc op2imm = m_jit.emitUnlinkedJne();
if (!types.second().definitelyIsNumber()) {
emitJumpSlowCaseIfNotJSCell(X86::edx, i);
m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::edx);
m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
// (1) In this case src2 is a reusable number cell.
// Slow case if src1 is not a number type.
m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
X86Assembler::JmpSrc op1imm = m_jit.emitUnlinkedJne();
if (!types.first().definitelyIsNumber()) {
emitJumpSlowCaseIfNotJSCell(X86::eax, i);
m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
// (1a) if we get here, src1 is also a number cell
m_jit.movsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
X86Assembler::JmpSrc loadedDouble = m_jit.emitUnlinkedJmp();
// (1b) if we get here, src1 is an immediate
m_jit.link(op1imm, m_jit.label());
emitFastArithImmToInt(X86::eax);
m_jit.cvtsi2sd_rr(X86::eax, X86::xmm0);
m_jit.link(loadedDouble, m_jit.label());
if (opcodeID == op_add)
m_jit.addsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
else if (opcodeID == op_sub)
m_jit.subsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
ASSERT(opcodeID == op_mul);
m_jit.mulsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
// Reuse src2's cell (in edx) for the result; tempReg1 is ecx as required (see
// the note on putDoubleResultToJSNumberCellOrJSImmediate).
putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::edx, dst, &wasJSNumberCell2, X86::xmm1, X86::ecx, X86::eax);
wasJSNumberCell2b = m_jit.emitUnlinkedJmp();
// (2) This handles cases where src2 is an immediate number.
// Two slow cases - either src1 isn't an immediate, or the subtract overflows.
m_jit.link(op2imm, m_jit.label());
emitJumpSlowCaseIfNotImmNum(X86::eax, i);
// ---- Path (B): src1 is the reusable number cell (mirror of path A) ----
} else if (types.first().isReusable() && isSSE2Present()) {
ASSERT(types.first().mightBeNumber());
// Check op1 is a number
m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
X86Assembler::JmpSrc op1imm = m_jit.emitUnlinkedJne();
if (!types.first().definitelyIsNumber()) {
emitJumpSlowCaseIfNotJSCell(X86::eax, i);
m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
// (1) In this case src1 is a reusable number cell.
// Slow case if src2 is not a number type.
m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::edx);
X86Assembler::JmpSrc op2imm = m_jit.emitUnlinkedJne();
if (!types.second().definitelyIsNumber()) {
emitJumpSlowCaseIfNotJSCell(X86::edx, i);
m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::edx);
m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
// (1a) if we get here, src2 is also a number cell
m_jit.movsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm1);
X86Assembler::JmpSrc loadedDouble = m_jit.emitUnlinkedJmp();
// (1b) if we get here, src2 is an immediate
m_jit.link(op2imm, m_jit.label());
emitFastArithImmToInt(X86::edx);
m_jit.cvtsi2sd_rr(X86::edx, X86::xmm1);
m_jit.link(loadedDouble, m_jit.label());
m_jit.movsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
if (opcodeID == op_add)
m_jit.addsd_rr(X86::xmm1, X86::xmm0);
else if (opcodeID == op_sub)
m_jit.subsd_rr(X86::xmm1, X86::xmm0);
ASSERT(opcodeID == op_mul);
m_jit.mulsd_rr(X86::xmm1, X86::xmm0);
m_jit.movsd_rm(X86::xmm0, OBJECT_OFFSET(JSNumberCell, m_value), X86::eax);
putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::eax, dst, &wasJSNumberCell1, X86::xmm1, X86::ecx, X86::edx);
wasJSNumberCell1b = m_jit.emitUnlinkedJmp();
// (2) This handles cases where src1 is an immediate number.
// Two slow cases - either src2 isn't an immediate, or the subtract overflows.
m_jit.link(op1imm, m_jit.label());
emitJumpSlowCaseIfNotImmNum(X86::edx, i);
// ---- Path (C): neither operand reusable — both must be immediate ints ----
emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
if (opcodeID == op_add) {
emitFastArithDeTagImmediate(X86::eax); // strip one tag so the sum carries exactly one
m_jit.addl_rr(X86::edx, X86::eax);
m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
} else if (opcodeID == op_sub) {
m_jit.subl_rr(X86::edx, X86::eax); // tags cancel on subtraction
m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
emitFastArithReTagImmediate(X86::eax);
ASSERT(opcodeID == op_mul);
emitFastArithDeTagImmediate(X86::eax);
emitFastArithImmToInt(X86::edx); // one operand must be a plain int for imul
m_jit.imull_rr(X86::edx, X86::eax);
m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
emitFastArithReTagImmediate(X86::eax);
// Join points for the SSE2 paths that wrote through a number cell.
if (types.second().isReusable() && isSSE2Present()) {
m_jit.link(wasJSNumberCell2, m_jit.label());
m_jit.link(wasJSNumberCell2b, m_jit.label());
else if (types.first().isReusable() && isSSE2Present()) {
m_jit.link(wasJSNumberCell1, m_jit.label());
m_jit.link(wasJSNumberCell1b, m_jit.label());
// Slow-case companion to compileBinaryArithOp. Links exactly the SlowCaseEntry
// jumps the fast path appended — the (++iter) counts must mirror, branch for
// branch, the appends made above — then falls back to the generic cti_op_* helper.
void CTI::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types, unsigned i)
X86Assembler::JmpDst here = m_jit.label();
m_jit.link(iter->from, here);
if (types.second().isReusable() && isSSE2Present()) {
if (!types.first().definitelyIsNumber()) {
m_jit.link((++iter)->from, here); // not-a-cell check
m_jit.link((++iter)->from, here); // structure-ID mismatch
if (!types.second().definitelyIsNumber()) {
m_jit.link((++iter)->from, here);
m_jit.link((++iter)->from, here);
m_jit.link((++iter)->from, here); // overflow / not-imm from the integer tail
} else if (types.first().isReusable() && isSSE2Present()) {
if (!types.first().definitelyIsNumber()) {
m_jit.link((++iter)->from, here);
m_jit.link((++iter)->from, here);
if (!types.second().definitelyIsNumber()) {
m_jit.link((++iter)->from, here);
m_jit.link((++iter)->from, here);
m_jit.link((++iter)->from, here);
// (else branch, braces not visible) plain integer path has one extra slow case.
m_jit.link((++iter)->from, here);
// Generic fallback: call the helper with both operands.
emitGetPutArg(src1, 0, X86::ecx);
emitGetPutArg(src2, 4, X86::ecx);
if (opcodeID == op_add)
emitCall(i, Machine::cti_op_add);
else if (opcodeID == op_sub)
emitCall(i, Machine::cti_op_sub);
ASSERT(opcodeID == op_mul);
emitCall(i, Machine::cti_op_mul);
862 void CTI::privateCompileMainPass()
864 Instruction* instruction = m_codeBlock->instructions.begin();
865 unsigned instructionCount = m_codeBlock->instructions.size();
867 unsigned structureIDInstructionIndex = 0;
869 for (unsigned i = 0; i < instructionCount; ) {
870 m_labels[i] = m_jit.label();
872 #if ENABLE(SAMPLING_TOOL)
873 m_jit.movl_i32m(m_machine->getOpcodeID(instruction[i].u.opcode), ¤tOpcodeID);
876 ASSERT_WITH_MESSAGE(m_machine->isOpcode(instruction[i].u.opcode), "privateCompileMainPass gone bad @ %d", i);
877 switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
879 unsigned src = instruction[i + 2].u.operand;
881 m_jit.movl_i32r(reinterpret_cast<unsigned>(getConstant(m_exec, src)), X86::edx);
883 emitGetArg(src, X86::edx);
884 emitPutResult(instruction[i + 1].u.operand, X86::edx);
889 unsigned dst = instruction[i + 1].u.operand;
890 unsigned src1 = instruction[i + 2].u.operand;
891 unsigned src2 = instruction[i + 3].u.operand;
893 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
894 emitGetArg(src2, X86::edx);
895 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
896 m_jit.addl_i32r(getDeTaggedConstantImmediate(value), X86::edx);
897 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
898 emitPutResult(dst, X86::edx);
899 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
900 emitGetArg(src1, X86::eax);
901 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
902 m_jit.addl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
903 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
906 OperandTypes types = OperandTypes::fromInt(instruction[i + 4].u.operand);
907 if (types.first().mightBeNumber() && types.second().mightBeNumber())
908 compileBinaryArithOp(op_add, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
910 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
911 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
912 emitCall(i, Machine::cti_op_add);
913 emitPutResult(instruction[i + 1].u.operand);
921 if (m_codeBlock->needsFullScopeChain)
922 emitCall(i, Machine::cti_op_end);
923 emitGetArg(instruction[i + 1].u.operand, X86::eax);
924 #if ENABLE(SAMPLING_TOOL)
925 m_jit.movl_i32m(-1, ¤tOpcodeID);
927 m_jit.pushl_m(RegisterFile::ReturnPC * static_cast<int>(sizeof(Register)), X86::edi);
933 unsigned target = instruction[i + 1].u.operand;
934 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
939 int srcDst = instruction[i + 1].u.operand;
940 emitGetArg(srcDst, X86::eax);
941 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
942 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
943 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
944 emitPutResult(srcDst, X86::eax);
949 emitSlowScriptCheck(i);
951 unsigned target = instruction[i + 1].u.operand;
952 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
956 case op_loop_if_less: {
957 emitSlowScriptCheck(i);
959 unsigned target = instruction[i + 3].u.operand;
960 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
962 emitGetArg(instruction[i + 1].u.operand, X86::edx);
963 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
964 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
965 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
967 emitGetArg(instruction[i + 1].u.operand, X86::eax);
968 emitGetArg(instruction[i + 2].u.operand, X86::edx);
969 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
970 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
971 m_jit.cmpl_rr(X86::edx, X86::eax);
972 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
977 case op_loop_if_lesseq: {
978 emitSlowScriptCheck(i);
980 unsigned target = instruction[i + 3].u.operand;
981 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
983 emitGetArg(instruction[i + 1].u.operand, X86::edx);
984 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
985 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
986 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
988 emitGetArg(instruction[i + 1].u.operand, X86::eax);
989 emitGetArg(instruction[i + 2].u.operand, X86::edx);
990 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
991 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
992 m_jit.cmpl_rr(X86::edx, X86::eax);
993 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
998 case op_new_object: {
999 emitCall(i, Machine::cti_op_new_object);
1000 emitPutResult(instruction[i + 1].u.operand);
1004 case op_put_by_id: {
1005 // In order to be able to repatch both the StructureID, and the object offset, we store one pointer,
1006 // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
1007 // such that the StructureID & offset are always at the same distance from this.
1009 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1010 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1012 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
1013 X86Assembler::JmpDst hotPathBegin = m_jit.label();
1014 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
1015 ++structureIDInstructionIndex;
1017 // Jump to a slow case if either the base object is an immediate, or if the StructureID does not match.
1018 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
1019 // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
1020 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1021 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdStructureID);
1022 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1024 // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
1025 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
1026 m_jit.movl_rm(X86::edx, repatchGetByIdDefaultOffset, X86::eax);
1027 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdPropertyMapOffset);
1032 case op_get_by_id: {
1033 // As for put_by_id, get_by_id requires the offset of the StructureID and the offset of the access to be repatched.
1034 // Additionally, for get_by_id we need to repatch the offset of the branch to the slow case (we repatch this to jump
1035 // to array-length / prototype access trampolines, and finally we also need the property-map access offset as a label
1036 // to jump back to if one of these trampolines finds a match.
1038 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1040 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
1042 X86Assembler::JmpDst hotPathBegin = m_jit.label();
1043 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
1044 ++structureIDInstructionIndex;
1046 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
1047 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1048 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdStructureID);
1049 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1050 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdBranchToSlowCase);
1052 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
1053 m_jit.movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::ecx);
1054 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdPropertyMapOffset);
1055 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
1060 case op_instanceof: {
1061 emitGetArg(instruction[i + 2].u.operand, X86::eax); // value
1062 emitGetArg(instruction[i + 3].u.operand, X86::ecx); // baseVal
1063 emitGetArg(instruction[i + 4].u.operand, X86::edx); // proto
1065 // check if any are immediates
1066 m_jit.orl_rr(X86::eax, X86::ecx);
1067 m_jit.orl_rr(X86::edx, X86::ecx);
1068 m_jit.testl_i32r(JSImmediate::TagMask, X86::ecx);
1070 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJnz(), i));
1072 // check that all are object type - this is a bit of a bithack to avoid excess branching;
1073 // we check that the sum of the three type codes from StructureIDs is exactly 3 * ObjectType,
1074 // this works because NumberType and StringType are smaller
1075 m_jit.movl_i32r(3 * ObjectType, X86::ecx);
1076 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::eax);
1077 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::edx, X86::edx);
1078 m_jit.subl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::eax, X86::ecx);
1079 m_jit.subl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::edx, X86::ecx);
1080 emitGetArg(instruction[i + 3].u.operand, X86::edx); // reload baseVal
1081 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::edx, X86::edx);
1082 m_jit.cmpl_rm(X86::ecx, OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::edx);
1084 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1086 // check that baseVal's flags include ImplementsHasInstance but not OverridesHasInstance
1087 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::edx, X86::ecx);
1088 m_jit.andl_i32r(ImplementsHasInstance | OverridesHasInstance, X86::ecx);
1089 m_jit.cmpl_i32r(ImplementsHasInstance, X86::ecx);
1091 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1093 emitGetArg(instruction[i + 2].u.operand, X86::ecx); // reload value
1094 emitGetArg(instruction[i + 4].u.operand, X86::edx); // reload proto
1096 // optimistically load true result
1097 m_jit.movl_i32r(reinterpret_cast<int32_t>(jsBoolean(true)), X86::eax);
1099 X86Assembler::JmpDst loop = m_jit.label();
1101 // load value's prototype
1102 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::ecx, X86::ecx);
1103 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
1105 m_jit.cmpl_rr(X86::ecx, X86::edx);
1106 X86Assembler::JmpSrc exit = m_jit.emitUnlinkedJe();
1108 m_jit.cmpl_i32r(reinterpret_cast<int32_t>(jsNull()), X86::ecx);
1109 X86Assembler::JmpSrc goToLoop = m_jit.emitUnlinkedJne();
1110 m_jit.link(goToLoop, loop);
1112 m_jit.movl_i32r(reinterpret_cast<int32_t>(jsBoolean(false)), X86::eax);
1114 m_jit.link(exit, m_jit.label());
1116 emitPutResult(instruction[i + 1].u.operand);
1121 case op_del_by_id: {
1122 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1123 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1124 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1125 emitCall(i, Machine::cti_op_del_by_id);
1126 emitPutResult(instruction[i + 1].u.operand);
1131 unsigned dst = instruction[i + 1].u.operand;
1132 unsigned src1 = instruction[i + 2].u.operand;
1133 unsigned src2 = instruction[i + 3].u.operand;
1135 if (JSValue* src1Value = getConstantImmediateNumericArg(src1)) {
1136 emitGetArg(src2, X86::eax);
1137 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1138 emitFastArithImmToInt(X86::eax);
1139 m_jit.imull_i32r(X86::eax, getDeTaggedConstantImmediate(src1Value), X86::eax);
1140 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1141 emitFastArithReTagImmediate(X86::eax);
1143 } else if (JSValue* src2Value = getConstantImmediateNumericArg(src2)) {
1144 emitGetArg(src1, X86::eax);
1145 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1146 emitFastArithImmToInt(X86::eax);
1147 m_jit.imull_i32r(X86::eax, getDeTaggedConstantImmediate(src2Value), X86::eax);
1148 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1149 emitFastArithReTagImmediate(X86::eax);
1152 compileBinaryArithOp(op_mul, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
1158 FuncDeclNode* func = (m_codeBlock->functions[instruction[i + 2].u.operand]).get();
1159 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
1160 emitCall(i, Machine::cti_op_new_func);
1161 emitPutResult(instruction[i + 1].u.operand);
1166 compileOpCall(instruction, i);
1170 case op_get_global_var: {
1171 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 2].u.jsCell);
1172 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
1173 emitGetVariableObjectRegister(X86::eax, instruction[i + 3].u.operand, X86::eax);
1174 emitPutResult(instruction[i + 1].u.operand, X86::eax);
1178 case op_put_global_var: {
1179 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 1].u.jsCell);
1180 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
1181 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1182 emitPutVariableObjectRegister(X86::edx, X86::eax, instruction[i + 2].u.operand);
1186 case op_get_scoped_var: {
1187 int skip = instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain;
1189 emitGetArg(RegisterFile::ScopeChain, X86::eax);
1191 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::eax, X86::eax);
1193 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::eax, X86::eax);
1194 emitGetVariableObjectRegister(X86::eax, instruction[i + 2].u.operand, X86::eax);
1195 emitPutResult(instruction[i + 1].u.operand);
1199 case op_put_scoped_var: {
1200 int skip = instruction[i + 2].u.operand + m_codeBlock->needsFullScopeChain;
1202 emitGetArg(RegisterFile::ScopeChain, X86::edx);
1203 emitGetArg(instruction[i + 3].u.operand, X86::eax);
1205 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::edx, X86::edx);
1207 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::edx, X86::edx);
1208 emitPutVariableObjectRegister(X86::eax, X86::edx, instruction[i + 1].u.operand);
1212 case op_tear_off_activation: {
1213 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1214 emitCall(i, Machine::cti_op_tear_off_activation);
1218 case op_tear_off_arguments: {
1219 emitCall(i, Machine::cti_op_tear_off_arguments);
1224 // Check for a profiler - if there is one, jump to the hook below.
1225 emitGetCTIParam(CTI_ARGS_profilerReference, X86::eax);
1226 m_jit.cmpl_i32m(0, X86::eax);
1227 X86Assembler::JmpSrc profile = m_jit.emitUnlinkedJne();
1228 X86Assembler::JmpDst profiled = m_jit.label();
1230 // We could JIT generate the deref, only calling out to C when the refcount hits zero.
1231 if (m_codeBlock->needsFullScopeChain)
1232 emitCall(i, Machine::cti_op_ret_scopeChain);
1234 // Return the result in %eax.
1235 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1237 // Grab the return address.
1238 emitGetArg(RegisterFile::ReturnPC, X86::edx);
1240 // Restore our caller's "r".
1241 emitGetArg(RegisterFile::CallerRegisters, X86::edi);
1242 emitPutCTIParam(X86::edi, CTI_ARGS_r);
1245 m_jit.pushl_r(X86::edx);
1249 m_jit.link(profile, m_jit.label());
1250 emitCall(i, Machine::cti_op_ret_profiler);
1251 m_jit.link(m_jit.emitUnlinkedJmp(), profiled);
1256 case op_new_array: {
1257 m_jit.leal_mr(sizeof(Register) * instruction[i + 2].u.operand, X86::edi, X86::edx);
1258 emitPutArg(X86::edx, 0);
1259 emitPutArgConstant(instruction[i + 3].u.operand, 4);
1260 emitCall(i, Machine::cti_op_new_array);
1261 emitPutResult(instruction[i + 1].u.operand);
1266 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1267 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1268 emitCall(i, Machine::cti_op_resolve);
1269 emitPutResult(instruction[i + 1].u.operand);
1273 case op_construct: {
1274 compileOpCall(instruction, i, OpConstruct);
1278 case op_construct_verify: {
1279 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1281 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1282 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJne();
1283 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1284 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
1285 X86Assembler::JmpSrc isObject = m_jit.emitUnlinkedJe();
1287 m_jit.link(isImmediate, m_jit.label());
1288 emitGetArg(instruction[i + 2].u.operand, X86::ecx);
1289 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
1290 m_jit.link(isObject, m_jit.label());
1295 case op_get_by_val: {
1296 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1297 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1298 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1299 emitFastArithImmToInt(X86::edx);
1300 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1301 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1302 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
1303 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1305 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
1306 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
1307 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
1308 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
1310 // Get the value from the vector
1311 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::eax);
1312 emitPutResult(instruction[i + 1].u.operand);
1316 case op_resolve_func: {
1317 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1318 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1319 emitCall(i, Machine::cti_op_resolve_func);
1320 emitPutResult(instruction[i + 1].u.operand);
1321 emitPutResult(instruction[i + 2].u.operand, X86::edx);
1326 compileBinaryArithOp(op_sub, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
1330 case op_put_by_val: {
1331 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1332 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1333 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1334 emitFastArithImmToInt(X86::edx);
1335 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1336 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1337 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
1338 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1340 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
1341 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
1342 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
1343 X86Assembler::JmpSrc inFastVector = m_jit.emitUnlinkedJa();
1344 // No; oh well, check if the access is within the vector - if so, we may still be okay.
1345 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
1346 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
1348 // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
1349 // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff.
1350 m_jit.cmpl_i8m(0, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
1351 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
1353 // All good - put the value into the array.
1354 m_jit.link(inFastVector, m_jit.label());
1355 emitGetArg(instruction[i + 3].u.operand, X86::eax);
1356 m_jit.movl_rm(X86::eax, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
1360 CTI_COMPILE_BINARY_OP(op_lesseq)
1361 case op_loop_if_true: {
1362 emitSlowScriptCheck(i);
1364 unsigned target = instruction[i + 2].u.operand;
1365 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1367 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1368 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
1369 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1370 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
1372 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1373 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1374 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1375 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1377 m_jit.link(isZero, m_jit.label());
1381 case op_resolve_base: {
1382 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1383 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1384 emitCall(i, Machine::cti_op_resolve_base);
1385 emitPutResult(instruction[i + 1].u.operand);
1390 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1391 emitCall(i, Machine::cti_op_negate);
1392 emitPutResult(instruction[i + 1].u.operand);
1396 case op_resolve_skip: {
1397 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1398 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1399 emitPutArgConstant(instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain, 4);
1400 emitCall(i, Machine::cti_op_resolve_skip);
1401 emitPutResult(instruction[i + 1].u.operand);
1405 case op_resolve_global: {
1407 unsigned globalObject = reinterpret_cast<unsigned>(instruction[i + 2].u.jsCell);
1408 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1409 void* structureIDAddr = reinterpret_cast<void*>(instruction + i + 4);
1410 void* offsetAddr = reinterpret_cast<void*>(instruction + i + 5);
1412 // Check StructureID of global object
1413 m_jit.movl_i32r(globalObject, X86::eax);
1414 m_jit.movl_mr(structureIDAddr, X86::edx);
1415 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1416 X86Assembler::JmpSrc slowCase = m_jit.emitUnlinkedJne(); // StructureIDs don't match
1417 m_slowCases.append(SlowCaseEntry(slowCase, i));
1419 // Load cached property
1420 m_jit.movl_mr(OBJECT_OFFSET(JSGlobalObject, m_propertyStorage), X86::eax, X86::eax);
1421 m_jit.movl_mr(offsetAddr, X86::edx);
1422 m_jit.movl_mr(0, X86::eax, X86::edx, sizeof(JSValue*), X86::eax);
1423 emitPutResult(instruction[i + 1].u.operand);
1424 X86Assembler::JmpSrc end = m_jit.emitUnlinkedJmp();
1427 m_jit.link(slowCase, m_jit.label());
1428 emitPutArgConstant(globalObject, 0);
1429 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1430 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 8);
1431 emitCall(i, Machine::cti_op_resolve_global);
1432 emitPutResult(instruction[i + 1].u.operand);
1433 m_jit.link(end, m_jit.label());
1435 ++structureIDInstructionIndex;
1438 CTI_COMPILE_BINARY_OP(op_div)
1440 int srcDst = instruction[i + 1].u.operand;
1441 emitGetArg(srcDst, X86::eax);
1442 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1443 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
1444 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1445 emitPutResult(srcDst, X86::eax);
1450 unsigned target = instruction[i + 3].u.operand;
1451 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1453 emitGetArg(instruction[i + 1].u.operand, X86::edx);
1454 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1455 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
1456 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
1458 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1459 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1460 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1461 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1462 m_jit.cmpl_rr(X86::edx, X86::eax);
1463 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
1469 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1470 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
1471 m_jit.testl_i32r(JSImmediate::FullTagTypeMask, X86::eax); // i8?
1472 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1473 m_jit.xorl_i8r((JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue), X86::eax);
1474 emitPutResult(instruction[i + 1].u.operand);
1479 unsigned target = instruction[i + 2].u.operand;
1480 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1482 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1483 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1484 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1485 X86Assembler::JmpSrc isNonZero = m_jit.emitUnlinkedJne();
1487 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1488 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1489 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1490 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1492 m_jit.link(isNonZero, m_jit.label());
1497 int srcDst = instruction[i + 2].u.operand;
1498 emitGetArg(srcDst, X86::eax);
1499 m_jit.movl_rr(X86::eax, X86::edx);
1500 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1501 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1502 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1503 emitPutResult(srcDst, X86::edx);
1504 emitPutResult(instruction[i + 1].u.operand);
1508 case op_unexpected_load: {
1509 JSValue* v = m_codeBlock->unexpectedConstants[instruction[i + 2].u.operand];
1510 m_jit.movl_i32r(reinterpret_cast<unsigned>(v), X86::eax);
1511 emitPutResult(instruction[i + 1].u.operand);
1516 int retAddrDst = instruction[i + 1].u.operand;
1517 int target = instruction[i + 2].u.operand;
1518 m_jit.movl_i32m(0, sizeof(Register) * retAddrDst, X86::edi);
1519 X86Assembler::JmpDst addrPosition = m_jit.label();
1520 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1521 X86Assembler::JmpDst sretTarget = m_jit.label();
1522 m_jsrSites.append(JSRInfo(addrPosition, sretTarget));
1527 m_jit.jmp_m(sizeof(Register) * instruction[i + 1].u.operand, X86::edi);
1532 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1533 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1534 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1535 m_jit.cmpl_rr(X86::edx, X86::eax);
1536 m_jit.sete_r(X86::eax);
1537 m_jit.movzbl_rr(X86::eax, X86::eax);
1538 emitTagAsBoolImmediate(X86::eax);
1539 emitPutResult(instruction[i + 1].u.operand);
1544 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1545 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1546 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1547 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1548 emitFastArithImmToInt(X86::eax);
1549 emitFastArithImmToInt(X86::ecx);
1550 m_jit.shll_CLr(X86::eax);
1551 emitFastArithIntToImmOrSlowCase(X86::eax, i);
1552 emitPutResult(instruction[i + 1].u.operand);
1557 unsigned src1 = instruction[i + 2].u.operand;
1558 unsigned src2 = instruction[i + 3].u.operand;
1559 unsigned dst = instruction[i + 1].u.operand;
1560 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
1561 emitGetArg(src2, X86::eax);
1562 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1563 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax); // FIXME: make it more obvious this is relying on the format of JSImmediate
1565 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
1566 emitGetArg(src1, X86::eax);
1567 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1568 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax);
1571 emitGetArg(src1, X86::eax);
1572 emitGetArg(src2, X86::edx);
1573 m_jit.andl_rr(X86::edx, X86::eax);
1574 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1581 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1582 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1583 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1584 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1585 emitFastArithImmToInt(X86::ecx);
1586 m_jit.sarl_CLr(X86::eax);
1587 emitFastArithPotentiallyReTagImmediate(X86::eax);
1588 emitPutResult(instruction[i + 1].u.operand);
1593 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1594 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1595 m_jit.xorl_i8r(~JSImmediate::TagBitTypeInteger, X86::eax);
1596 emitPutResult(instruction[i + 1].u.operand);
1600 case op_resolve_with_base: {
1601 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1602 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1603 emitCall(i, Machine::cti_op_resolve_with_base);
1604 emitPutResult(instruction[i + 1].u.operand);
1605 emitPutResult(instruction[i + 2].u.operand, X86::edx);
1609 case op_new_func_exp: {
1610 FuncExprNode* func = (m_codeBlock->functionExpressions[instruction[i + 2].u.operand]).get();
1611 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
1612 emitCall(i, Machine::cti_op_new_func_exp);
1613 emitPutResult(instruction[i + 1].u.operand);
1618 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1619 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1620 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1621 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1622 emitFastArithDeTagImmediate(X86::eax);
1623 emitFastArithDeTagImmediate(X86::ecx);
1624 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i)); // This is checking if the last detag resulted in a value 0.
1626 m_jit.idivl_r(X86::ecx);
1627 emitFastArithReTagImmediate(X86::edx);
1628 m_jit.movl_rr(X86::edx, X86::eax);
1629 emitPutResult(instruction[i + 1].u.operand);
1634 unsigned target = instruction[i + 2].u.operand;
1635 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1637 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1638 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
1639 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1640 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
1642 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1643 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1644 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1645 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1647 m_jit.link(isZero, m_jit.label());
1651 CTI_COMPILE_BINARY_OP(op_less)
1653 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1654 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1655 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1656 m_jit.cmpl_rr(X86::eax, X86::edx);
1658 m_jit.setne_r(X86::eax);
1659 m_jit.movzbl_rr(X86::eax, X86::eax);
1660 emitTagAsBoolImmediate(X86::eax);
1662 emitPutResult(instruction[i + 1].u.operand);
1668 int srcDst = instruction[i + 2].u.operand;
1669 emitGetArg(srcDst, X86::eax);
1670 m_jit.movl_rr(X86::eax, X86::edx);
1671 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1672 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1673 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1674 emitPutResult(srcDst, X86::edx);
1675 emitPutResult(instruction[i + 1].u.operand);
1679 CTI_COMPILE_BINARY_OP(op_urshift)
1681 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1682 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1683 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1684 m_jit.xorl_rr(X86::edx, X86::eax);
1685 emitFastArithReTagImmediate(X86::eax);
1686 emitPutResult(instruction[i + 1].u.operand);
1690 case op_new_regexp: {
1691 RegExp* regExp = m_codeBlock->regexps[instruction[i + 2].u.operand].get();
1692 emitPutArgConstant(reinterpret_cast<unsigned>(regExp), 0);
1693 emitCall(i, Machine::cti_op_new_regexp);
1694 emitPutResult(instruction[i + 1].u.operand);
1699 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1700 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1701 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1702 m_jit.orl_rr(X86::edx, X86::eax);
1703 emitPutResult(instruction[i + 1].u.operand);
1707 case op_call_eval: {
1708 compileOpCall(instruction, i, OpCallEval);
1713 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1714 emitCall(i, Machine::cti_op_throw);
1715 m_jit.addl_i8r(0x24, X86::esp);
1716 m_jit.popl_r(X86::edi);
1717 m_jit.popl_r(X86::esi);
1722 case op_get_pnames: {
1723 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1724 emitCall(i, Machine::cti_op_get_pnames);
1725 emitPutResult(instruction[i + 1].u.operand);
1729 case op_next_pname: {
1730 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1731 unsigned target = instruction[i + 3].u.operand;
1732 emitCall(i, Machine::cti_op_next_pname);
1733 m_jit.testl_rr(X86::eax, X86::eax);
1734 X86Assembler::JmpSrc endOfIter = m_jit.emitUnlinkedJe();
1735 emitPutResult(instruction[i + 1].u.operand);
1736 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 3 + target));
1737 m_jit.link(endOfIter, m_jit.label());
1741 case op_push_scope: {
1742 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1743 emitCall(i, Machine::cti_op_push_scope);
1747 case op_pop_scope: {
1748 emitCall(i, Machine::cti_op_pop_scope);
1752 CTI_COMPILE_UNARY_OP(op_typeof)
1753 CTI_COMPILE_UNARY_OP(op_is_undefined)
1754 CTI_COMPILE_UNARY_OP(op_is_boolean)
1755 CTI_COMPILE_UNARY_OP(op_is_number)
1756 CTI_COMPILE_UNARY_OP(op_is_string)
1757 CTI_COMPILE_UNARY_OP(op_is_object)
1758 CTI_COMPILE_UNARY_OP(op_is_function)
1760 compileOpStrictEq(instruction, i, OpStrictEq);
1764 case op_nstricteq: {
1765 compileOpStrictEq(instruction, i, OpNStrictEq);
1769 case op_to_jsnumber: {
1770 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1771 emitCall(i, Machine::cti_op_to_jsnumber);
1772 emitPutResult(instruction[i + 1].u.operand);
1777 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1778 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1779 emitCall(i, Machine::cti_op_in);
1780 emitPutResult(instruction[i + 1].u.operand);
1784 case op_push_new_scope: {
1785 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1786 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1787 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1788 emitCall(i, Machine::cti_op_push_new_scope);
1789 emitPutResult(instruction[i + 1].u.operand);
1794 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
1795 emitPutResult(instruction[i + 1].u.operand);
1799 case op_jmp_scopes: {
1800 unsigned count = instruction[i + 1].u.operand;
1801 emitPutArgConstant(count, 0);
1802 emitCall(i, Machine::cti_op_jmp_scopes);
1803 unsigned target = instruction[i + 2].u.operand;
1804 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1808 case op_put_by_index: {
1809 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1810 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1811 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1812 emitCall(i, Machine::cti_op_put_by_index);
1816 case op_switch_imm: {
1817 unsigned tableIndex = instruction[i + 1].u.operand;
1818 unsigned defaultOffset = instruction[i + 2].u.operand;
1819 unsigned scrutinee = instruction[i + 3].u.operand;
1821 // create jump table for switch destinations, track this switch statement.
1822 SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTables[tableIndex];
1823 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Immediate));
1824 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
1826 emitGetPutArg(scrutinee, 0, X86::ecx);
1827 emitPutArgConstant(tableIndex, 4);
1828 emitCall(i, Machine::cti_op_switch_imm);
1829 m_jit.jmp_r(X86::eax);
1833 case op_switch_char: {
1834 unsigned tableIndex = instruction[i + 1].u.operand;
1835 unsigned defaultOffset = instruction[i + 2].u.operand;
1836 unsigned scrutinee = instruction[i + 3].u.operand;
1838 // create jump table for switch destinations, track this switch statement.
1839 SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTables[tableIndex];
1840 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Character));
1841 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
1843 emitGetPutArg(scrutinee, 0, X86::ecx);
1844 emitPutArgConstant(tableIndex, 4);
1845 emitCall(i, Machine::cti_op_switch_char);
1846 m_jit.jmp_r(X86::eax);
1850 case op_switch_string: {
1851 unsigned tableIndex = instruction[i + 1].u.operand;
1852 unsigned defaultOffset = instruction[i + 2].u.operand;
1853 unsigned scrutinee = instruction[i + 3].u.operand;
1855 // create jump table for switch destinations, track this switch statement.
1856 StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTables[tableIndex];
1857 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset));
1859 emitGetPutArg(scrutinee, 0, X86::ecx);
1860 emitPutArgConstant(tableIndex, 4);
1861 emitCall(i, Machine::cti_op_switch_string);
1862 m_jit.jmp_r(X86::eax);
1866 case op_del_by_val: {
1867 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1868 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1869 emitCall(i, Machine::cti_op_del_by_val);
1870 emitPutResult(instruction[i + 1].u.operand);
1874 case op_put_getter: {
1875 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1876 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1877 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1878 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1879 emitCall(i, Machine::cti_op_put_getter);
1883 case op_put_setter: {
1884 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1885 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1886 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1887 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1888 emitCall(i, Machine::cti_op_put_setter);
1892 case op_new_error: {
1893 JSValue* message = m_codeBlock->unexpectedConstants[instruction[i + 3].u.operand];
1894 emitPutArgConstant(instruction[i + 2].u.operand, 0);
1895 emitPutArgConstant(reinterpret_cast<unsigned>(message), 4);
1896 emitPutArgConstant(m_codeBlock->lineNumberForVPC(&instruction[i]), 8);
1897 emitCall(i, Machine::cti_op_new_error);
1898 emitPutResult(instruction[i + 1].u.operand);
1903 emitPutArgConstant(instruction[i + 1].u.operand, 0);
1904 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1905 emitPutArgConstant(instruction[i + 3].u.operand, 8);
1906 emitCall(i, Machine::cti_op_debug);
1911 unsigned dst = instruction[i + 1].u.operand;
1912 unsigned src1 = instruction[i + 2].u.operand;
1914 emitGetArg(src1, X86::eax);
1915 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1916 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJnz();
1918 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1919 m_jit.testl_i32m(MasqueradesAsUndefined, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::ecx);
1920 m_jit.setnz_r(X86::eax);
1922 X86Assembler::JmpSrc wasNotImmediate = m_jit.emitUnlinkedJmp();
1924 m_jit.link(isImmediate, m_jit.label());
1926 m_jit.movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
1927 m_jit.andl_rr(X86::eax, X86::ecx);
1928 m_jit.cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
1929 m_jit.sete_r(X86::eax);
1931 m_jit.link(wasNotImmediate, m_jit.label());
1933 m_jit.movzbl_rr(X86::eax, X86::eax);
1934 emitTagAsBoolImmediate(X86::eax);
1941 unsigned dst = instruction[i + 1].u.operand;
1942 unsigned src1 = instruction[i + 2].u.operand;
1944 emitGetArg(src1, X86::eax);
1945 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1946 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJnz();
1948 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1949 m_jit.testl_i32m(MasqueradesAsUndefined, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::ecx);
1950 m_jit.setz_r(X86::eax);
1952 X86Assembler::JmpSrc wasNotImmediate = m_jit.emitUnlinkedJmp();
1954 m_jit.link(isImmediate, m_jit.label());
1956 m_jit.movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
1957 m_jit.andl_rr(X86::eax, X86::ecx);
1958 m_jit.cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
1959 m_jit.setne_r(X86::eax);
1961 m_jit.link(wasNotImmediate, m_jit.label());
1963 m_jit.movzbl_rr(X86::eax, X86::eax);
1964 emitTagAsBoolImmediate(X86::eax);
1971 // Even though CTI doesn't use them, we initialize our constant
1972 // registers to zap stale pointers, to avoid unnecessarily prolonging
1973 // object lifetime and increasing GC pressure.
1974 size_t count = m_codeBlock->numVars + m_codeBlock->constantRegisters.size();
1975 for (size_t j = 0; j < count; ++j)
1976 emitInitRegister(j);
1981 case op_enter_with_activation: {
1982 // Even though CTI doesn't use them, we initialize our constant
1983 // registers to zap stale pointers, to avoid unnecessarily prolonging
1984 // object lifetime and increasing GC pressure.
1985 size_t count = m_codeBlock->numVars + m_codeBlock->constantRegisters.size();
1986 for (size_t j = 0; j < count; ++j)
1987 emitInitRegister(j);
1989 emitCall(i, Machine::cti_op_push_activation);
1990 emitPutResult(instruction[i + 1].u.operand);
1995 case op_create_arguments: {
1996 emitCall(i, Machine::cti_op_create_arguments);
2000 case op_convert_this: {
2001 emitGetArg(instruction[i + 1].u.operand, X86::eax);
2003 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
2004 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::edx);
2005 m_jit.testl_i32m(NeedsThisConversion, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::edx);
2006 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJnz(), i));
2011 case op_get_array_length:
2012 case op_get_by_id_chain:
2013 case op_get_by_id_generic:
2014 case op_get_by_id_proto:
2015 case op_get_by_id_self:
2016 case op_get_string_length:
2017 case op_put_by_id_generic:
2018 case op_put_by_id_replace:
2019 case op_put_by_id_transition:
2020 ASSERT_NOT_REACHED();
2024 ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
// Second compilation pass: resolve jumps recorded during the main pass.
// Each JmpTable entry pairs an unlinked jump (from) with the bytecode index
// of its destination (to); m_labels maps bytecode indices to the machine-code
// labels emitted by the main pass.
2028 void CTI::privateCompileLinkPass()
2030     unsigned jmpTableCount = m_jmpTable.size();
2031     for (unsigned i = 0; i < jmpTableCount; ++i)
2032         m_jit.link(m_jmpTable[i].from, m_labels[m_jmpTable[i].to]);
// Generic slow case for a binary operator: link the bail-out jump here,
// marshal both operands into the CTI argument slots (offsets 0 and 4),
// call the C++ helper Machine::cti_<name>, and store the result register.
// (No comments inside the macro body: it is backslash-continued.)
2036 #define CTI_COMPILE_BINARY_OP_SLOW_CASE(name) \
2038     m_jit.link(iter->from, m_jit.label()); \
2039     emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
2040     emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
2041     emitCall(i, Machine::cti_##name); \
2042     emitPutResult(instruction[i + 1].u.operand); \
// Third compilation pass: emit out-of-line "slow case" code for every
// SlowCaseEntry recorded by the main pass.  Each entry's 'to' field is the
// bytecode index of the opcode whose fast path bailed out; the unlinked jump
// in 'from' is linked to the code emitted here.  Opcodes that recorded more
// than one slow-case jump consume extra entries via (++iter)->from.
2047 void CTI::privateCompileSlowCases()
2049 unsigned structureIDInstructionIndex = 0;
2051 Instruction* instruction = m_codeBlock->instructions.begin();
2052 for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end(); ++iter) {
2053 unsigned i = iter->to;
2054 switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
// op_convert_this: both recorded bail-out jumps funnel into one helper call.
2055 case op_convert_this: {
2056 m_jit.link(iter->from, m_jit.label());
2057 m_jit.link((++iter)->from, m_jit.label());
2058 emitPutArg(X86::eax, 0);
2059 emitCall(i, Machine::cti_op_convert_this);
2060 emitPutResult(instruction[i + 1].u.operand);
// Add slow path: when one operand is a constant immediate, the fast path
// already added the de-tagged constant into the register, so subtract it
// back out before handing both operands to the C++ helper.
2065 unsigned dst = instruction[i + 1].u.operand;
2066 unsigned src1 = instruction[i + 2].u.operand;
2067 unsigned src2 = instruction[i + 3].u.operand;
2068 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
2069 X86Assembler::JmpSrc notImm = iter->from;
2070 m_jit.link((++iter)->from, m_jit.label());
2071 m_jit.subl_i32r(getDeTaggedConstantImmediate(value), X86::edx);
2072 m_jit.link(notImm, m_jit.label());
2073 emitGetPutArg(src1, 0, X86::ecx);
2074 emitPutArg(X86::edx, 4);
2075 emitCall(i, Machine::cti_op_add);
2077 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
2078 X86Assembler::JmpSrc notImm = iter->from;
2079 m_jit.link((++iter)->from, m_jit.label());
2080 m_jit.subl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
2081 m_jit.link(notImm, m_jit.label());
2082 emitPutArg(X86::eax, 0);
2083 emitGetPutArg(src2, 4, X86::ecx);
2084 emitCall(i, Machine::cti_op_add);
// Neither operand constant: fall back to the shared binary-arith slow case,
// which is only emitted when both operand types might be numbers.
2087 OperandTypes types = OperandTypes::fromInt(instruction[i + 4].u.operand);
2088 if (types.first().mightBeNumber() && types.second().mightBeNumber())
2089 compileBinaryArithOpSlowCase(op_add, iter, dst, src1, src2, types, i);
2091 ASSERT_NOT_REACHED();
2097 case op_get_by_val: {
2098 // The slow case that handles accesses to arrays (below) may jump back up to here.
2099 X86Assembler::JmpDst beginGetByValSlow = m_jit.label();
2101 X86Assembler::JmpSrc notImm = iter->from;
2102 m_jit.link((++iter)->from, m_jit.label());
2103 m_jit.link((++iter)->from, m_jit.label());
// Re-tag the subscript in edx as an immediate int before the generic call.
2104 emitFastArithIntToImmNoCheck(X86::edx);
2105 m_jit.link(notImm, m_jit.label());
2106 emitPutArg(X86::eax, 0);
2107 emitPutArg(X86::edx, 4);
2108 emitCall(i, Machine::cti_op_get_by_val);
2109 emitPutResult(instruction[i + 1].u.operand);
2110 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
2112 // This is the slow case that handles accesses to arrays above the fast cut-off.
2113 // First, check if this is an access to the vector
2114 m_jit.link((++iter)->from, m_jit.label());
2115 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
2116 m_jit.link(m_jit.emitUnlinkedJbe(), beginGetByValSlow);
2118 // okay, missed the fast region, but it is still in the vector. Get the value.
2119 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::ecx);
2120 // Check whether the value loaded is zero; if so we need to return undefined.
2121 m_jit.testl_rr(X86::ecx, X86::ecx);
2122 m_jit.link(m_jit.emitUnlinkedJe(), beginGetByValSlow);
2123 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
// Subtract: shared binary-arith slow case.
2129 compileBinaryArithOpSlowCase(op_sub, iter, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
// Right shift slow path: operands already in eax/ecx; call the helper.
2134 m_jit.link(iter->from, m_jit.label());
2135 m_jit.link((++iter)->from, m_jit.label());
2136 emitPutArg(X86::eax, 0);
2137 emitPutArg(X86::ecx, 4);
2138 emitCall(i, Machine::cti_op_rshift);
2139 emitPutResult(instruction[i + 1].u.operand);
// Left shift slow path: three recorded jumps; the third reloads both
// operands from their registers before calling cti_op_lshift.
2144 X86Assembler::JmpSrc notImm1 = iter->from;
2145 X86Assembler::JmpSrc notImm2 = (++iter)->from;
2146 m_jit.link((++iter)->from, m_jit.label());
2147 emitGetArg(instruction[i + 2].u.operand, X86::eax);
2148 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
2149 m_jit.link(notImm1, m_jit.label());
2150 m_jit.link(notImm2, m_jit.label());
2151 emitPutArg(X86::eax, 0);
2152 emitPutArg(X86::ecx, 4);
2153 emitCall(i, Machine::cti_op_lshift);
2154 emitPutResult(instruction[i + 1].u.operand);
2158 case op_loop_if_less: {
2159 emitSlowScriptCheck(i);
2161 unsigned target = instruction[i + 3].u.operand;
2162 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
// First variant: src2 is a constant immediate (one recorded slow-case jump);
// helper returns a boolean in eax, jne taken when the loop condition holds.
2164 m_jit.link(iter->from, m_jit.label());
2165 emitPutArg(X86::edx, 0);
2166 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
2167 emitCall(i, Machine::cti_op_loop_if_less);
2168 m_jit.testl_rr(X86::eax, X86::eax);
2169 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
// Second variant: register vs register (two recorded slow-case jumps).
2171 m_jit.link(iter->from, m_jit.label());
2172 m_jit.link((++iter)->from, m_jit.label());
2173 emitPutArg(X86::eax, 0);
2174 emitPutArg(X86::edx, 4);
2175 emitCall(i, Machine::cti_op_loop_if_less);
2176 m_jit.testl_rr(X86::eax, X86::eax);
2177 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
2182 case op_put_by_id: {
2183 m_jit.link(iter->from, m_jit.label());
2184 m_jit.link((++iter)->from, m_jit.label());
2186 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
2187 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
2188 emitPutArg(X86::eax, 0);
2189 emitPutArg(X86::edx, 8);
2190 X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_put_by_id);
2192 // Track the location of the call; this will be used to recover repatch information.
2193 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
2194 m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
2195 ++structureIDInstructionIndex;
2200 case op_get_by_id: {
2201 // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
2202 // so that we only need track one pointer into the slow case code - we track a pointer to the location
2203 // of the call (which we can use to look up the repatch information), but should an array-length or
2204 // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
2205 // the distance from the call to the head of the slow case.
2207 m_jit.link(iter->from, m_jit.label());
2208 m_jit.link((++iter)->from, m_jit.label());
2211 X86Assembler::JmpDst coldPathBegin = m_jit.label();
2213 emitPutArg(X86::eax, 0);
2214 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
2215 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
2216 X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_get_by_id);
// The distance from coldPathBegin to the call is a compile-time constant the
// repatching code depends on; this ASSERT keeps codegen and constant in sync.
2217 ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
2218 emitPutResult(instruction[i + 1].u.operand);
2220 // Track the location of the call; this will be used to recover repatch information.
2221 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
2222 m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
2223 ++structureIDInstructionIndex;
// op_resolve_global emits no slow-case code, but still owns a structure-stub
// slot, so the index must be advanced to stay in sync with the main pass.
2228 case op_resolve_global: {
2229 ++structureIDInstructionIndex;
2233 case op_loop_if_lesseq: {
2234 emitSlowScriptCheck(i);
2236 unsigned target = instruction[i + 3].u.operand;
2237 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
// Same two variants as op_loop_if_less: constant-immediate src2, then
// register vs register.
2239 m_jit.link(iter->from, m_jit.label());
2240 emitPutArg(X86::edx, 0);
2241 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
2242 emitCall(i, Machine::cti_op_loop_if_lesseq);
2243 m_jit.testl_rr(X86::eax, X86::eax);
2244 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
2246 m_jit.link(iter->from, m_jit.label());
2247 m_jit.link((++iter)->from, m_jit.label());
2248 emitPutArg(X86::eax, 0);
2249 emitPutArg(X86::edx, 4);
2250 emitCall(i, Machine::cti_op_loop_if_lesseq);
2251 m_jit.testl_rr(X86::eax, X86::eax);
2252 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
// Pre-increment slow path: the fast path speculatively added the de-tagged
// constant one; subtract it back out (overflow case) before the helper call.
2258 unsigned srcDst = instruction[i + 1].u.operand;
2259 X86Assembler::JmpSrc notImm = iter->from;
2260 m_jit.link((++iter)->from, m_jit.label());
2261 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
2262 m_jit.link(notImm, m_jit.label());
2263 emitPutArg(X86::eax, 0);
2264 emitCall(i, Machine::cti_op_pre_inc);
2265 emitPutResult(srcDst);
2269 case op_put_by_val: {
2270 // Normal slow cases - either is not an immediate imm, or is an array.
2271 X86Assembler::JmpSrc notImm = iter->from;
2272 m_jit.link((++iter)->from, m_jit.label());
2273 m_jit.link((++iter)->from, m_jit.label());
2274 emitFastArithIntToImmNoCheck(X86::edx);
2275 m_jit.link(notImm, m_jit.label());
2276 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
2277 emitPutArg(X86::eax, 0);
2278 emitPutArg(X86::edx, 4);
2279 emitPutArg(X86::ecx, 8);
2280 emitCall(i, Machine::cti_op_put_by_val);
2281 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
2283 // slow cases for immediate int accesses to arrays
2284 m_jit.link((++iter)->from, m_jit.label());
2285 m_jit.link((++iter)->from, m_jit.label());
2286 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
2287 emitPutArg(X86::eax, 0);
2288 emitPutArg(X86::edx, 4);
2289 emitPutArg(X86::ecx, 8);
2290 emitCall(i, Machine::cti_op_put_by_val_array);
2295 case op_loop_if_true: {
2296 emitSlowScriptCheck(i);
2298 m_jit.link(iter->from, m_jit.label());
2299 emitPutArg(X86::eax, 0);
2300 emitCall(i, Machine::cti_op_jtrue);
2301 m_jit.testl_rr(X86::eax, X86::eax);
2302 unsigned target = instruction[i + 2].u.operand;
2303 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
// Pre-decrement slow path: undo the fast path's speculative subtract of one
// (addl) before calling the helper.
2308 unsigned srcDst = instruction[i + 1].u.operand;
2309 X86Assembler::JmpSrc notImm = iter->from;
2310 m_jit.link((++iter)->from, m_jit.label());
2311 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
2312 m_jit.link(notImm, m_jit.label());
2313 emitPutArg(X86::eax, 0);
2314 emitCall(i, Machine::cti_op_pre_dec);
2315 emitPutResult(srcDst);
// Jump-if-not-less: note the branch sense is inverted relative to
// loop_if_less - je is taken when cti_op_jless returns false.
2320 unsigned target = instruction[i + 3].u.operand;
2321 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
2323 m_jit.link(iter->from, m_jit.label());
2324 emitPutArg(X86::edx, 0);
2325 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
2326 emitCall(i, Machine::cti_op_jless);
2327 m_jit.testl_rr(X86::eax, X86::eax);
2328 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
2330 m_jit.link(iter->from, m_jit.label());
2331 m_jit.link((++iter)->from, m_jit.label());
2332 emitPutArg(X86::eax, 0);
2333 emitPutArg(X86::edx, 4);
2334 emitCall(i, Machine::cti_op_jless);
2335 m_jit.testl_rr(X86::eax, m_jit.label() == m_jit.label() ? X86::eax : X86::eax);
2336 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
// Logical not slow path: undo the fast path's xor of the bool tag before
// calling the helper.
2342 m_jit.link(iter->from, m_jit.label());
2343 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
2344 emitPutArg(X86::eax, 0);
2345 emitCall(i, Machine::cti_op_not);
2346 emitPutResult(instruction[i + 1].u.operand);
2351 m_jit.link(iter->from, m_jit.label());
2352 emitPutArg(X86::eax, 0);
2353 emitCall(i, Machine::cti_op_jtrue);
2354 m_jit.testl_rr(X86::eax, X86::eax);
2355 unsigned target = instruction[i + 2].u.operand;
2356 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 2 + target]); // inverted!
// Post-increment slow path: helper returns the old value in eax and the new
// value in edx; store both (result register and srcDst).
2361 unsigned srcDst = instruction[i + 2].u.operand;
2362 m_jit.link(iter->from, m_jit.label());
2363 m_jit.link((++iter)->from, m_jit.label());
2364 emitPutArg(X86::eax, 0);
2365 emitCall(i, Machine::cti_op_post_inc);
2366 emitPutResult(instruction[i + 1].u.operand);
2367 emitPutResult(srcDst, X86::edx);
2372 m_jit.link(iter->from, m_jit.label());
2373 emitPutArg(X86::eax, 0);
2374 emitCall(i, Machine::cti_op_bitnot);
2375 emitPutResult(instruction[i + 1].u.operand);
// Bitand slow path: three variants depending on which operand (if either)
// is a constant immediate.
2380 unsigned src1 = instruction[i + 2].u.operand;
2381 unsigned src2 = instruction[i + 3].u.operand;
2382 unsigned dst = instruction[i + 1].u.operand;
2383 if (getConstantImmediateNumericArg(src1)) {
2384 m_jit.link(iter->from, m_jit.label());
2385 emitGetPutArg(src1, 0, X86::ecx);
2386 emitPutArg(X86::eax, 4);
2387 emitCall(i, Machine::cti_op_bitand);
2389 } else if (getConstantImmediateNumericArg(src2)) {
2390 m_jit.link(iter->from, m_jit.label());
2391 emitPutArg(X86::eax, 0);
2392 emitGetPutArg(src2, 4, X86::ecx);
2393 emitCall(i, Machine::cti_op_bitand);
2396 m_jit.link(iter->from, m_jit.label());
2397 emitGetPutArg(src1, 0, X86::ecx);
2398 emitPutArg(X86::edx, 4);
2399 emitCall(i, Machine::cti_op_bitand);
2406 m_jit.link(iter->from, m_jit.label());
2407 emitPutArg(X86::eax, 0);
2408 emitCall(i, Machine::cti_op_jtrue);
2409 m_jit.testl_rr(X86::eax, X86::eax);
2410 unsigned target = instruction[i + 2].u.operand;
2411 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
// Post-decrement: mirrors post-increment above.
2416 unsigned srcDst = instruction[i + 2].u.operand;
2417 m_jit.link(iter->from, m_jit.label());
2418 m_jit.link((++iter)->from, m_jit.label());
2419 emitPutArg(X86::eax, 0);
2420 emitCall(i, Machine::cti_op_post_dec);
2421 emitPutResult(instruction[i + 1].u.operand);
2422 emitPutResult(srcDst, X86::edx);
2427 m_jit.link(iter->from, m_jit.label());
2428 emitPutArg(X86::eax, 0);
2429 emitPutArg(X86::edx, 4);
2430 emitCall(i, Machine::cti_op_bitxor);
2431 emitPutResult(instruction[i + 1].u.operand);
2436 m_jit.link(iter->from, m_jit.label());
2437 emitPutArg(X86::eax, 0);
2438 emitPutArg(X86::edx, 4);
2439 emitCall(i, Machine::cti_op_bitor);
2440 emitPutResult(instruction[i + 1].u.operand);
2445 m_jit.link(iter->from, m_jit.label());
2446 emitPutArg(X86::eax, 0);
2447 emitPutArg(X86::edx, 4);
2448 emitCall(i, Machine::cti_op_eq);
2449 emitPutResult(instruction[i + 1].u.operand);
2454 m_jit.link(iter->from, m_jit.label());
2455 emitPutArg(X86::eax, 0);
2456 emitPutArg(X86::edx, 4);
2457 emitCall(i, Machine::cti_op_neq);
2458 emitPutResult(instruction[i + 1].u.operand);
2462 CTI_COMPILE_BINARY_OP_SLOW_CASE(op_stricteq);
2463 CTI_COMPILE_BINARY_OP_SLOW_CASE(op_nstricteq);
2464 case op_instanceof: {
2465 m_jit.link(iter->from, m_jit.label());
2466 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
2467 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
2468 emitGetPutArg(instruction[i + 4].u.operand, 8, X86::ecx);
2469 emitCall(i, Machine::cti_op_instanceof);
2470 emitPutResult(instruction[i + 1].u.operand);
// Modulo slow path: the fast path de-tagged both operands, so re-tag them
// before calling the helper (the notImm jumps arrive with tags intact).
2475 X86Assembler::JmpSrc notImm1 = iter->from;
2476 X86Assembler::JmpSrc notImm2 = (++iter)->from;
2477 m_jit.link((++iter)->from, m_jit.label());
2478 emitFastArithReTagImmediate(X86::eax);
2479 emitFastArithReTagImmediate(X86::ecx);
2480 m_jit.link(notImm1, m_jit.label());
2481 m_jit.link(notImm2, m_jit.label());
2482 emitPutArg(X86::eax, 0);
2483 emitPutArg(X86::ecx, 4);
2484 emitCall(i, Machine::cti_op_mod);
2485 emitPutResult(instruction[i + 1].u.operand);
// Multiply slow path: constant-operand form calls the helper directly;
// otherwise defer to the shared binary-arith slow case.
2490 int dst = instruction[i + 1].u.operand;
2491 int src1 = instruction[i + 2].u.operand;
2492 int src2 = instruction[i + 3].u.operand;
2493 if (getConstantImmediateNumericArg(src1) || getConstantImmediateNumericArg(src2)) {
2494 m_jit.link(iter->from, m_jit.label());
2495 emitGetPutArg(src1, 0, X86::ecx);
2496 emitGetPutArg(src2, 4, X86::ecx);
2497 emitCall(i, Machine::cti_op_mul);
2500 compileBinaryArithOpSlowCase(op_mul, iter, dst, src1, src2, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
2507 case op_construct: {
2508 m_jit.link(iter->from, m_jit.label());
2510 // We jump to this slow case if the ctiCode for the codeBlock has not yet been generated; compile it now.
2511 emitCall(i, Machine::cti_vm_compile);
2512 emitCall(i, X86::eax);
2514 // Instead of checking for 0 we could initialize the CodeBlock::ctiCode to point to a trampoline that would trigger the translation.
2516 // Put the return value in dst. In the interpreter, op_ret does this.
2517 emitPutResult(instruction[i + 1].u.operand);
2523 ASSERT_NOT_REACHED();
// Resume in the main-pass code. NOTE(review): m_labels[i] is correct only if
// each case advanced i past its opcode before breaking (those increments are
// not visible in this extract) - confirm against the full source.
2527 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i]);
// Every structure-stub slot must have been consumed exactly once.
2530 ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
// Top-level driver: emit the prologue (stash the native return PC into the
// call frame header), run the three compilation passes, copy the generated
// code into its final buffer, then fix up everything that needs absolute
// addresses: switch tables, exception handlers, call sites, jsr sites, and
// the property-access stub info.
2533 void CTI::privateCompile()
2535 // Could use a popl_m, but would need to offset the following instruction if so.
2536 m_jit.popl_r(X86::ecx);
2537 emitPutToCallFrameHeader(X86::ecx, RegisterFile::ReturnPC);
2539 privateCompileMainPass();
2540 privateCompileLinkPass();
2541 privateCompileSlowCases();
2543 ASSERT(m_jmpTable.isEmpty());
// Copy the assembled code to its final, executable location; all relocation
// below is relative to this buffer.
2545 void* code = m_jit.copy();
2548 // Translate vPC offsets into addresses in JIT generated code, for switch tables.
2549 for (unsigned i = 0; i < m_switches.size(); ++i) {
2550 SwitchRecord record = m_switches[i];
2551 unsigned opcodeIndex = record.m_opcodeIndex;
2553 if (record.m_type != SwitchRecord::String) {
2554 ASSERT(record.m_type == SwitchRecord::Immediate || record.m_type == SwitchRecord::Character);
2555 ASSERT(record.m_jumpTable.m_simpleJumpTable->branchOffsets.size() == record.m_jumpTable.m_simpleJumpTable->ctiOffsets.size());
2557 record.m_jumpTable.m_simpleJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
2559 for (unsigned j = 0; j < record.m_jumpTable.m_simpleJumpTable->branchOffsets.size(); ++j) {
// A zero branch offset marks an unused table slot; route it to the default.
2560 unsigned offset = record.m_jumpTable.m_simpleJumpTable->branchOffsets[j];
2561 record.m_jumpTable.m_simpleJumpTable->ctiOffsets[j] = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_simpleJumpTable->ctiDefault;
2564 ASSERT(record.m_type == SwitchRecord::String);
2566 record.m_jumpTable.m_stringJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
2568 StringJumpTable::StringOffsetTable::iterator end = record.m_jumpTable.m_stringJumpTable->offsetTable.end();
2569 for (StringJumpTable::StringOffsetTable::iterator it = record.m_jumpTable.m_stringJumpTable->offsetTable.begin(); it != end; ++it) {
2570 unsigned offset = it->second.branchOffset;
2571 it->second.ctiOffset = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_stringJumpTable->ctiDefault;
// Resolve exception handler targets to native addresses.
2576 for (Vector<HandlerInfo>::iterator iter = m_codeBlock->exceptionHandlers.begin(); iter != m_codeBlock->exceptionHandlers.end(); ++iter)
2577 iter->nativeCode = m_jit.getRelocatedAddress(code, m_labels[iter->target]);
// Link all recorded calls, and remember the return-address -> vPC mapping so
// the runtime can recover the bytecode location from a native return address.
2579 for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
2581 X86Assembler::link(code, iter->from, iter->to);
2582 m_codeBlock->ctiReturnAddressVPCMap.add(m_jit.getRelocatedAddress(code, iter->from), iter->opcodeIndex);
2585 // Link absolute addresses for jsr
2586 for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
2587 X86Assembler::linkAbsoluteAddress(code, iter->addrPosition, iter->target);
// Publish the per-instruction stub locations gathered during compilation.
2589 for (unsigned i = 0; i < m_codeBlock->structureIDInstructions.size(); ++i) {
2590 StructureStubInfo& info = m_codeBlock->structureIDInstructions[i];
2591 info.callReturnLocation = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].callReturnLocation);
2592 info.hotPathBegin = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].hotPathBegin);
2595 m_codeBlock->ctiCode = code;
// Builds a specialized stub for a get_by_id that hits directly on the base
// object: verify the value is a cell with the expected StructureID, then load
// the property from its storage at cachedOffset.  Failure paths call the
// generic cti_op_get_by_id_fail; finally the original call site at
// returnAddress is repatched to invoke this stub.
2598 void CTI::privateCompileGetByIdSelf(StructureID* structureID, size_t cachedOffset, void* returnAddress)
2600 // Check eax is an object of the right StructureID.
2601 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2602 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2603 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2604 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2606 // Checks out okay! - getDirectOffset
2607 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2608 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
2611 void* code = m_jit.copy();
2614 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2615 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
// Track the stub so it can be freed later, then point the call site at it.
2617 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2619 ctiRepatchCallByReturnAddress(returnAddress, code);
// Builds a specialized stub for a get_by_id that is found on the direct
// prototype: verify the base object's StructureID and the prototype's
// StructureID, then load the property from the prototype's storage.  Two
// variants: the CTI_REPATCH_PIC form jumps back into the hot path on success
// and to the original slow case on failure; the non-PIC form returns through
// the normal call mechanism.
2622 void CTI::privateCompileGetByIdProto(StructureID* structureID, StructureID* prototypeStructureID, size_t cachedOffset, void* returnAddress)
2624 #if USE(CTI_REPATCH_PIC)
2625 StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
2627 // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
2628 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2630 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
2631 // referencing the prototype object - let's speculatively load its table nice and early!)
2632 JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
2633 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2634 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2636 // check eax is an object of the right StructureID.
2637 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2638 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2639 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2640 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2642 // Check the prototype object's StructureID had not changed.
2643 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2644 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
2645 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
2647 // Checks out okay! - getDirectOffset
2648 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::ecx);
2650 X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
2652 void* code = m_jit.copy();
2655 // Use the repatch information to link the failure cases back to the original slow case routine.
2656 void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
2657 X86Assembler::link(code, failureCases1, slowCaseBegin);
2658 X86Assembler::link(code, failureCases2, slowCaseBegin);
2659 X86Assembler::link(code, failureCases3, slowCaseBegin);
2661 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
2662 intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
2663 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
2665 // Track the stub we have created so that it will be deleted later.
2666 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2668 // Finally repatch the jump to slow case back in the hot path to jump here instead.
2669 // FIXME: should revert this repatching, on failure.
2670 intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
2671 X86Assembler::repatchBranchOffset(jmpLocation, code);
2673 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
2674 // referencing the prototype object - let's speculatively load its table nice and early!)
2675 JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
2676 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2677 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2679 // check eax is an object of the right StructureID.
2680 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2681 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2682 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2683 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2685 // Check the prototype object's StructureID had not changed.
2686 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2687 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
2688 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
2690 // Checks out okay! - getDirectOffset
2691 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
2695 void* code = m_jit.copy();
2698 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2699 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2700 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2702 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2704 ctiRepatchCallByReturnAddress(returnAddress, code);
// Builds a specialized stub for a get_by_id found 'count' links up the
// prototype chain: verify the base object's StructureID and every prototype
// StructureID along the chain, then load from the final prototype's storage.
// Any check failure falls back to the generic cti_op_get_by_id_fail.
2708 void CTI::privateCompileGetByIdChain(StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset, void* returnAddress)
2712 Vector<X86Assembler::JmpSrc> bucketsOfFail;
2714 // Check eax is an object of the right StructureID.
2715 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2716 bucketsOfFail.append(m_jit.emitUnlinkedJne());
2717 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2718 bucketsOfFail.append(m_jit.emitUnlinkedJne());
// Walk the chain, guarding each prototype's StructureID against mutation.
2720 StructureID* currStructureID = structureID;
2721 RefPtr<StructureID>* chainEntries = chain->head();
2722 JSObject* protoObject = 0;
2723 for (unsigned i = 0; i<count; ++i) {
2724 protoObject = static_cast<JSObject*>(currStructureID->prototypeForLookup(m_exec));
2725 currStructureID = chainEntries[i].get();
2727 // Check the prototype object's StructureID had not changed.
2728 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2729 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(currStructureID), static_cast<void*>(protoStructureIDAddress));
2730 bucketsOfFail.append(m_jit.emitUnlinkedJne());
2732 ASSERT(protoObject);
// All guards passed: load the property from the last prototype's storage.
2734 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2735 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2736 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
2739 bucketsOfFail.append(m_jit.emitUnlinkedJmp());
2741 void* code = m_jit.copy();
2744 for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
2745 X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2747 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2749 ctiRepatchCallByReturnAddress(returnAddress, code);
// Generate a dedicated stub for a cached put_by_id that overwrites an existing
// property (no structure transition needed).  eax holds the base cell and edx
// holds the value to store, per the convention visible in the stubs around it.
void CTI::privateCompilePutByIdReplace(StructureID* structureID, size_t cachedOffset, void* returnAddress)
    // check eax is an object of the right StructureID.
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();

    // checks out okay! - putDirectOffset: load the property storage pointer,
    // then store the value (edx) into the cached slot.
    m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);

    void* code = m_jit.copy();

    // Either failed guard falls back to the generic put_by_id fail handler.
    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));

    // Record the stub for later teardown, then repatch the call site to use it.
    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
// Out-of-line helper invoked from the put_by_id transition stub's slow path
// (see privateCompilePutByIdTransition, which pushes value, baseObject,
// cachedOffset and newStructureID before calling here): performs the structure
// transition, then writes the new property into the (reallocated) storage.
// The return statement is outside this view — presumably returns a JSValue*
// for the stub's benefit; confirm.
static JSValue* transitionObject(StructureID* newStructureID, size_t cachedOffset, JSObject* baseObject, JSValue* value)
    baseObject->transitionTo(newStructureID);
    baseObject->putDirectOffset(cachedOffset, value);
// A structure transition needs the out-of-line slow path (transitionObject)
// only when the property storage must be reallocated, i.e. when the two
// structures disagree on storage capacity.
static inline bool transitionWillNeedStorageRealloc(StructureID* oldStructureID, StructureID* newStructureID)
    return oldStructureID->propertyStorageCapacity() != newStructureID->propertyStorageCapacity();
// Generate a dedicated stub for a cached put_by_id that adds a new property,
// transitioning the base object from oldStructureID to newStructureID.
// The stub:
//   1. verifies the base cell (eax) still has oldStructureID;
//   2. walks the prototype chain (sIDC), verifying each prototype's structure
//      and type are unchanged (so the new property is not shadowed);
//   3. fast path (no storage realloc): adjusts structure refcounts, writes the
//      new StructureID into the cell and stores the value (edx) directly;
//      slow path: calls the transitionObject() helper above.
// Failed guards are routed to the generic cti_op_put_by_id_fail handler.
void CTI::privateCompilePutByIdTransition(StructureID* oldStructureID, StructureID* newStructureID, size_t cachedOffset, StructureIDChain* sIDC, void* returnAddress)
    Vector<X86Assembler::JmpSrc, 16> failureCases;
    // check eax is an object of the right StructureID.
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    failureCases.append(m_jit.emitUnlinkedJne());
    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(oldStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
    failureCases.append(m_jit.emitUnlinkedJne());
    Vector<X86Assembler::JmpSrc> successCases;

    // ecx = baseObject->m_structureID; bail unless it describes a real object.
    m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
    // proto(ecx) = baseObject->structureID()->prototype()
    m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
    failureCases.append(m_jit.emitUnlinkedJne());
    m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);

    // ecx = baseObject->m_structureID
    // Walk the recorded chain; ecx steps through prototype objects.  Reaching
    // null ends the walk successfully; any structure/type mismatch fails.
    for (RefPtr<StructureID>* it = sIDC->head(); *it; ++it) {
        // null check the prototype
        m_jit.cmpl_i32r(reinterpret_cast<intptr_t> (jsNull()), X86::ecx);
        successCases.append(m_jit.emitUnlinkedJe());

        // Check the structure id
        m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(it->get()), OBJECT_OFFSET(JSCell, m_structureID), X86::ecx);
        failureCases.append(m_jit.emitUnlinkedJne());

        // Advance: ecx = ecx->m_structureID; verify it is an object type, then
        // step ecx to that structure's prototype.
        m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::ecx, X86::ecx);
        m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
        failureCases.append(m_jit.emitUnlinkedJne());
        m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);

    // NOTE(review): the compare that this jne belongs to is not visible at this
    // point in the file — presumably a final post-loop check; confirm.
    failureCases.append(m_jit.emitUnlinkedJne());
    // All successCases (a null prototype was reached) resume here.
    for (unsigned i = 0; i < successCases.size(); ++i)
        m_jit.link(successCases[i], m_jit.label());

    X86Assembler::JmpSrc callTarget;
    // Fast case, don't need to do any heavy lifting, so don't bother making a call.
    if (!transitionWillNeedStorageRealloc(oldStructureID, newStructureID)) {
        // Assumes m_refCount can be decremented easily, refcount decrement is safe as
        // codeblock should ensure oldStructureID->m_refCount > 0
        // (The sub/add target the StructureID pointer itself, i.e. they assume
        // the refcount is the structure's first word — TODO confirm layout.)
        m_jit.subl_i8m(1, reinterpret_cast<void*>(oldStructureID));
        m_jit.addl_i8m(1, reinterpret_cast<void*>(newStructureID));
        m_jit.movl_i32m(reinterpret_cast<uint32_t>(newStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);

        // Store the value (edx) through the property storage pointer.
        m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
        m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
        // Slow case transition -- we're going to need to quite a bit of work,
        // so just make a call
        // Push args right-to-left for transitionObject(newStructureID,
        // cachedOffset, baseObject, value), call, then pop the four words.
        m_jit.pushl_r(X86::edx);
        m_jit.pushl_r(X86::eax);
        m_jit.movl_i32r(cachedOffset, X86::eax);
        m_jit.pushl_r(X86::eax);
        m_jit.movl_i32r(reinterpret_cast<uint32_t>(newStructureID), X86::eax);
        m_jit.pushl_r(X86::eax);
        callTarget = m_jit.emitCall();
        m_jit.addl_i32r(4 * sizeof(void*), X86::esp);

    // On any guard failure: restore the CTI argument pointer and jump to the
    // generic fail handler (linked below, once the code is copied).
    X86Assembler::JmpSrc failureJump;
    if (failureCases.size()) {
        for (unsigned i = 0; i < failureCases.size(); ++i)
            m_jit.link(failureCases[i], m_jit.label());
        m_jit.emitRestoreArgumentReferenceForTrampoline();
        failureJump = m_jit.emitUnlinkedJmp();

    void* code = m_jit.copy();

    if (failureCases.size())
        X86Assembler::link(code, failureJump, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));

    // Only the slow path emitted a call; link it to the helper above.
    if (transitionWillNeedStorageRealloc(oldStructureID, newStructureID))
        X86Assembler::link(code, callTarget, reinterpret_cast<void*>(transitionObject));

    // Record the stub and repatch the original call site to use it.
    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;

    ctiRepatchCallByReturnAddress(returnAddress, code);
// Generate the shared trampoline for reading 'length' from a JSArray.
// eax holds the base value on entry; on success eax holds the length,
// re-tagged as an immediate integer.
void* CTI::privateCompileArrayLengthTrampoline()
    // Check eax is an array: it must be a cell (TagMask clear), and its first
    // word (the vtable pointer) must match the JSArray vptr cached on Machine.
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
    m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();

    // Checks out okay! - get the length from the storage
    m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::eax);
    m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::eax, X86::eax);

    // Tag as an immediate int: eax = length * 2 + 1 (JSImmediate encoding).
    // jo catches lengths too large to be represented as an immediate; those
    // take the generic path instead.
    m_jit.addl_rr(X86::eax, X86::eax);
    X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
    m_jit.addl_i8r(1, X86::eax);

    void* code = m_jit.copy();

    // All failures fall back to the generic get_by_id handler.
    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
// Generate the shared trampoline for reading 'length' from a JSString.
// Mirrors the array-length trampoline above: vptr check, load, immediate-tag.
void* CTI::privateCompileStringLengthTrampoline()
    // Check eax is a string: a cell whose vtable pointer matches the JSString
    // vptr cached on the Machine.
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
    m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsStringVptr), X86::eax);
    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();

    // Checks out okay! - get the length from the UString rep.
    m_jit.movl_mr(OBJECT_OFFSET(JSString, m_value) + OBJECT_OFFSET(UString, m_rep), X86::eax, X86::eax);
    m_jit.movl_mr(OBJECT_OFFSET(UString::Rep, len), X86::eax, X86::eax);

    // Tag as an immediate int: eax = len * 2 + 1; jo routes lengths too large
    // for the immediate encoding to the generic path.
    m_jit.addl_rr(X86::eax, X86::eax);
    X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
    m_jit.addl_i8r(1, X86::eax);

    void* code = m_jit.copy();

    // All failures fall back to the generic get_by_id handler.
    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
// Repatch an existing get_by_id self-access fast path in place (no new stub):
// retarget the slow call so we never repatch twice, then rewrite the structure
// immediate and the property-offset displacement inside the hot path.
void CTI::patchGetByIdSelf(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
    StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);

    // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
    // Should probably go to Machine::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_get_by_id_generic));

    // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
    X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
    X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdStructureID, reinterpret_cast<uint32_t>(structureID));
// Repatch an existing put_by_id replace fast path in place (no new stub):
// retarget the slow call so we never repatch twice, then rewrite the structure
// immediate and the property-offset displacement inside the hot path.
void CTI::patchPutByIdReplace(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
    StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);

    // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to Machine::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_put_by_id_generic));

    // Repatch the offset into the property map to store to, then repatch the StructureID to look for.
    X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
    X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdStructureID, reinterpret_cast<uint32_t>(structureID));
// Patch a get_by_id site that turned out to be an array 'length' access.
// Builds a small out-of-line stub, links its failures back into the original
// slow case, links success back into the hot path (which performs the final
// store), and redirects the hot path's slow-case branch to the stub.
void CTI::privateCompilePatchGetArrayLength(void* returnAddress)
    StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);

    // We don't want to repatch more than once - send further misses straight
    // to the get_by_id fail handler.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));

    // Check eax is an array (cell whose vptr matches the cached JSArray vptr).
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
    m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();

    // Checks out okay! - get the length from the storage.  The result is built
    // in ecx (the hot path we jump back into performs the store from there —
    // see the 'success' link below; eax must stay intact for the slow case).
    m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
    m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::ecx, X86::ecx);

    // Tag as an immediate int: ecx = length * 2 + 1; jo catches overflow.
    m_jit.addl_rr(X86::ecx, X86::ecx);
    X86Assembler::JmpSrc failureClobberedECX = m_jit.emitUnlinkedJo();
    m_jit.addl_i8r(1, X86::ecx);

    X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();

    // The overflow path has already clobbered ecx, so it must re-materialize
    // the CTI argument pointer before heading to the slow case.
    m_jit.link(failureClobberedECX, m_jit.label());
    m_jit.emitRestoreArgumentReference();
    X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJmp();

    void* code = m_jit.copy();

    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
    X86Assembler::link(code, failureCases1, slowCaseBegin);
    X86Assembler::link(code, failureCases2, slowCaseBegin);
    X86Assembler::link(code, failureCases3, slowCaseBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));

    // Track the stub we have created so that it will be deleted later.
    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;

    // Finally repatch the jump to slow case back in the hot path to jump here instead.
    // FIXME: should revert this repatching, on failure.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
// Emit code to load a JSVariableObject register into 'dst':
// effectively dst = variableObject->d->registers[index].
void CTI::emitGetVariableObjectRegister(X86Assembler::RegisterID variableObject, int index, X86Assembler::RegisterID dst)
    // dst = variableObject->d
    m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, dst);
    // dst = d->registers
    m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), dst, dst);
    // dst = registers[index]
    m_jit.movl_mr(index * sizeof(Register), dst, dst);
// Emit code to store 'src' into a JSVariableObject register:
// effectively variableObject->d->registers[index] = src.
// Note: the 'variableObject' register is clobbered (reused for the
// intermediate pointer loads).
void CTI::emitPutVariableObjectRegister(X86Assembler::RegisterID src, X86Assembler::RegisterID variableObject, int index)
    // variableObject = variableObject->d
    m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, variableObject);
    // variableObject = d->registers
    m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), variableObject, variableObject);
    // registers[index] = src
    m_jit.movl_rm(src, index * sizeof(Register), variableObject);
3024 void* CTI::compileRegExp(ExecState* exec, const UString& pattern, unsigned* numSubpatterns_ptr, const char** error_ptr, bool ignoreCase, bool multiline)
3026 // TODO: better error messages
3027 if (pattern.size() > MaxPatternSize) {
3028 *error_ptr = "regular expression too large";
3032 X86Assembler jit(exec->machine()->jitCodeBuffer());
3033 WRECParser parser(pattern, ignoreCase, multiline, jit);
3035 jit.emitConvertToFastCall();
3037 // Preserve regs & initialize outputRegister.
3038 jit.pushl_r(WRECGenerator::outputRegister);
3039 jit.pushl_r(WRECGenerator::currentValueRegister);
3040 // push pos onto the stack, both to preserve and as a parameter available to parseDisjunction
3041 jit.pushl_r(WRECGenerator::currentPositionRegister);
3042 // load output pointer
3047 , X86::esp, WRECGenerator::outputRegister);
3049 // restart point on match fail.
3050 WRECGenerator::JmpDst nextLabel = jit.label();
3052 // (1) Parse Disjunction:
3054 // Parsing the disjunction should fully consume the pattern.
3055 JmpSrcVector failures;
3056 parser.parseDisjunction(failures);
3057 if (parser.isEndOfPattern()) {
3058 parser.m_err = WRECParser::Error_malformedPattern;
3061 // TODO: better error messages
3062 *error_ptr = "TODO: better error messages";
3067 // Set return value & pop registers from the stack.
3069 jit.testl_rr(WRECGenerator::outputRegister, WRECGenerator::outputRegister);
3070 WRECGenerator::JmpSrc noOutput = jit.emitUnlinkedJe();
3072 jit.movl_rm(WRECGenerator::currentPositionRegister, 4, WRECGenerator::outputRegister);
3073 jit.popl_r(X86::eax);
3074 jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
3075 jit.popl_r(WRECGenerator::currentValueRegister);
3076 jit.popl_r(WRECGenerator::outputRegister);
3079 jit.link(noOutput, jit.label());
3081 jit.popl_r(X86::eax);
3082 jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
3083 jit.popl_r(WRECGenerator::currentValueRegister);
3084 jit.popl_r(WRECGenerator::outputRegister);
3088 // All fails link to here. Progress the start point & if it is within scope, loop.
3089 // Otherwise, return fail value.
3090 WRECGenerator::JmpDst here = jit.label();
3091 for (unsigned i = 0; i < failures.size(); ++i)
3092 jit.link(failures[i], here);
3095 jit.movl_mr(X86::esp, WRECGenerator::currentPositionRegister);
3096 jit.addl_i8r(1, WRECGenerator::currentPositionRegister);
3097 jit.movl_rm(WRECGenerator::currentPositionRegister, X86::esp);
3098 jit.cmpl_rr(WRECGenerator::lengthRegister, WRECGenerator::currentPositionRegister);
3099 jit.link(jit.emitUnlinkedJle(), nextLabel);
3101 jit.addl_i8r(4, X86::esp);
3103 jit.movl_i32r(-1, X86::eax);
3104 jit.popl_r(WRECGenerator::currentValueRegister);
3105 jit.popl_r(WRECGenerator::outputRegister);
3108 *numSubpatterns_ptr = parser.m_numSubpatterns;
3110 void* code = jit.copy();
3115 #endif // ENABLE(WREC)
3119 #endif // ENABLE(CTI)