2 * Copyright (C) 2008 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "CodeBlock.h"
33 #include "JSFunction.h"
35 #include "wrec/WREC.h"
36 #include "ResultType.h"
38 #include <sys/sysctl.h>
// --- SSE2 availability detection (fragmentary in this extract) ---
// NOTE(review): the enclosing function/struct declarations were lost in
// extraction. The visible lines appear to be: the Mac fast path, the CPUID
// edx feature-bit constant, an MSVC inline-asm probe, and a cached result
// object — confirm against the full file.
48 return true; // All X86 Macs are guaranteed to support at least SSE2
// Bit 26 of the CPUID leaf-1 edx feature flags indicates SSE2 support.
53 static const int SSE2FeatureBit = 1 << 26;
60 mov eax, 1 // cpuid function 1 gives us the standard feature set
66 // FIXME: Add GCC code to do above asm
68 present = (flags & SSE2FeatureBit) != 0;
// Function-local static: the CPUID probe runs once and the result is cached.
72 static SSE2Check check;
77 #if COMPILER(GCC) && PLATFORM(X86)
// ctiTrampoline: the C++ -> JIT entry point. It reserves the 0x24-byte CTI
// scratch/argument area, seeds esi with 512 (the tick counter decremented by
// emitSlowScriptCheck below), loads the callframe Register* into edi from the
// CTI_ARGS_r stack slot, then calls through the generated-code pointer held
// in the CTI_ARGS_code slot.
80 ".globl _ctiTrampoline" "\n"
81 "_ctiTrampoline:" "\n"
84 "subl $0x24, %esp" "\n"
85 "movl $512, %esi" "\n"
86 "movl 0x38(%esp), %edi" "\n" // Ox38 = 0x0E * 4, 0x0E = CTI_ARGS_r
87 "call *0x30(%esp)" "\n" // Ox30 = 0x0C * 4, 0x0C = CTI_ARGS_code
88 "addl $0x24, %esp" "\n"
// ctiVMThrowTrampoline: re-enters C++ (Machine::cti_vm_throw) when the VM
// throws, then pops the scratch area before returning to the C++ caller.
// NOTE(review): the asm(...) wrapper and 'ret' lines are missing from this
// extract.
95 ".globl _ctiVMThrowTrampoline" "\n"
96 "_ctiVMThrowTrampoline:" "\n"
97 "call __ZN3JSC7Machine12cti_vm_throwEPv" "\n"
98 "addl $0x24, %esp" "\n"
// MSVC variant of the C++ -> JIT entry trampoline ('naked': no compiler
// prologue/epilogue). NOTE(review): most of the body is missing from this
// extract; only the edi <- CTI_ARGS_r callframe load is visible.
108 __declspec(naked) JSValue* ctiTrampoline(void* code, RegisterFile*, Register*, JSValue** exception, Profiler**, JSGlobalData*)
116 mov edi, [esp + 0x38];
// MSVC variant of the VM-throw trampoline. NOTE(review): the stack-cleanup
// and 'ret' lines are missing from this extract.
125 __declspec(naked) void ctiVMThrowTrampoline()
129 call JSC::Machine::cti_vm_throw;
142 ALWAYS_INLINE bool CTI::isConstant(int src)
144 return src >= m_codeBlock->numVars && src < m_codeBlock->numVars + m_codeBlock->numConstants;
147 ALWAYS_INLINE JSValue* CTI::getConstant(ExecState* exec, int src)
149 return m_codeBlock->constantRegisters[src - m_codeBlock->numVars].jsValue(exec);
152 // get arg puts an arg from the SF register array into a h/w register
153 ALWAYS_INLINE void CTI::emitGetArg(int src, X86Assembler::RegisterID dst)
155 // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
156 if (isConstant(src)) {
157 JSValue* js = getConstant(m_exec, src);
158 m_jit.movl_i32r(reinterpret_cast<unsigned>(js), dst);
160 m_jit.movl_mr(src * sizeof(Register), X86::edi, dst);
163 // get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
164 ALWAYS_INLINE void CTI::emitGetPutArg(unsigned src, unsigned offset, X86Assembler::RegisterID scratch)
166 if (isConstant(src)) {
167 JSValue* js = getConstant(m_exec, src);
168 m_jit.movl_i32m(reinterpret_cast<unsigned>(js), offset + sizeof(void*), X86::esp);
170 m_jit.movl_mr(src * sizeof(Register), X86::edi, scratch);
171 m_jit.movl_rm(scratch, offset + sizeof(void*), X86::esp);
175 // puts an arg onto the stack, as an arg to a context threaded function.
176 ALWAYS_INLINE void CTI::emitPutArg(X86Assembler::RegisterID src, unsigned offset)
178 m_jit.movl_rm(src, offset + sizeof(void*), X86::esp);
181 ALWAYS_INLINE void CTI::emitPutArgConstant(unsigned value, unsigned offset)
183 m_jit.movl_i32m(value, offset + sizeof(void*), X86::esp);
186 ALWAYS_INLINE JSValue* CTI::getConstantImmediateNumericArg(unsigned src)
188 if (isConstant(src)) {
189 JSValue* js = getConstant(m_exec, src);
190 return JSImmediate::isNumber(js) ? js : 0;
195 ALWAYS_INLINE void CTI::emitPutCTIParam(void* value, unsigned name)
197 m_jit.movl_i32m(reinterpret_cast<intptr_t>(value), name * sizeof(void*), X86::esp);
200 ALWAYS_INLINE void CTI::emitPutCTIParam(X86Assembler::RegisterID from, unsigned name)
202 m_jit.movl_rm(from, name * sizeof(void*), X86::esp);
205 ALWAYS_INLINE void CTI::emitGetCTIParam(unsigned name, X86Assembler::RegisterID to)
207 m_jit.movl_mr(name * sizeof(void*), X86::esp, to);
210 ALWAYS_INLINE void CTI::emitPutToCallFrameHeader(X86Assembler::RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
212 m_jit.movl_rm(from, entry * sizeof(Register), X86::edi);
215 ALWAYS_INLINE void CTI::emitGetFromCallFrameHeader(RegisterFile::CallFrameHeaderEntry entry, X86Assembler::RegisterID to)
217 m_jit.movl_mr(entry * sizeof(Register), X86::edi, to);
220 ALWAYS_INLINE void CTI::emitPutResult(unsigned dst, X86Assembler::RegisterID from)
222 m_jit.movl_rm(from, dst * sizeof(Register), X86::edi);
223 // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
226 ALWAYS_INLINE void CTI::emitInitRegister(unsigned dst)
228 m_jit.movl_i32m(reinterpret_cast<unsigned>(jsUndefined()), dst * sizeof(Register), X86::edi);
229 // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
232 #if ENABLE(SAMPLING_TOOL)
// Flag read by the sampling tool: set to 1 while generated code is inside a
// CTI helper call (see the emitCall overloads, which toggle it).
233 unsigned inCalledCode = 0;
// Overwrites the return address stored at 'where' so control resumes at
// 'what' instead; used by the CTI to redirect control flow after the fact.
void ctiSetReturnAddress(void** where, void* what)
{
    *where = what;
}
// Rewrites the relative operand of the call instruction whose return address
// is 'where' so that the call targets 'what'. The operand occupies the slot
// immediately before the return address and is encoded relative to it.
void ctiRepatchCallByReturnAddress(void* where, void* what)
{
    uintptr_t relativeTarget = reinterpret_cast<uintptr_t>(what) - reinterpret_cast<uintptr_t>(where);
    static_cast<void**>(where)[-1] = reinterpret_cast<void*>(relativeTarget);
}
// Debug aid: prints a one-character type classification for each constant
// operand of the current opcode ('i' immediate number, 'b' boolean,
// 'u' undefined, 'n' null, 's' string, 'o' object, '*' non-constant).
// NOTE(review): the 'char which1/which2' declarations and assignment lines
// were dropped by the extraction — confirm against the full file.
248 void CTI::printOpcodeOperandTypes(unsigned src1, unsigned src2)
251 if (isConstant(src1)) {
252 JSValue* js = getConstant(m_exec, src1);
254 JSImmediate::isImmediate(js) ?
255 (JSImmediate::isNumber(js) ? 'i' :
256 JSImmediate::isBoolean(js) ? 'b' :
257 js->isUndefined() ? 'u' :
258 js->isNull() ? 'n' : '?')
260 (js->isString() ? 's' :
261 js->isObject() ? 'o' :
265 if (isConstant(src2)) {
266 JSValue* js = getConstant(m_exec, src2);
268 JSImmediate::isImmediate(js) ?
269 (JSImmediate::isNumber(js) ? 'i' :
270 JSImmediate::isBoolean(js) ? 'b' :
271 js->isUndefined() ? 'u' :
272 js->isNull() ? 'n' : '?')
274 (js->isString() ? 's' :
275 js->isObject() ? 'o' :
// Bitwise '|' (not '||') below: both operands are comparison results (0/1),
// so the result is the same; '||' would be the clearer spelling.
278 if ((which1 != '*') | (which2 != '*'))
279 fprintf(stderr, "Types %c %c\n", which1, which2);
284 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, X86::RegisterID r)
286 m_jit.emitRestoreArgumentReference();
287 X86Assembler::JmpSrc call = m_jit.emitCall(r);
288 m_calls.append(CallRecord(call, opcodeIndex));
293 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_j helper)
295 #if ENABLE(SAMPLING_TOOL)
296 m_jit.movl_i32m(1, &inCalledCode);
298 m_jit.emitRestoreArgumentReference();
299 X86Assembler::JmpSrc call = m_jit.emitCall();
300 m_calls.append(CallRecord(call, helper, opcodeIndex));
301 #if ENABLE(SAMPLING_TOOL)
302 m_jit.movl_i32m(0, &inCalledCode);
308 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_p helper)
310 #if ENABLE(SAMPLING_TOOL)
311 m_jit.movl_i32m(1, &inCalledCode);
313 m_jit.emitRestoreArgumentReference();
314 X86Assembler::JmpSrc call = m_jit.emitCall();
315 m_calls.append(CallRecord(call, helper, opcodeIndex));
316 #if ENABLE(SAMPLING_TOOL)
317 m_jit.movl_i32m(0, &inCalledCode);
323 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_b helper)
325 #if ENABLE(SAMPLING_TOOL)
326 m_jit.movl_i32m(1, &inCalledCode);
328 m_jit.emitRestoreArgumentReference();
329 X86Assembler::JmpSrc call = m_jit.emitCall();
330 m_calls.append(CallRecord(call, helper, opcodeIndex));
331 #if ENABLE(SAMPLING_TOOL)
332 m_jit.movl_i32m(0, &inCalledCode);
338 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_v helper)
340 #if ENABLE(SAMPLING_TOOL)
341 m_jit.movl_i32m(1, &inCalledCode);
343 m_jit.emitRestoreArgumentReference();
344 X86Assembler::JmpSrc call = m_jit.emitCall();
345 m_calls.append(CallRecord(call, helper, opcodeIndex));
346 #if ENABLE(SAMPLING_TOOL)
347 m_jit.movl_i32m(0, &inCalledCode);
353 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_s helper)
355 #if ENABLE(SAMPLING_TOOL)
356 m_jit.movl_i32m(1, &inCalledCode);
358 m_jit.emitRestoreArgumentReference();
359 X86Assembler::JmpSrc call = m_jit.emitCall();
360 m_calls.append(CallRecord(call, helper, opcodeIndex));
361 #if ENABLE(SAMPLING_TOOL)
362 m_jit.movl_i32m(0, &inCalledCode);
368 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_2 helper)
370 #if ENABLE(SAMPLING_TOOL)
371 m_jit.movl_i32m(1, &inCalledCode);
373 m_jit.emitRestoreArgumentReference();
374 X86Assembler::JmpSrc call = m_jit.emitCall();
375 m_calls.append(CallRecord(call, helper, opcodeIndex));
376 #if ENABLE(SAMPLING_TOOL)
377 m_jit.movl_i32m(0, &inCalledCode);
383 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotJSCell(X86Assembler::RegisterID reg, unsigned opcodeIndex)
385 m_jit.testl_i32r(JSImmediate::TagMask, reg);
386 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), opcodeIndex));
389 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImmNum(X86Assembler::RegisterID reg, unsigned opcodeIndex)
391 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, reg);
392 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), opcodeIndex));
395 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImmNums(X86Assembler::RegisterID reg1, X86Assembler::RegisterID reg2, unsigned opcodeIndex)
397 m_jit.movl_rr(reg1, X86::ecx);
398 m_jit.andl_rr(reg2, X86::ecx);
399 emitJumpSlowCaseIfNotImmNum(X86::ecx, opcodeIndex);
402 ALWAYS_INLINE unsigned CTI::getDeTaggedConstantImmediate(JSValue* imm)
404 ASSERT(JSImmediate::isNumber(imm));
405 return reinterpret_cast<unsigned>(imm) & ~JSImmediate::TagBitTypeInteger;
408 ALWAYS_INLINE void CTI::emitFastArithDeTagImmediate(X86Assembler::RegisterID reg)
410 // op_mod relies on this being a sub - setting zf if result is 0.
411 m_jit.subl_i8r(JSImmediate::TagBitTypeInteger, reg);
414 ALWAYS_INLINE void CTI::emitFastArithReTagImmediate(X86Assembler::RegisterID reg)
416 m_jit.addl_i8r(JSImmediate::TagBitTypeInteger, reg);
419 ALWAYS_INLINE void CTI::emitFastArithPotentiallyReTagImmediate(X86Assembler::RegisterID reg)
421 m_jit.orl_i32r(JSImmediate::TagBitTypeInteger, reg);
424 ALWAYS_INLINE void CTI::emitFastArithImmToInt(X86Assembler::RegisterID reg)
426 m_jit.sarl_i8r(1, reg);
429 ALWAYS_INLINE void CTI::emitFastArithIntToImmOrSlowCase(X86Assembler::RegisterID reg, unsigned opcodeIndex)
431 m_jit.addl_rr(reg, reg);
432 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), opcodeIndex));
433 emitFastArithReTagImmediate(reg);
436 ALWAYS_INLINE void CTI::emitFastArithIntToImmNoCheck(X86Assembler::RegisterID reg)
438 m_jit.addl_rr(reg, reg);
439 emitFastArithReTagImmediate(reg);
442 ALWAYS_INLINE void CTI::emitTagAsBoolImmediate(X86Assembler::RegisterID reg)
444 m_jit.shl_i8r(JSImmediate::ExtendedPayloadShift, reg);
445 m_jit.orl_i32r(JSImmediate::FullTagTypeBool, reg);
// CTI compiler constructor: sizes the per-instruction label table and the
// structure-stub compilation records from the code block.
// NOTE(review): some initializer-list entries were dropped by the extraction
// (presumably m_machine/m_exec, given their use throughout this file) —
// confirm against the full source.
448 CTI::CTI(Machine* machine, ExecState* exec, CodeBlock* codeBlock)
449 : m_jit(machine->jitCodeBuffer())
452 , m_codeBlock(codeBlock)
453 , m_labels(codeBlock ? codeBlock->instructions.size() : 0)
454 , m_structureStubCompilationInfo(codeBlock ? codeBlock->structureIDInstructions.size() : 0)
// Generic two-operand opcode: push both operands as helper arguments, call
// cti_<name>, and store the returned value into the dst virtual register.
// NOTE(review): the 'case'/'break' framing lines of this macro are missing
// from the extract. Comments are kept outside the #define on purpose — a
// '//' comment inside a backslash-continued macro would swallow the
// following lines.
458 #define CTI_COMPILE_BINARY_OP(name) \
460 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
461 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
462 emitCall(i, Machine::cti_##name); \
463 emitPutResult(instruction[i + 1].u.operand); \
// Generic one-operand opcode: push the operand, call cti_<name>, store the
// result. Same caveats as CTI_COMPILE_BINARY_OP above (comments must stay
// outside the backslash-continued macro; framing lines missing from extract).
468 #define CTI_COMPILE_UNARY_OP(name) \
470 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
471 emitCall(i, Machine::cti_##name); \
472 emitPutResult(instruction[i + 1].u.operand); \
477 #if ENABLE(SAMPLING_TOOL)
// Opcode currently being compiled/executed, exported for the sampling tool;
// -1 (cast to OpcodeID) means "not in generated code".
478 OpcodeID currentOpcodeID = static_cast<OpcodeID>(-1);
481 void CTI::compileOpCallInitializeCallFrame(unsigned callee, unsigned argCount)
483 emitGetArg(callee, X86::ecx); // Load callee JSFunction into ecx
484 m_jit.movl_rm(X86::eax, RegisterFile::CodeBlock * static_cast<int>(sizeof(Register)), X86::edx); // callee CodeBlock was returned in eax
485 m_jit.movl_i32m(reinterpret_cast<unsigned>(nullJSValue), RegisterFile::OptionalCalleeArguments * static_cast<int>(sizeof(Register)), X86::edx);
486 m_jit.movl_rm(X86::ecx, RegisterFile::Callee * static_cast<int>(sizeof(Register)), X86::edx);
488 m_jit.movl_mr(OBJECT_OFFSET(JSFunction, m_scopeChain) + OBJECT_OFFSET(ScopeChain, m_node), X86::ecx, X86::ecx); // newScopeChain
489 m_jit.movl_i32m(argCount, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register)), X86::edx);
490 m_jit.movl_rm(X86::edi, RegisterFile::CallerRegisters * static_cast<int>(sizeof(Register)), X86::edx);
491 m_jit.movl_rm(X86::ecx, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register)), X86::edx);
// Compiles op_call / op_call_eval / op_construct: stages the helper
// arguments, performs a fast vptr check for a JSFunction callee, and either
// enters generated code directly or falls back to a helper for host
// functions / not-yet-compiled callees.
// NOTE(review): brace/else framing lines are missing from this extract, so
// the exact branch structure must be confirmed against the full file.
494 void CTI::compileOpCall(Instruction* instruction, unsigned i, CompileOpCallType type)
496 int dst = instruction[i + 1].u.operand;
497 int callee = instruction[i + 2].u.operand;
498 int firstArg = instruction[i + 4].u.operand;
499 int argCount = instruction[i + 5].u.operand;
500 int registerOffset = instruction[i + 6].u.operand;
502 if (type == OpCallEval)
503 emitGetPutArg(instruction[i + 3].u.operand, 16, X86::ecx);
// Argument slots differ by opcode: construct passes five args, call four.
505 if (type == OpConstruct) {
506 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 20);
507 emitPutArgConstant(argCount, 16);
508 emitPutArgConstant(registerOffset, 12);
509 emitPutArgConstant(firstArg, 8);
510 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
512 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 12);
513 emitPutArgConstant(argCount, 8);
514 emitPutArgConstant(registerOffset, 4);
// Materialize 'this': the global object when the marker says it is missing,
// otherwise copy the supplied value into the first argument register.
516 int thisVal = instruction[i + 3].u.operand;
517 if (thisVal == missingThisObjectMarker()) {
518 // FIXME: should this be loaded dynamically off m_exec?
519 m_jit.movl_i32m(reinterpret_cast<unsigned>(m_exec->globalThisValue()), firstArg * sizeof(Register), X86::edi);
521 emitGetArg(thisVal, X86::ecx);
522 emitPutResult(firstArg, X86::ecx);
// Eval first tries the cti_op_call_eval helper; impossibleValue() in eax
// signals "was not actually an eval", in which case we fall through to the
// ordinary call path.
526 X86Assembler::JmpSrc wasEval;
527 if (type == OpCallEval) {
528 emitGetPutArg(callee, 0, X86::ecx);
529 emitCall(i, Machine::cti_op_call_eval);
531 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(JSImmediate::impossibleValue()), X86::eax);
532 wasEval = m_jit.emitUnlinkedJne();
534 // this sets up the first arg to op_cti_call (func), and explicitly leaves the value in ecx (checked just below).
535 emitGetArg(callee, X86::ecx);
537 // this sets up the first arg to op_cti_call (func), and explicitly leaves the value in ecx (checked just below).
538 emitGetPutArg(callee, 0, X86::ecx);
541 // Fast check for JS function.
542 m_jit.testl_i32r(JSImmediate::TagMask, X86::ecx);
543 X86Assembler::JmpSrc isNotObject = m_jit.emitUnlinkedJne();
544 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsFunctionVptr), X86::ecx);
545 X86Assembler::JmpSrc isJSFunction = m_jit.emitUnlinkedJe();
546 m_jit.link(isNotObject, m_jit.label());
548 // This handles host functions
549 emitCall(i, ((type == OpConstruct) ? Machine::cti_op_construct_NotJSConstruct : Machine::cti_op_call_NotJSFunction));
551 X86Assembler::JmpSrc wasNotJSFunction = m_jit.emitUnlinkedJmp();
552 m_jit.link(isJSFunction, m_jit.label());
554 // This handles JSFunctions
555 emitCall(i, (type == OpConstruct) ? Machine::cti_op_construct_JSConstruct : Machine::cti_op_call_JSFunction);
557 compileOpCallInitializeCallFrame(callee, argCount);
559 // load ctiCode from the new codeBlock.
560 m_jit.movl_mr(OBJECT_OFFSET(CodeBlock, ctiCode), X86::eax, X86::eax);
562 // Setup the new value of 'r' in edi, and on the stack, too.
563 emitPutCTIParam(X86::edx, CTI_ARGS_r);
564 m_jit.movl_rr(X86::edx, X86::edi);
566 // Check the ctiCode has been generated - if not, this is handled in a slow case.
567 m_jit.testl_rr(X86::eax, X86::eax);
568 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
569 emitCall(i, X86::eax);
// All paths (helper call, JS call, eval) converge here; the result in eax is
// then stored to 'dst' (storing line missing from this extract).
571 X86Assembler::JmpDst end = m_jit.label();
572 m_jit.link(wasNotJSFunction, end);
573 if (type == OpCallEval)
574 m_jit.link(wasEval, end);
576 // Put the return value in dst. In the interpreter, op_ret does this.
// Compiles op_stricteq / op_nstricteq. Fast path: when both operands are
// immediates a raw pointer compare suffices; mixed immediate/cell pairs can
// be decided without a call unless the zero immediate is involved (which is
// ambiguous and goes to the slow case).
// NOTE(review): the 'if (negated) ... else' lines selecting setne vs sete,
// and the final result store, are missing from this extract.
580 void CTI::compileOpStrictEq(Instruction* instruction, unsigned i, CompileOpStrictEqType type)
582 bool negated = (type == OpNStrictEq);
584 unsigned dst = instruction[i + 1].u.operand;
585 unsigned src1 = instruction[i + 2].u.operand;
586 unsigned src2 = instruction[i + 3].u.operand;
588 emitGetArg(src1, X86::eax);
589 emitGetArg(src2, X86::edx);
// Route non-immediates to dedicated paths below.
591 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
592 X86Assembler::JmpSrc firstNotImmediate = m_jit.emitUnlinkedJe();
593 m_jit.testl_i32r(JSImmediate::TagMask, X86::edx);
594 X86Assembler::JmpSrc secondNotImmediate = m_jit.emitUnlinkedJe();
// Both immediates: pointer equality decides, then tag the 0/1 as a boolean.
596 m_jit.cmpl_rr(X86::edx, X86::eax);
598 m_jit.setne_r(X86::eax);
600 m_jit.sete_r(X86::eax);
601 m_jit.movzbl_rr(X86::eax, X86::eax);
602 emitTagAsBoolImmediate(X86::eax);
604 X86Assembler::JmpSrc bothWereImmediates = m_jit.emitUnlinkedJmp();
// First operand was a cell: result is 'negated' unless the second operand is
// a non-immediate or the zero immediate (both ambiguous -> slow case).
606 m_jit.link(firstNotImmediate, m_jit.label());
608 // check that edx is immediate but not the zero immediate
609 m_jit.testl_i32r(JSImmediate::TagMask, X86::edx);
610 m_jit.setz_r(X86::ecx);
611 m_jit.movzbl_rr(X86::ecx, X86::ecx); // ecx is now 1 if edx was nonimmediate
612 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::edx);
613 m_jit.sete_r(X86::edx);
614 m_jit.movzbl_rr(X86::edx, X86::edx); // edx is now 1 if edx was the 0 immediate
615 m_jit.orl_rr(X86::ecx, X86::edx);
617 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJnz(), i));
619 m_jit.movl_i32r(reinterpret_cast<uint32_t>(jsBoolean(negated)), X86::eax);
621 X86Assembler::JmpSrc firstWasNotImmediate = m_jit.emitUnlinkedJmp();
// Second operand was a cell (first known immediate): only the zero immediate
// is ambiguous.
623 m_jit.link(secondNotImmediate, m_jit.label());
624 // check that eax is not the zero immediate (we know it must be immediate)
625 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
626 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
628 m_jit.movl_i32r(reinterpret_cast<uint32_t>(jsBoolean(negated)), X86::eax);
630 m_jit.link(bothWereImmediates, m_jit.label());
631 m_jit.link(firstWasNotImmediate, m_jit.label());
636 void CTI::emitSlowScriptCheck(unsigned opcodeIndex)
638 m_jit.subl_i8r(1, X86::esi);
639 X86Assembler::JmpSrc skipTimeout = m_jit.emitUnlinkedJne();
640 emitCall(opcodeIndex, Machine::cti_timeout_check);
642 emitGetCTIParam(CTI_ARGS_globalData, X86::ecx);
643 m_jit.movl_mr(OBJECT_OFFSET(JSGlobalData, machine), X86::ecx, X86::ecx);
644 m_jit.movl_mr(OBJECT_OFFSET(Machine, m_ticksUntilNextTimeoutCheck), X86::ecx, X86::esi);
645 m_jit.link(skipTimeout, m_jit.label());
649 This is required since number representation is canonical - values representable as a JSImmediate should not be stored in a JSNumberCell.
651 In the common case, the double value from 'xmmSource' is written to the reusable JSNumberCell pointed to by 'jsNumberCell', then 'jsNumberCell'
652 is written to the output SF Register 'dst', and then a jump is planted (stored into *wroteJSNumberCell).
654 However if the value from xmmSource is representable as a JSImmediate, then the JSImmediate value will be written to the output, and flow
655 control will fall through from the code planted.
657 void CTI::putDoubleResultToJSNumberCellOrJSImmediate(X86::XMMRegisterID xmmSource, X86::RegisterID jsNumberCell, unsigned dst, X86Assembler::JmpSrc* wroteJSNumberCell, X86::XMMRegisterID tempXmm, X86::RegisterID tempReg1, X86::RegisterID tempReg2)
659 // convert (double -> JSImmediate -> double), and check if the value is unchanged - in which case the value is representable as a JSImmediate.
660 m_jit.cvttsd2si_rr(xmmSource, tempReg1);
661 m_jit.addl_rr(tempReg1, tempReg1);
662 m_jit.sarl_i8r(1, tempReg1);
663 m_jit.cvtsi2sd_rr(tempReg1, tempXmm);
664 // Compare & branch if immediate.
665 m_jit.ucomis_rr(tempXmm, xmmSource);
666 X86Assembler::JmpSrc resultIsImm = m_jit.emitUnlinkedJe();
667 X86Assembler::JmpDst resultLookedLikeImmButActuallyIsnt = m_jit.label();
669 // Store the result to the JSNumberCell and jump.
670 m_jit.movsd_rm(xmmSource, OBJECT_OFFSET(JSNumberCell, m_value), jsNumberCell);
671 emitPutResult(dst, jsNumberCell);
672 *wroteJSNumberCell = m_jit.emitUnlinkedJmp();
674 m_jit.link(resultIsImm, m_jit.label());
675 // value == (double)(JSImmediate)value... or at least, it looks that way...
676 // ucomi will report that (0 == -0), and will report true if either input in NaN (result is unordered).
677 m_jit.link(m_jit.emitUnlinkedJp(), resultLookedLikeImmButActuallyIsnt); // Actually was a NaN
678 m_jit.pextrw_irr(3, xmmSource, tempReg2);
679 m_jit.cmpl_i32r(0x8000, tempReg2);
680 m_jit.link(m_jit.emitUnlinkedJe(), resultLookedLikeImmButActuallyIsnt); // Actually was -0
681 // Yes it really really really is representable as a JSImmediate.
682 emitFastArithIntToImmNoCheck(tempReg1);
683 emitPutResult(dst, X86::ecx);
// Compiles the fast paths for op_add / op_sub / op_mul. Three strategies:
// (a) src2 is a reusable JSNumberCell -> do the math in SSE2 and try to
//     reuse src2's cell for the result; (b) symmetric case for src1;
// (c) both integer immediates -> tagged integer arithmetic with overflow
//     checks. Anything else becomes a slow case.
// NOTE(review): brace/else framing lines are missing from this extract; the
// branch nesting must be confirmed against the full file.
686 void CTI::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types, unsigned i)
688 StructureID* numberStructureID = m_exec->globalData().numberStructureID.get();
689 X86Assembler::JmpSrc wasJSNumberCell1, wasJSNumberCell1b, wasJSNumberCell2, wasJSNumberCell2b;
691 emitGetArg(src1, X86::eax);
692 emitGetArg(src2, X86::edx);
// --- Strategy (a): src2 reusable number cell ---
694 if (types.second().isReusable() && isSSE2Present()) {
695 ASSERT(types.second().mightBeNumber());
697 // Check op2 is a number
698 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::edx);
699 X86Assembler::JmpSrc op2imm = m_jit.emitUnlinkedJne();
700 if (!types.second().definitelyIsNumber()) {
701 emitJumpSlowCaseIfNotJSCell(X86::edx, i);
702 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::edx);
703 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
706 // (1) In this case src2 is a reusable number cell.
707 // Slow case if src1 is not a number type.
708 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
709 X86Assembler::JmpSrc op1imm = m_jit.emitUnlinkedJne();
710 if (!types.first().definitelyIsNumber()) {
711 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
712 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
713 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
716 // (1a) if we get here, src1 is also a number cell
717 m_jit.movsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
718 X86Assembler::JmpSrc loadedDouble = m_jit.emitUnlinkedJmp();
719 // (1b) if we get here, src1 is an immediate
720 m_jit.link(op1imm, m_jit.label());
721 emitFastArithImmToInt(X86::eax);
722 m_jit.cvtsi2sd_rr(X86::eax, X86::xmm0);
724 m_jit.link(loadedDouble, m_jit.label());
725 if (opcodeID == op_add)
726 m_jit.addsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
727 else if (opcodeID == op_sub)
728 m_jit.subsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
730 ASSERT(opcodeID == op_mul);
731 m_jit.mulsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
// Try to write the double back into src2's cell (edx) or as an immediate.
734 putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::edx, dst, &wasJSNumberCell2, X86::xmm1, X86::ecx, X86::eax);
735 wasJSNumberCell2b = m_jit.emitUnlinkedJmp();
737 // (2) This handles cases where src2 is an immediate number.
738 // Two slow cases - either src1 isn't an immediate, or the subtract overflows.
739 m_jit.link(op2imm, m_jit.label());
740 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
// --- Strategy (b): src1 reusable number cell (mirror of (a)) ---
741 } else if (types.first().isReusable() && isSSE2Present()) {
742 ASSERT(types.first().mightBeNumber());
744 // Check op1 is a number
745 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
746 X86Assembler::JmpSrc op1imm = m_jit.emitUnlinkedJne();
747 if (!types.first().definitelyIsNumber()) {
748 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
749 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
750 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
753 // (1) In this case src1 is a reusable number cell.
754 // Slow case if src2 is not a number type.
755 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::edx);
756 X86Assembler::JmpSrc op2imm = m_jit.emitUnlinkedJne();
757 if (!types.second().definitelyIsNumber()) {
758 emitJumpSlowCaseIfNotJSCell(X86::edx, i);
759 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::edx);
760 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
763 // (1a) if we get here, src2 is also a number cell
764 m_jit.movsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm1);
765 X86Assembler::JmpSrc loadedDouble = m_jit.emitUnlinkedJmp();
766 // (1b) if we get here, src2 is an immediate
767 m_jit.link(op2imm, m_jit.label());
768 emitFastArithImmToInt(X86::edx);
769 m_jit.cvtsi2sd_rr(X86::edx, X86::xmm1);
771 m_jit.link(loadedDouble, m_jit.label());
772 m_jit.movsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
773 if (opcodeID == op_add)
774 m_jit.addsd_rr(X86::xmm1, X86::xmm0);
775 else if (opcodeID == op_sub)
776 m_jit.subsd_rr(X86::xmm1, X86::xmm0);
778 ASSERT(opcodeID == op_mul);
779 m_jit.mulsd_rr(X86::xmm1, X86::xmm0);
781 m_jit.movsd_rm(X86::xmm0, OBJECT_OFFSET(JSNumberCell, m_value), X86::eax);
// Try to write the double back into src1's cell (eax) or as an immediate.
784 putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::eax, dst, &wasJSNumberCell1, X86::xmm1, X86::ecx, X86::edx);
785 wasJSNumberCell1b = m_jit.emitUnlinkedJmp();
787 // (2) This handles cases where src1 is an immediate number.
788 // Two slow cases - either src2 isn't an immediate, or the subtract overflows.
789 m_jit.link(op1imm, m_jit.label());
790 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
// --- Strategy (c): both operands must be integer immediates ---
792 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
794 if (opcodeID == op_add) {
795 emitFastArithDeTagImmediate(X86::eax);
796 m_jit.addl_rr(X86::edx, X86::eax);
797 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
798 } else if (opcodeID == op_sub) {
799 m_jit.subl_rr(X86::edx, X86::eax);
800 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
801 emitFastArithReTagImmediate(X86::eax);
803 ASSERT(opcodeID == op_mul);
804 emitFastArithDeTagImmediate(X86::eax);
805 emitFastArithImmToInt(X86::edx);
806 m_jit.imull_rr(X86::edx, X86::eax);
807 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
808 emitFastArithReTagImmediate(X86::eax);
// Join the number-cell fast paths back into the main flow.
812 if (types.second().isReusable() && isSSE2Present()) {
813 m_jit.link(wasJSNumberCell2, m_jit.label());
814 m_jit.link(wasJSNumberCell2b, m_jit.label());
816 else if (types.first().isReusable() && isSSE2Present()) {
817 m_jit.link(wasJSNumberCell1, m_jit.label());
818 m_jit.link(wasJSNumberCell1b, m_jit.label());
822 void CTI::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types, unsigned i)
824 X86Assembler::JmpDst here = m_jit.label();
825 m_jit.link(iter->from, here);
826 if (types.second().isReusable() && isSSE2Present()) {
827 if (!types.first().definitelyIsNumber()) {
828 m_jit.link((++iter)->from, here);
829 m_jit.link((++iter)->from, here);
831 if (!types.second().definitelyIsNumber()) {
832 m_jit.link((++iter)->from, here);
833 m_jit.link((++iter)->from, here);
835 m_jit.link((++iter)->from, here);
836 } else if (types.first().isReusable() && isSSE2Present()) {
837 if (!types.first().definitelyIsNumber()) {
838 m_jit.link((++iter)->from, here);
839 m_jit.link((++iter)->from, here);
841 if (!types.second().definitelyIsNumber()) {
842 m_jit.link((++iter)->from, here);
843 m_jit.link((++iter)->from, here);
845 m_jit.link((++iter)->from, here);
847 m_jit.link((++iter)->from, here);
849 emitGetPutArg(src1, 0, X86::ecx);
850 emitGetPutArg(src2, 4, X86::ecx);
851 if (opcodeID == op_add)
852 emitCall(i, Machine::cti_op_add);
853 else if (opcodeID == op_sub)
854 emitCall(i, Machine::cti_op_sub);
856 ASSERT(opcodeID == op_mul);
857 emitCall(i, Machine::cti_op_mul);
862 void CTI::privateCompileMainPass()
864 Instruction* instruction = m_codeBlock->instructions.begin();
865 unsigned instructionCount = m_codeBlock->instructions.size();
867 unsigned structureIDInstructionIndex = 0;
869 for (unsigned i = 0; i < instructionCount; ) {
870 m_labels[i] = m_jit.label();
872 #if ENABLE(SAMPLING_TOOL)
873 m_jit.movl_i32m(m_machine->getOpcodeID(instruction[i].u.opcode), ¤tOpcodeID);
876 ASSERT_WITH_MESSAGE(m_machine->isOpcode(instruction[i].u.opcode), "privateCompileMainPass gone bad @ %d", i);
877 switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
879 unsigned src = instruction[i + 2].u.operand;
881 m_jit.movl_i32r(reinterpret_cast<unsigned>(getConstant(m_exec, src)), X86::edx);
883 emitGetArg(src, X86::edx);
884 emitPutResult(instruction[i + 1].u.operand, X86::edx);
889 unsigned dst = instruction[i + 1].u.operand;
890 unsigned src1 = instruction[i + 2].u.operand;
891 unsigned src2 = instruction[i + 3].u.operand;
893 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
894 emitGetArg(src2, X86::edx);
895 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
896 m_jit.addl_i32r(getDeTaggedConstantImmediate(value), X86::edx);
897 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
898 emitPutResult(dst, X86::edx);
899 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
900 emitGetArg(src1, X86::eax);
901 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
902 m_jit.addl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
903 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
906 OperandTypes types = OperandTypes::fromInt(instruction[i + 4].u.operand);
907 if (types.first().mightBeNumber() && types.second().mightBeNumber())
908 compileBinaryArithOp(op_add, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
910 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
911 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
912 emitCall(i, Machine::cti_op_add);
913 emitPutResult(instruction[i + 1].u.operand);
921 if (m_codeBlock->needsFullScopeChain)
922 emitCall(i, Machine::cti_op_end);
923 emitGetArg(instruction[i + 1].u.operand, X86::eax);
924 #if ENABLE(SAMPLING_TOOL)
925 m_jit.movl_i32m(-1, ¤tOpcodeID);
927 m_jit.pushl_m(RegisterFile::ReturnPC * static_cast<int>(sizeof(Register)), X86::edi);
933 unsigned target = instruction[i + 1].u.operand;
934 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
939 int srcDst = instruction[i + 1].u.operand;
940 emitGetArg(srcDst, X86::eax);
941 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
942 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
943 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
944 emitPutResult(srcDst, X86::eax);
949 emitSlowScriptCheck(i);
951 unsigned target = instruction[i + 1].u.operand;
952 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
956 case op_loop_if_less: {
957 emitSlowScriptCheck(i);
959 unsigned target = instruction[i + 3].u.operand;
960 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
962 emitGetArg(instruction[i + 1].u.operand, X86::edx);
963 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
964 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
965 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
967 emitGetArg(instruction[i + 1].u.operand, X86::eax);
968 emitGetArg(instruction[i + 2].u.operand, X86::edx);
969 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
970 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
971 m_jit.cmpl_rr(X86::edx, X86::eax);
972 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
977 case op_loop_if_lesseq: {
978 emitSlowScriptCheck(i);
980 unsigned target = instruction[i + 3].u.operand;
981 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
983 emitGetArg(instruction[i + 1].u.operand, X86::edx);
984 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
985 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
986 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
988 emitGetArg(instruction[i + 1].u.operand, X86::eax);
989 emitGetArg(instruction[i + 2].u.operand, X86::edx);
990 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
991 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
992 m_jit.cmpl_rr(X86::edx, X86::eax);
993 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
998 case op_new_object: {
999 emitCall(i, Machine::cti_op_new_object);
1000 emitPutResult(instruction[i + 1].u.operand);
1004 case op_put_by_id: {
1005 // In order to be able to repatch both the StructureID, and the object offset, we store one pointer,
1006 // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
1007 // such that the StructureID & offset are always at the same distance from this.
1009 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1010 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1012 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
1013 X86Assembler::JmpDst hotPathBegin = m_jit.label();
1014 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
1015 ++structureIDInstructionIndex;
1017 // Jump to a slow case if either the base object is an immediate, or if the StructureID does not match.
1018 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
1019 // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
1020 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1021 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdStructureID);
1022 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1024 // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
1025 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
1026 m_jit.movl_rm(X86::edx, repatchGetByIdDefaultOffset, X86::eax);
1027 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdPropertyMapOffset);
1032 case op_get_by_id: {
1033 // As for put_by_id, get_by_id requires the offset of the StructureID and the offset of the access to be repatched.
1034 // Additionally, for get_by_id we need to repatch the offset of the branch to the slow case (we repatch this to jump
1035 // to array-length / prototype access trampolines, and finally we also set the property-map access offset as a label
1036 // to jump back to if one of these trampolines finds a match.
1038 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1040 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
1042 X86Assembler::JmpDst hotPathBegin = m_jit.label();
1043 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
1044 ++structureIDInstructionIndex;
1046 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
1047 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1048 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdStructureID);
1049 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1050 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdBranchToSlowCase);
1052 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
1053 m_jit.movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::ecx);
1054 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdPropertyMapOffset);
1055 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
1060 case op_instanceof: {
1061 emitGetArg(instruction[i + 2].u.operand, X86::eax); // value
1062 emitGetArg(instruction[i + 3].u.operand, X86::ecx); // baseVal
1063 emitGetArg(instruction[i + 4].u.operand, X86::edx); // proto
1065 // check if any are immediates
1066 m_jit.orl_rr(X86::eax, X86::ecx);
1067 m_jit.orl_rr(X86::edx, X86::ecx);
1068 m_jit.testl_i32r(JSImmediate::TagMask, X86::ecx);
1070 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJnz(), i));
1072 // check that all are object type - this is a bit of a bithack to avoid excess branching;
1073 // we check that the sum of the three type codes from StructureIDs is exactly 3 * ObjectType,
1074 // this works because NumberType and StringType are smaller
1075 m_jit.movl_i32r(3 * ObjectType, X86::ecx);
1076 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::eax);
1077 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::edx, X86::edx);
1078 m_jit.subl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::eax, X86::ecx);
1079 m_jit.subl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::edx, X86::ecx);
1080 emitGetArg(instruction[i + 3].u.operand, X86::edx); // reload baseVal
1081 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::edx, X86::edx);
1082 m_jit.cmpl_rm(X86::ecx, OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::edx);
1084 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1086 // check that baseVal's flags include ImplementsHasInstance but not OverridesHasInstance
1087 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::edx, X86::ecx);
1088 m_jit.andl_i32r(ImplementsHasInstance | OverridesHasInstance, X86::ecx);
1089 m_jit.cmpl_i32r(ImplementsHasInstance, X86::ecx);
1091 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1093 emitGetArg(instruction[i + 2].u.operand, X86::ecx); // reload value
1094 emitGetArg(instruction[i + 4].u.operand, X86::edx); // reload proto
1096 // optimistically load true result
1097 m_jit.movl_i32r(reinterpret_cast<int32_t>(jsBoolean(true)), X86::eax);
1099 X86Assembler::JmpDst loop = m_jit.label();
1101 // load value's prototype
1102 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::ecx, X86::ecx);
1103 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
1105 m_jit.cmpl_rr(X86::ecx, X86::edx);
1106 X86Assembler::JmpSrc exit = m_jit.emitUnlinkedJe();
1108 m_jit.cmpl_i32r(reinterpret_cast<int32_t>(jsNull()), X86::ecx);
1109 X86Assembler::JmpSrc goToLoop = m_jit.emitUnlinkedJne();
1110 m_jit.link(goToLoop, loop);
1112 m_jit.movl_i32r(reinterpret_cast<int32_t>(jsBoolean(false)), X86::eax);
1114 m_jit.link(exit, m_jit.label());
1116 emitPutResult(instruction[i + 1].u.operand);
1121 case op_del_by_id: {
1122 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1123 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1124 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1125 emitCall(i, Machine::cti_op_del_by_id);
1126 emitPutResult(instruction[i + 1].u.operand);
1131 unsigned dst = instruction[i + 1].u.operand;
1132 unsigned src1 = instruction[i + 2].u.operand;
1133 unsigned src2 = instruction[i + 3].u.operand;
1135 if (JSValue* src1Value = getConstantImmediateNumericArg(src1)) {
1136 emitGetArg(src2, X86::eax);
1137 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1138 emitFastArithImmToInt(X86::eax);
1139 m_jit.imull_i32r(X86::eax, getDeTaggedConstantImmediate(src1Value), X86::eax);
1140 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1141 emitFastArithReTagImmediate(X86::eax);
1143 } else if (JSValue* src2Value = getConstantImmediateNumericArg(src2)) {
1144 emitGetArg(src1, X86::eax);
1145 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1146 emitFastArithImmToInt(X86::eax);
1147 m_jit.imull_i32r(X86::eax, getDeTaggedConstantImmediate(src2Value), X86::eax);
1148 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1149 emitFastArithReTagImmediate(X86::eax);
1152 compileBinaryArithOp(op_mul, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
1158 FuncDeclNode* func = (m_codeBlock->functions[instruction[i + 2].u.operand]).get();
1159 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
1160 emitCall(i, Machine::cti_op_new_func);
1161 emitPutResult(instruction[i + 1].u.operand);
1166 compileOpCall(instruction, i);
1170 case op_get_global_var: {
1171 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 2].u.jsCell);
1172 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
1173 emitGetVariableObjectRegister(X86::eax, instruction[i + 3].u.operand, X86::eax);
1174 emitPutResult(instruction[i + 1].u.operand, X86::eax);
1178 case op_put_global_var: {
1179 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 1].u.jsCell);
1180 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
1181 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1182 emitPutVariableObjectRegister(X86::edx, X86::eax, instruction[i + 2].u.operand);
1186 case op_get_scoped_var: {
1187 int skip = instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain;
1189 emitGetArg(RegisterFile::ScopeChain, X86::eax);
1191 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::eax, X86::eax);
1193 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::eax, X86::eax);
1194 emitGetVariableObjectRegister(X86::eax, instruction[i + 2].u.operand, X86::eax);
1195 emitPutResult(instruction[i + 1].u.operand);
1199 case op_put_scoped_var: {
1200 int skip = instruction[i + 2].u.operand + m_codeBlock->needsFullScopeChain;
1202 emitGetArg(RegisterFile::ScopeChain, X86::edx);
1203 emitGetArg(instruction[i + 3].u.operand, X86::eax);
1205 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::edx, X86::edx);
1207 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::edx, X86::edx);
1208 emitPutVariableObjectRegister(X86::eax, X86::edx, instruction[i + 1].u.operand);
1212 case op_tear_off_activation: {
1213 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1214 emitCall(i, Machine::cti_op_tear_off_activation);
1218 case op_tear_off_arguments: {
1219 emitCall(i, Machine::cti_op_tear_off_arguments);
1224 // Check for a profiler - if there is one, jump to the hook below.
1225 emitGetCTIParam(CTI_ARGS_profilerReference, X86::eax);
1226 m_jit.cmpl_i32m(0, X86::eax);
1227 X86Assembler::JmpSrc profile = m_jit.emitUnlinkedJne();
1228 X86Assembler::JmpDst profiled = m_jit.label();
1230 // We could JIT generate the deref, only calling out to C when the refcount hits zero.
1231 if (m_codeBlock->needsFullScopeChain)
1232 emitCall(i, Machine::cti_op_ret_scopeChain);
1234 // Return the result in %eax.
1235 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1237 // Grab the return address.
1238 emitGetArg(RegisterFile::ReturnPC, X86::edx);
1240 // Restore our caller's "r".
1241 emitGetArg(RegisterFile::CallerRegisters, X86::edi);
1242 emitPutCTIParam(X86::edi, CTI_ARGS_r);
1245 m_jit.pushl_r(X86::edx);
1249 m_jit.link(profile, m_jit.label());
1250 emitCall(i, Machine::cti_op_ret_profiler);
1251 m_jit.link(m_jit.emitUnlinkedJmp(), profiled);
1256 case op_new_array: {
1257 m_jit.leal_mr(sizeof(Register) * instruction[i + 2].u.operand, X86::edi, X86::edx);
1258 emitPutArg(X86::edx, 0);
1259 emitPutArgConstant(instruction[i + 3].u.operand, 4);
1260 emitCall(i, Machine::cti_op_new_array);
1261 emitPutResult(instruction[i + 1].u.operand);
1266 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1267 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1268 emitCall(i, Machine::cti_op_resolve);
1269 emitPutResult(instruction[i + 1].u.operand);
1273 case op_construct: {
1274 compileOpCall(instruction, i, OpConstruct);
1278 case op_construct_verify: {
1279 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1281 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1282 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJne();
1283 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1284 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
1285 X86Assembler::JmpSrc isObject = m_jit.emitUnlinkedJe();
1287 m_jit.link(isImmediate, m_jit.label());
1288 emitGetArg(instruction[i + 2].u.operand, X86::ecx);
1289 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
1290 m_jit.link(isObject, m_jit.label());
1295 case op_get_by_val: {
1296 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1297 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1298 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1299 emitFastArithImmToInt(X86::edx);
1300 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1301 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1302 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
1303 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1305 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
1306 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
1307 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
1308 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
1310 // Get the value from the vector
1311 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::eax);
1312 emitPutResult(instruction[i + 1].u.operand);
1316 case op_resolve_func: {
1317 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1318 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1319 emitCall(i, Machine::cti_op_resolve_func);
1320 emitPutResult(instruction[i + 1].u.operand);
1321 emitPutResult(instruction[i + 2].u.operand, X86::edx);
1326 compileBinaryArithOp(op_sub, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
1330 case op_put_by_val: {
1331 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1332 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1333 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1334 emitFastArithImmToInt(X86::edx);
1335 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1336 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1337 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
1338 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1340 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
1341 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
1342 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
1343 X86Assembler::JmpSrc inFastVector = m_jit.emitUnlinkedJa();
1344 // No; oh well, check if the access is within the vector - if so, we may still be okay.
1345 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
1346 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
1348 // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
1349 // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff.
1350 m_jit.cmpl_i8m(0, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
1351 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
1353 // All good - put the value into the array.
1354 m_jit.link(inFastVector, m_jit.label());
1355 emitGetArg(instruction[i + 3].u.operand, X86::eax);
1356 m_jit.movl_rm(X86::eax, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
1360 CTI_COMPILE_BINARY_OP(op_lesseq)
1361 case op_loop_if_true: {
1362 emitSlowScriptCheck(i);
1364 unsigned target = instruction[i + 2].u.operand;
1365 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1367 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1368 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
1369 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1370 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
1372 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1373 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1374 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1375 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1377 m_jit.link(isZero, m_jit.label());
1381 case op_resolve_base: {
1382 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1383 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1384 emitCall(i, Machine::cti_op_resolve_base);
1385 emitPutResult(instruction[i + 1].u.operand);
1390 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1391 emitCall(i, Machine::cti_op_negate);
1392 emitPutResult(instruction[i + 1].u.operand);
1396 case op_resolve_skip: {
1397 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1398 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1399 emitPutArgConstant(instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain, 4);
1400 emitCall(i, Machine::cti_op_resolve_skip);
1401 emitPutResult(instruction[i + 1].u.operand);
1405 case op_resolve_global: {
1407 unsigned globalObject = reinterpret_cast<unsigned>(instruction[i + 2].u.jsCell);
1408 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1409 void* structureIDAddr = reinterpret_cast<void*>(instruction + i + 4);
1410 void* offsetAddr = reinterpret_cast<void*>(instruction + i + 5);
1412 // Check StructureID of global object
1413 m_jit.movl_i32r(globalObject, X86::eax);
1414 m_jit.movl_mr(structureIDAddr, X86::edx);
1415 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1416 X86Assembler::JmpSrc slowCase = m_jit.emitUnlinkedJne(); // StructureIDs don't match
1417 m_slowCases.append(SlowCaseEntry(slowCase, i));
1419 // Load cached property
1420 m_jit.movl_mr(OBJECT_OFFSET(JSGlobalObject, m_propertyStorage), X86::eax, X86::eax);
1421 m_jit.movl_mr(offsetAddr, X86::edx);
1422 m_jit.movl_mr(0, X86::eax, X86::edx, sizeof(JSValue*), X86::eax);
1423 emitPutResult(instruction[i + 1].u.operand);
1424 X86Assembler::JmpSrc end = m_jit.emitUnlinkedJmp();
1427 m_jit.link(slowCase, m_jit.label());
1428 emitPutArgConstant(globalObject, 0);
1429 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1430 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 8);
1431 emitCall(i, Machine::cti_op_resolve_global);
1432 emitPutResult(instruction[i + 1].u.operand);
1433 m_jit.link(end, m_jit.label());
1435 ++structureIDInstructionIndex;
1438 CTI_COMPILE_BINARY_OP(op_div)
1440 int srcDst = instruction[i + 1].u.operand;
1441 emitGetArg(srcDst, X86::eax);
1442 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1443 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
1444 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1445 emitPutResult(srcDst, X86::eax);
1450 unsigned target = instruction[i + 3].u.operand;
1451 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1453 emitGetArg(instruction[i + 1].u.operand, X86::edx);
1454 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1455 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
1456 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
1458 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1459 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1460 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1461 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1462 m_jit.cmpl_rr(X86::edx, X86::eax);
1463 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
1469 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1470 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
1471 m_jit.testl_i32r(JSImmediate::FullTagTypeMask, X86::eax); // i8?
1472 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1473 m_jit.xorl_i8r((JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue), X86::eax);
1474 emitPutResult(instruction[i + 1].u.operand);
1479 unsigned target = instruction[i + 2].u.operand;
1480 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1482 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1483 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1484 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1485 X86Assembler::JmpSrc isNonZero = m_jit.emitUnlinkedJne();
1487 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1488 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1489 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1490 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1492 m_jit.link(isNonZero, m_jit.label());
1497 int srcDst = instruction[i + 2].u.operand;
1498 emitGetArg(srcDst, X86::eax);
1499 m_jit.movl_rr(X86::eax, X86::edx);
1500 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1501 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1502 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1503 emitPutResult(srcDst, X86::edx);
1504 emitPutResult(instruction[i + 1].u.operand);
1508 case op_unexpected_load: {
1509 JSValue* v = m_codeBlock->unexpectedConstants[instruction[i + 2].u.operand];
1510 m_jit.movl_i32r(reinterpret_cast<unsigned>(v), X86::eax);
1511 emitPutResult(instruction[i + 1].u.operand);
1516 int retAddrDst = instruction[i + 1].u.operand;
1517 int target = instruction[i + 2].u.operand;
1518 m_jit.movl_i32m(0, sizeof(Register) * retAddrDst, X86::edi);
1519 X86Assembler::JmpDst addrPosition = m_jit.label();
1520 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1521 X86Assembler::JmpDst sretTarget = m_jit.label();
1522 m_jsrSites.append(JSRInfo(addrPosition, sretTarget));
1527 m_jit.jmp_m(sizeof(Register) * instruction[i + 1].u.operand, X86::edi);
1532 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1533 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1534 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1535 m_jit.cmpl_rr(X86::edx, X86::eax);
1536 m_jit.sete_r(X86::eax);
1537 m_jit.movzbl_rr(X86::eax, X86::eax);
1538 emitTagAsBoolImmediate(X86::eax);
1539 emitPutResult(instruction[i + 1].u.operand);
1544 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1545 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1546 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1547 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1548 emitFastArithImmToInt(X86::eax);
1549 emitFastArithImmToInt(X86::ecx);
1550 m_jit.shll_CLr(X86::eax);
1551 emitFastArithIntToImmOrSlowCase(X86::eax, i);
1552 emitPutResult(instruction[i + 1].u.operand);
1557 unsigned src1 = instruction[i + 2].u.operand;
1558 unsigned src2 = instruction[i + 3].u.operand;
1559 unsigned dst = instruction[i + 1].u.operand;
1560 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
1561 emitGetArg(src2, X86::eax);
1562 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1563 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax); // FIXME: make it more obvious this is relying on the format of JSImmediate
1565 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
1566 emitGetArg(src1, X86::eax);
1567 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1568 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax);
1571 emitGetArg(src1, X86::eax);
1572 emitGetArg(src2, X86::edx);
1573 m_jit.andl_rr(X86::edx, X86::eax);
1574 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1581 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1582 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1583 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1584 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1585 emitFastArithImmToInt(X86::ecx);
1586 m_jit.sarl_CLr(X86::eax);
1587 emitFastArithPotentiallyReTagImmediate(X86::eax);
1588 emitPutResult(instruction[i + 1].u.operand);
1593 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1594 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1595 m_jit.xorl_i8r(~JSImmediate::TagBitTypeInteger, X86::eax);
1596 emitPutResult(instruction[i + 1].u.operand);
1600 case op_resolve_with_base: {
1601 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1602 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1603 emitCall(i, Machine::cti_op_resolve_with_base);
1604 emitPutResult(instruction[i + 1].u.operand);
1605 emitPutResult(instruction[i + 2].u.operand, X86::edx);
1609 case op_new_func_exp: {
1610 FuncExprNode* func = (m_codeBlock->functionExpressions[instruction[i + 2].u.operand]).get();
1611 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
1612 emitCall(i, Machine::cti_op_new_func_exp);
1613 emitPutResult(instruction[i + 1].u.operand);
1618 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1619 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1620 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1621 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1622 emitFastArithDeTagImmediate(X86::eax);
1623 emitFastArithDeTagImmediate(X86::ecx);
1624 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i)); // This is checking if the last detag resulted in a value 0.
1626 m_jit.idivl_r(X86::ecx);
1627 emitFastArithReTagImmediate(X86::edx);
1628 m_jit.movl_rr(X86::edx, X86::eax);
1629 emitPutResult(instruction[i + 1].u.operand);
1634 unsigned target = instruction[i + 2].u.operand;
1635 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1637 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1638 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
1639 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1640 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
1642 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1643 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1644 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1645 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1647 m_jit.link(isZero, m_jit.label());
1651 CTI_COMPILE_BINARY_OP(op_less)
1653 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1654 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1655 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1656 m_jit.cmpl_rr(X86::eax, X86::edx);
1658 m_jit.setne_r(X86::eax);
1659 m_jit.movzbl_rr(X86::eax, X86::eax);
1660 emitTagAsBoolImmediate(X86::eax);
1662 emitPutResult(instruction[i + 1].u.operand);
1668 int srcDst = instruction[i + 2].u.operand;
1669 emitGetArg(srcDst, X86::eax);
1670 m_jit.movl_rr(X86::eax, X86::edx);
1671 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1672 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1673 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1674 emitPutResult(srcDst, X86::edx);
1675 emitPutResult(instruction[i + 1].u.operand);
1679 CTI_COMPILE_BINARY_OP(op_urshift)
1681 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1682 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1683 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1684 m_jit.xorl_rr(X86::edx, X86::eax);
1685 emitFastArithReTagImmediate(X86::eax);
1686 emitPutResult(instruction[i + 1].u.operand);
1690 case op_new_regexp: {
1691 RegExp* regExp = m_codeBlock->regexps[instruction[i + 2].u.operand].get();
1692 emitPutArgConstant(reinterpret_cast<unsigned>(regExp), 0);
1693 emitCall(i, Machine::cti_op_new_regexp);
1694 emitPutResult(instruction[i + 1].u.operand);
1699 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1700 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1701 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1702 m_jit.orl_rr(X86::edx, X86::eax);
1703 emitPutResult(instruction[i + 1].u.operand);
1707 case op_call_eval: {
1708 compileOpCall(instruction, i, OpCallEval);
1713 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1714 emitCall(i, Machine::cti_op_throw);
1715 m_jit.addl_i8r(0x24, X86::esp);
1716 m_jit.popl_r(X86::edi);
1717 m_jit.popl_r(X86::esi);
1722 case op_get_pnames: {
1723 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1724 emitCall(i, Machine::cti_op_get_pnames);
1725 emitPutResult(instruction[i + 1].u.operand);
1729 case op_next_pname: {
1730 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1731 unsigned target = instruction[i + 3].u.operand;
1732 emitCall(i, Machine::cti_op_next_pname);
1733 m_jit.testl_rr(X86::eax, X86::eax);
1734 X86Assembler::JmpSrc endOfIter = m_jit.emitUnlinkedJe();
1735 emitPutResult(instruction[i + 1].u.operand);
1736 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 3 + target));
1737 m_jit.link(endOfIter, m_jit.label());
1741 case op_push_scope: {
1742 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1743 emitCall(i, Machine::cti_op_push_scope);
1747 case op_pop_scope: {
1748 emitCall(i, Machine::cti_op_pop_scope);
1752 CTI_COMPILE_UNARY_OP(op_typeof)
1753 CTI_COMPILE_UNARY_OP(op_is_undefined)
1754 CTI_COMPILE_UNARY_OP(op_is_boolean)
1755 CTI_COMPILE_UNARY_OP(op_is_number)
1756 CTI_COMPILE_UNARY_OP(op_is_string)
1757 CTI_COMPILE_UNARY_OP(op_is_object)
1758 CTI_COMPILE_UNARY_OP(op_is_function)
1760 compileOpStrictEq(instruction, i, OpStrictEq);
1764 case op_nstricteq: {
1765 compileOpStrictEq(instruction, i, OpNStrictEq);
1769 case op_to_jsnumber: {
1770 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1772 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1773 X86Assembler::JmpSrc wasImmediate = m_jit.emitUnlinkedJnz();
1775 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1776 m_jit.cmpl_i32m(NumberType, OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::ecx);
1778 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1780 m_jit.link(wasImmediate, m_jit.label());
1782 emitPutResult(instruction[i + 1].u.operand);
1787 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1788 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1789 emitCall(i, Machine::cti_op_in);
1790 emitPutResult(instruction[i + 1].u.operand);
1794 case op_push_new_scope: {
1795 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1796 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1797 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1798 emitCall(i, Machine::cti_op_push_new_scope);
1799 emitPutResult(instruction[i + 1].u.operand);
1804 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
1805 emitPutResult(instruction[i + 1].u.operand);
1809 case op_jmp_scopes: {
1810 unsigned count = instruction[i + 1].u.operand;
1811 emitPutArgConstant(count, 0);
1812 emitCall(i, Machine::cti_op_jmp_scopes);
1813 unsigned target = instruction[i + 2].u.operand;
1814 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1818 case op_put_by_index: {
1819 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1820 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1821 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1822 emitCall(i, Machine::cti_op_put_by_index);
1826 case op_switch_imm: {
1827 unsigned tableIndex = instruction[i + 1].u.operand;
1828 unsigned defaultOffset = instruction[i + 2].u.operand;
1829 unsigned scrutinee = instruction[i + 3].u.operand;
1831 // create jump table for switch destinations, track this switch statement.
1832 SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTables[tableIndex];
1833 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Immediate));
1834 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
1836 emitGetPutArg(scrutinee, 0, X86::ecx);
1837 emitPutArgConstant(tableIndex, 4);
1838 emitCall(i, Machine::cti_op_switch_imm);
1839 m_jit.jmp_r(X86::eax);
1843 case op_switch_char: {
1844 unsigned tableIndex = instruction[i + 1].u.operand;
1845 unsigned defaultOffset = instruction[i + 2].u.operand;
1846 unsigned scrutinee = instruction[i + 3].u.operand;
1848 // create jump table for switch destinations, track this switch statement.
1849 SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTables[tableIndex];
1850 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Character));
1851 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
1853 emitGetPutArg(scrutinee, 0, X86::ecx);
1854 emitPutArgConstant(tableIndex, 4);
1855 emitCall(i, Machine::cti_op_switch_char);
1856 m_jit.jmp_r(X86::eax);
1860 case op_switch_string: {
1861 unsigned tableIndex = instruction[i + 1].u.operand;
1862 unsigned defaultOffset = instruction[i + 2].u.operand;
1863 unsigned scrutinee = instruction[i + 3].u.operand;
1865 // create jump table for switch destinations, track this switch statement.
1866 StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTables[tableIndex];
1867 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset));
1869 emitGetPutArg(scrutinee, 0, X86::ecx);
1870 emitPutArgConstant(tableIndex, 4);
1871 emitCall(i, Machine::cti_op_switch_string);
1872 m_jit.jmp_r(X86::eax);
1876 case op_del_by_val: {
1877 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1878 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1879 emitCall(i, Machine::cti_op_del_by_val);
1880 emitPutResult(instruction[i + 1].u.operand);
1884 case op_put_getter: {
1885 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1886 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1887 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1888 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1889 emitCall(i, Machine::cti_op_put_getter);
1893 case op_put_setter: {
1894 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1895 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1896 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1897 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1898 emitCall(i, Machine::cti_op_put_setter);
1902 case op_new_error: {
1903 JSValue* message = m_codeBlock->unexpectedConstants[instruction[i + 3].u.operand];
1904 emitPutArgConstant(instruction[i + 2].u.operand, 0);
1905 emitPutArgConstant(reinterpret_cast<unsigned>(message), 4);
1906 emitPutArgConstant(m_codeBlock->lineNumberForVPC(&instruction[i]), 8);
1907 emitCall(i, Machine::cti_op_new_error);
1908 emitPutResult(instruction[i + 1].u.operand);
1913 emitPutArgConstant(instruction[i + 1].u.operand, 0);
1914 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1915 emitPutArgConstant(instruction[i + 3].u.operand, 8);
1916 emitCall(i, Machine::cti_op_debug);
1921 unsigned dst = instruction[i + 1].u.operand;
1922 unsigned src1 = instruction[i + 2].u.operand;
1924 emitGetArg(src1, X86::eax);
1925 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1926 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJnz();
1928 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1929 m_jit.testl_i32m(MasqueradesAsUndefined, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::ecx);
1930 m_jit.setnz_r(X86::eax);
1932 X86Assembler::JmpSrc wasNotImmediate = m_jit.emitUnlinkedJmp();
1934 m_jit.link(isImmediate, m_jit.label());
1936 m_jit.movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
1937 m_jit.andl_rr(X86::eax, X86::ecx);
1938 m_jit.cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
1939 m_jit.sete_r(X86::eax);
1941 m_jit.link(wasNotImmediate, m_jit.label());
1943 m_jit.movzbl_rr(X86::eax, X86::eax);
1944 emitTagAsBoolImmediate(X86::eax);
1951 unsigned dst = instruction[i + 1].u.operand;
1952 unsigned src1 = instruction[i + 2].u.operand;
1954 emitGetArg(src1, X86::eax);
1955 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1956 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJnz();
1958 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1959 m_jit.testl_i32m(MasqueradesAsUndefined, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::ecx);
1960 m_jit.setz_r(X86::eax);
1962 X86Assembler::JmpSrc wasNotImmediate = m_jit.emitUnlinkedJmp();
1964 m_jit.link(isImmediate, m_jit.label());
1966 m_jit.movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
1967 m_jit.andl_rr(X86::eax, X86::ecx);
1968 m_jit.cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
1969 m_jit.setne_r(X86::eax);
1971 m_jit.link(wasNotImmediate, m_jit.label());
1973 m_jit.movzbl_rr(X86::eax, X86::eax);
1974 emitTagAsBoolImmediate(X86::eax);
1981 // Even though CTI doesn't use them, we initialize our constant
1982 // registers to zap stale pointers, to avoid unnecessarily prolonging
1983 // object lifetime and increasing GC pressure.
1984 size_t count = m_codeBlock->numVars + m_codeBlock->constantRegisters.size();
1985 for (size_t j = 0; j < count; ++j)
1986 emitInitRegister(j);
1991 case op_enter_with_activation: {
1992 // Even though CTI doesn't use them, we initialize our constant
1993 // registers to zap stale pointers, to avoid unnecessarily prolonging
1994 // object lifetime and increasing GC pressure.
1995 size_t count = m_codeBlock->numVars + m_codeBlock->constantRegisters.size();
1996 for (size_t j = 0; j < count; ++j)
1997 emitInitRegister(j);
1999 emitCall(i, Machine::cti_op_push_activation);
2000 emitPutResult(instruction[i + 1].u.operand);
2005 case op_create_arguments: {
2006 emitCall(i, Machine::cti_op_create_arguments);
2010 case op_convert_this: {
2011 emitGetArg(instruction[i + 1].u.operand, X86::eax);
2013 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
2014 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::edx);
2015 m_jit.testl_i32m(NeedsThisConversion, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::edx);
2016 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJnz(), i));
2021 case op_get_array_length:
2022 case op_get_by_id_chain:
2023 case op_get_by_id_generic:
2024 case op_get_by_id_proto:
2025 case op_get_by_id_self:
2026 case op_get_string_length:
2027 case op_put_by_id_generic:
2028 case op_put_by_id_replace:
2029 case op_put_by_id_transition:
2030 ASSERT_NOT_REACHED();
2034 ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
// Second compilation pass: resolve intra-CodeBlock jumps recorded during the
// main pass. Each m_jmpTable entry pairs an unlinked jump (.from) with the
// bytecode index of its target (.to); link the jump to the label that the
// main pass emitted for that bytecode index.
2038 void CTI::privateCompileLinkPass()
2040 unsigned jmpTableCount = m_jmpTable.size();
2041 for (unsigned i = 0; i < jmpTableCount; ++i)
2042 m_jit.link(m_jmpTable[i].from, m_labels[m_jmpTable[i].to]);
// Expands (inside privateCompileSlowCases' switch) to the generic slow path
// for a binary operator: link the single recorded slow-case jump to here,
// marshal operand 2 and operand 3 as the two stack arguments, call the
// matching Machine::cti_<name> C++ helper, and store its result into the
// destination register (operand 1). Comments are kept out of the macro body
// because a '//' comment would swallow the line-continuation backslash.
2046 #define CTI_COMPILE_BINARY_OP_SLOW_CASE(name) \
2048 m_jit.link(iter->from, m_jit.label()); \
2049 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
2050 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
2051 emitCall(i, Machine::cti_##name); \
2052 emitPutResult(instruction[i + 1].u.operand); \
// Third compilation pass: emit the out-of-line slow paths for every
// SlowCaseEntry recorded by the main pass. Each entry carries the bytecode
// index it belongs to (iter->to); opcodes whose fast path emitted several
// guard jumps consume several consecutive entries via (++iter)->from. The
// typical shape of a slow case is: link the recorded jump(s) to here, undo
// any speculative work the fast path performed, spill operands as C call
// arguments, call the matching Machine::cti_* helper, and store the result.
// NOTE(review): some structural lines (braces, a few case labels, rejoin
// jumps) are elided in this extract; where an opcode is named below but its
// case label is not visible, it is inferred from the cti_* helper called.
2057 void CTI::privateCompileSlowCases()
2059 unsigned structureIDInstructionIndex = 0;
2061 Instruction* instruction = m_codeBlock->instructions.begin();
2062 for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end(); ++iter) {
2063 unsigned i = iter->to;
2064 switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
2065 case op_convert_this: {
2066 m_jit.link(iter->from, m_jit.label());
2067 m_jit.link((++iter)->from, m_jit.label());
2068 emitPutArg(X86::eax, 0);
2069 emitCall(i, Machine::cti_op_convert_this);
2070 emitPutResult(instruction[i + 1].u.operand);
// op_add slow path (inferred from the cti_op_add calls). When one operand was
// a constant immediate the fast path speculatively added it in place, so the
// overflow slow case must subtract it back out before calling the helper; the
// not-immediate slow case (notImm) skips that undo.
2075 unsigned dst = instruction[i + 1].u.operand;
2076 unsigned src1 = instruction[i + 2].u.operand;
2077 unsigned src2 = instruction[i + 3].u.operand;
2078 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
2079 X86Assembler::JmpSrc notImm = iter->from;
2080 m_jit.link((++iter)->from, m_jit.label());
2081 m_jit.subl_i32r(getDeTaggedConstantImmediate(value), X86::edx);
2082 m_jit.link(notImm, m_jit.label());
2083 emitGetPutArg(src1, 0, X86::ecx);
2084 emitPutArg(X86::edx, 4);
2085 emitCall(i, Machine::cti_op_add);
2087 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
2088 X86Assembler::JmpSrc notImm = iter->from;
2089 m_jit.link((++iter)->from, m_jit.label());
2090 m_jit.subl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
2091 m_jit.link(notImm, m_jit.label());
2092 emitPutArg(X86::eax, 0);
2093 emitGetPutArg(src2, 4, X86::ecx);
2094 emitCall(i, Machine::cti_op_add);
// Neither operand constant: only emit a slow case if the operand types may be
// numeric; otherwise the main pass should not have recorded one.
2097 OperandTypes types = OperandTypes::fromInt(instruction[i + 4].u.operand);
2098 if (types.first().mightBeNumber() && types.second().mightBeNumber())
2099 compileBinaryArithOpSlowCase(op_add, iter, dst, src1, src2, types, i);
2101 ASSERT_NOT_REACHED();
2107 case op_get_by_val: {
2108 // The slow case that handles accesses to arrays (below) may jump back up to here.
2109 X86Assembler::JmpDst beginGetByValSlow = m_jit.label();
2111 X86Assembler::JmpSrc notImm = iter->from;
2112 m_jit.link((++iter)->from, m_jit.label());
2113 m_jit.link((++iter)->from, m_jit.label());
// The fast path de-tagged the integer subscript in edx; re-tag it before the
// generic helper sees it. The not-immediate case skips the re-tag.
2114 emitFastArithIntToImmNoCheck(X86::edx);
2115 m_jit.link(notImm, m_jit.label());
2116 emitPutArg(X86::eax, 0);
2117 emitPutArg(X86::edx, 4);
2118 emitCall(i, Machine::cti_op_get_by_val);
2119 emitPutResult(instruction[i + 1].u.operand);
2120 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
2122 // This is the slow case that handles accesses to arrays above the fast cut-off.
2123 // First, check if this is an access to the vector
2124 m_jit.link((++iter)->from, m_jit.label());
2125 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
2126 m_jit.link(m_jit.emitUnlinkedJbe(), beginGetByValSlow);
2128 // okay, missed the fast region, but it is still in the vector. Get the value.
2129 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::ecx);
2130 // Check whether the value loaded is zero; if so we need to return undefined.
2131 m_jit.testl_rr(X86::ecx, X86::ecx);
2132 m_jit.link(m_jit.emitUnlinkedJe(), beginGetByValSlow);
2133 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
// op_sub: delegates to the shared binary-arithmetic slow-case generator.
2139 compileBinaryArithOpSlowCase(op_sub, iter, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
// Slow path calling cti_op_rshift (case label elided in this extract).
2144 m_jit.link(iter->from, m_jit.label());
2145 m_jit.link((++iter)->from, m_jit.label());
2146 emitPutArg(X86::eax, 0);
2147 emitPutArg(X86::ecx, 4);
2148 emitCall(i, Machine::cti_op_rshift);
2149 emitPutResult(instruction[i + 1].u.operand);
// Slow path calling cti_op_lshift: the third guard reloads both operands
// from their registers before calling out; notImm1/notImm2 skip the reload.
2154 X86Assembler::JmpSrc notImm1 = iter->from;
2155 X86Assembler::JmpSrc notImm2 = (++iter)->from;
2156 m_jit.link((++iter)->from, m_jit.label());
2157 emitGetArg(instruction[i + 2].u.operand, X86::eax);
2158 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
2159 m_jit.link(notImm1, m_jit.label());
2160 m_jit.link(notImm2, m_jit.label());
2161 emitPutArg(X86::eax, 0);
2162 emitPutArg(X86::ecx, 4);
2163 emitCall(i, Machine::cti_op_lshift);
2164 emitPutResult(instruction[i + 1].u.operand);
2168 case op_loop_if_less: {
// Loop edges also perform the watchdog (slow-script) check.
2169 emitSlowScriptCheck(i);
2171 unsigned target = instruction[i + 3].u.operand;
2172 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
// First arm: src2 is a constant immediate (the if/else braces around these
// two arms are elided in this extract).
2174 m_jit.link(iter->from, m_jit.label());
2175 emitPutArg(X86::edx, 0);
2176 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
2177 emitCall(i, Machine::cti_op_loop_if_less);
2178 m_jit.testl_rr(X86::eax, X86::eax);
2179 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
// Second arm: both operands dynamic, two guards to link.
2181 m_jit.link(iter->from, m_jit.label());
2182 m_jit.link((++iter)->from, m_jit.label());
2183 emitPutArg(X86::eax, 0);
2184 emitPutArg(X86::edx, 4);
2185 emitCall(i, Machine::cti_op_loop_if_less);
2186 m_jit.testl_rr(X86::eax, X86::eax);
2187 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
2192 case op_put_by_id: {
2193 m_jit.link(iter->from, m_jit.label());
2194 m_jit.link((++iter)->from, m_jit.label());
// NOTE(review): reinterpret_cast<unsigned> of an Identifier* assumes 32-bit
// pointers - this CTI back end is x86-32 only.
2196 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
2197 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
2198 emitPutArg(X86::eax, 0);
2199 emitPutArg(X86::edx, 8);
2200 X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_put_by_id);
2202 // Track the location of the call; this will be used to recover repatch information.
2203 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
2204 m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
2205 ++structureIDInstructionIndex;
2210 case op_get_by_id: {
2211 // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
2212 // so that we only need track one pointer into the slow case code - we track a pointer to the location
2213 // of the call (which we can use to look up the repatch information), but should a array-length or
2214 // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
2215 // the distance from the call to the head of the slow case.
2217 m_jit.link(iter->from, m_jit.label());
2218 m_jit.link((++iter)->from, m_jit.label());
2221 X86Assembler::JmpDst coldPathBegin = m_jit.label();
2223 emitPutArg(X86::eax, 0);
2224 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
2225 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
2226 X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_get_by_id);
// The fixed cold-path-begin-to-call distance is what lets stubs jump back here.
2227 ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
2228 emitPutResult(instruction[i + 1].u.operand);
2230 // Track the location of the call; this will be used to recover repatch information.
2231 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
2232 m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
2233 ++structureIDInstructionIndex;
2238 case op_resolve_global: {
// No slow-path code to emit; just keep the structure-stub index in step with
// the slot the main pass reserved for this instruction.
2239 ++structureIDInstructionIndex;
2243 case op_loop_if_lesseq: {
2244 emitSlowScriptCheck(i);
2246 unsigned target = instruction[i + 3].u.operand;
2247 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
// Same two-arm shape as op_loop_if_less above (constant src2 vs. dynamic).
2249 m_jit.link(iter->from, m_jit.label());
2250 emitPutArg(X86::edx, 0);
2251 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
2252 emitCall(i, Machine::cti_op_loop_if_lesseq);
2253 m_jit.testl_rr(X86::eax, X86::eax);
2254 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
2256 m_jit.link(iter->from, m_jit.label());
2257 m_jit.link((++iter)->from, m_jit.label());
2258 emitPutArg(X86::eax, 0);
2259 emitPutArg(X86::edx, 4);
2260 emitCall(i, Machine::cti_op_loop_if_lesseq);
2261 m_jit.testl_rr(X86::eax, X86::eax);
2262 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
// op_pre_inc slow path (inferred from cti_op_pre_inc): the fast path
// speculatively added one; the overflow case subtracts it back out before
// calling the helper. The not-immediate case skips the undo.
2268 unsigned srcDst = instruction[i + 1].u.operand;
2269 X86Assembler::JmpSrc notImm = iter->from;
2270 m_jit.link((++iter)->from, m_jit.label());
2271 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
2272 m_jit.link(notImm, m_jit.label());
2273 emitPutArg(X86::eax, 0);
2274 emitCall(i, Machine::cti_op_pre_inc);
2275 emitPutResult(srcDst);
2279 case op_put_by_val: {
2280 // Normal slow cases - either is not an immediate imm, or is an array.
2281 X86Assembler::JmpSrc notImm = iter->from;
2282 m_jit.link((++iter)->from, m_jit.label());
2283 m_jit.link((++iter)->from, m_jit.label());
// Re-tag the de-tagged integer subscript before calling the generic helper.
2284 emitFastArithIntToImmNoCheck(X86::edx);
2285 m_jit.link(notImm, m_jit.label());
2286 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
2287 emitPutArg(X86::eax, 0);
2288 emitPutArg(X86::edx, 4);
2289 emitPutArg(X86::ecx, 8);
2290 emitCall(i, Machine::cti_op_put_by_val);
// Skip the array-specific slow case below and rejoin the fast path.
2291 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
2293 // slow cases for immediate int accesses to arrays
2294 m_jit.link((++iter)->from, m_jit.label());
2295 m_jit.link((++iter)->from, m_jit.label());
2296 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
2297 emitPutArg(X86::eax, 0);
2298 emitPutArg(X86::edx, 4);
2299 emitPutArg(X86::ecx, 8);
2300 emitCall(i, Machine::cti_op_put_by_val_array);
2305 case op_loop_if_true: {
2306 emitSlowScriptCheck(i);
2308 m_jit.link(iter->from, m_jit.label());
2309 emitPutArg(X86::eax, 0);
2310 emitCall(i, Machine::cti_op_jtrue);
2311 m_jit.testl_rr(X86::eax, X86::eax);
2312 unsigned target = instruction[i + 2].u.operand;
2313 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
// op_pre_dec slow path (inferred from cti_op_pre_dec): mirror of pre_inc -
// undo the speculative decrement on overflow by adding one back.
2318 unsigned srcDst = instruction[i + 1].u.operand;
2319 X86Assembler::JmpSrc notImm = iter->from;
2320 m_jit.link((++iter)->from, m_jit.label());
2321 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
2322 m_jit.link(notImm, m_jit.label());
2323 emitPutArg(X86::eax, 0);
2324 emitCall(i, Machine::cti_op_pre_dec);
2325 emitPutResult(srcDst);
// op_jless slow path (inferred from cti_op_jless): same two-arm shape as the
// loop_if_less cases, but the fast path took the branch, so a zero result
// here (je) means fall through to the target.
2330 unsigned target = instruction[i + 3].u.operand;
2331 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
2333 m_jit.link(iter->from, m_jit.label());
2334 emitPutArg(X86::edx, 0);
2335 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
2336 emitCall(i, Machine::cti_op_jless);
2337 m_jit.testl_rr(X86::eax, X86::eax);
2338 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
2340 m_jit.link(iter->from, m_jit.label());
2341 m_jit.link((++iter)->from, m_jit.label());
2342 emitPutArg(X86::eax, 0);
2343 emitPutArg(X86::edx, 4);
2344 emitCall(i, Machine::cti_op_jless);
2345 m_jit.testl_rr(X86::eax, m_jit_placeholder);
2346 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
// op_not slow path (inferred from cti_op_not): the xor presumably undoes the
// fast path's speculative bool-tag flip before calling out - TODO confirm
// against the fast path (not visible in this extract).
2352 m_jit.link(iter->from, m_jit.label());
2353 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
2354 emitPutArg(X86::eax, 0);
2355 emitCall(i, Machine::cti_op_not);
2356 emitPutResult(instruction[i + 1].u.operand);
// op_jfalse-style slow path: branch taken when cti_op_jtrue returns zero.
2361 m_jit.link(iter->from, m_jit.label());
2362 emitPutArg(X86::eax, 0);
2363 emitCall(i, Machine::cti_op_jtrue);
2364 m_jit.testl_rr(X86::eax, X86::eax);
2365 unsigned target = instruction[i + 2].u.operand;
2366 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 2 + target]); // inverted!
// op_post_inc slow path (inferred from cti_op_post_inc): helper returns the
// old value in eax (stored to dst) and the incremented value in edx (stored
// back to srcDst).
2371 unsigned srcDst = instruction[i + 2].u.operand;
2372 m_jit.link(iter->from, m_jit.label());
2373 m_jit.link((++iter)->from, m_jit.label());
2374 emitPutArg(X86::eax, 0);
2375 emitCall(i, Machine::cti_op_post_inc);
2376 emitPutResult(instruction[i + 1].u.operand);
2377 emitPutResult(srcDst, X86::edx);
// op_bitnot slow path (inferred from cti_op_bitnot).
2382 m_jit.link(iter->from, m_jit.label());
2383 emitPutArg(X86::eax, 0);
2384 emitCall(i, Machine::cti_op_bitnot);
2385 emitPutResult(instruction[i + 1].u.operand);
// op_bitand slow path (inferred from cti_op_bitand): three arms depending on
// which operand, if any, was a constant immediate on the fast path.
2390 unsigned src1 = instruction[i + 2].u.operand;
2391 unsigned src2 = instruction[i + 3].u.operand;
2392 unsigned dst = instruction[i + 1].u.operand;
2393 if (getConstantImmediateNumericArg(src1)) {
2394 m_jit.link(iter->from, m_jit.label());
2395 emitGetPutArg(src1, 0, X86::ecx);
2396 emitPutArg(X86::eax, 4);
2397 emitCall(i, Machine::cti_op_bitand);
2399 } else if (getConstantImmediateNumericArg(src2)) {
2400 m_jit.link(iter->from, m_jit.label());
2401 emitPutArg(X86::eax, 0);
2402 emitGetPutArg(src2, 4, X86::ecx);
2403 emitCall(i, Machine::cti_op_bitand);
2406 m_jit.link(iter->from, m_jit.label());
2407 emitGetPutArg(src1, 0, X86::ecx);
2408 emitPutArg(X86::edx, 4);
2409 emitCall(i, Machine::cti_op_bitand);
// op_jtrue slow path: branch taken when cti_op_jtrue returns non-zero.
2416 m_jit.link(iter->from, m_jit.label());
2417 emitPutArg(X86::eax, 0);
2418 emitCall(i, Machine::cti_op_jtrue);
2419 m_jit.testl_rr(X86::eax, X86::eax);
2420 unsigned target = instruction[i + 2].u.operand;
2421 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
// op_post_dec slow path (inferred from cti_op_post_dec): mirror of post_inc.
2426 unsigned srcDst = instruction[i + 2].u.operand;
2427 m_jit.link(iter->from, m_jit.label());
2428 m_jit.link((++iter)->from, m_jit.label());
2429 emitPutArg(X86::eax, 0);
2430 emitCall(i, Machine::cti_op_post_dec);
2431 emitPutResult(instruction[i + 1].u.operand);
2432 emitPutResult(srcDst, X86::edx);
// op_bitxor slow path (inferred from cti_op_bitxor).
2437 m_jit.link(iter->from, m_jit.label());
2438 emitPutArg(X86::eax, 0);
2439 emitPutArg(X86::edx, 4);
2440 emitCall(i, Machine::cti_op_bitxor);
2441 emitPutResult(instruction[i + 1].u.operand);
// op_bitor slow path (inferred from cti_op_bitor).
2446 m_jit.link(iter->from, m_jit.label());
2447 emitPutArg(X86::eax, 0);
2448 emitPutArg(X86::edx, 4);
2449 emitCall(i, Machine::cti_op_bitor);
2450 emitPutResult(instruction[i + 1].u.operand);
// op_eq slow path (inferred from cti_op_eq).
2455 m_jit.link(iter->from, m_jit.label());
2456 emitPutArg(X86::eax, 0);
2457 emitPutArg(X86::edx, 4);
2458 emitCall(i, Machine::cti_op_eq);
2459 emitPutResult(instruction[i + 1].u.operand);
// op_neq slow path (inferred from cti_op_neq).
2464 m_jit.link(iter->from, m_jit.label());
2465 emitPutArg(X86::eax, 0);
2466 emitPutArg(X86::edx, 4);
2467 emitCall(i, Machine::cti_op_neq);
2468 emitPutResult(instruction[i + 1].u.operand);
2472 CTI_COMPILE_BINARY_OP_SLOW_CASE(op_stricteq);
2473 CTI_COMPILE_BINARY_OP_SLOW_CASE(op_nstricteq);
2474 case op_instanceof: {
2475 m_jit.link(iter->from, m_jit.label());
2476 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
2477 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
2478 emitGetPutArg(instruction[i + 4].u.operand, 8, X86::ecx);
2479 emitCall(i, Machine::cti_op_instanceof);
2480 emitPutResult(instruction[i + 1].u.operand);
// op_mod slow path (inferred from cti_op_mod): the fast path de-tagged both
// operands for idiv, so re-tag eax and ecx before calling out; the two
// not-immediate guards skip the re-tag.
2485 X86Assembler::JmpSrc notImm1 = iter->from;
2486 X86Assembler::JmpSrc notImm2 = (++iter)->from;
2487 m_jit.link((++iter)->from, m_jit.label());
2488 emitFastArithReTagImmediate(X86::eax);
2489 emitFastArithReTagImmediate(X86::ecx);
2490 m_jit.link(notImm1, m_jit.label());
2491 m_jit.link(notImm2, m_jit.label());
2492 emitPutArg(X86::eax, 0);
2493 emitPutArg(X86::ecx, 4);
2494 emitCall(i, Machine::cti_op_mod);
2495 emitPutResult(instruction[i + 1].u.operand);
// op_mul slow path (inferred from cti_op_mul): constant-operand forms call
// the helper directly; otherwise use the shared binary-arith generator.
2500 int dst = instruction[i + 1].u.operand;
2501 int src1 = instruction[i + 2].u.operand;
2502 int src2 = instruction[i + 3].u.operand;
2503 if (getConstantImmediateNumericArg(src1) || getConstantImmediateNumericArg(src2)) {
2504 m_jit.link(iter->from, m_jit.label());
2505 emitGetPutArg(src1, 0, X86::ecx);
2506 emitGetPutArg(src2, 4, X86::ecx);
2507 emitCall(i, Machine::cti_op_mul);
2510 compileBinaryArithOpSlowCase(op_mul, iter, dst, src1, src2, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
2517 case op_construct: {
2518 m_jit.link(iter->from, m_jit.label());
2520 // We jump to this slow case if the ctiCode for the codeBlock has not yet been generated; compile it now.
2521 emitCall(i, Machine::cti_vm_compile);
// cti_vm_compile returns the freshly generated entry point in eax; call it.
2522 emitCall(i, X86::eax);
2524 // Instead of checking for 0 we could initialize the CodeBlock::ctiCode to point to a trampoline that would trigger the translation.
2526 // Put the return value in dst. In the interpreter, op_ret does this.
2527 emitPutResult(instruction[i + 1].u.operand);
2531 case op_to_jsnumber: {
2532 m_jit.link(iter->from, m_jit.label());
2534 emitPutArg(X86::eax, 0);
2535 emitCall(i, Machine::cti_op_to_jsnumber);
2537 emitPutResult(instruction[i + 1].u.operand);
// Any other opcode must not have recorded a slow case.
2543 ASSERT_NOT_REACHED();
// NOTE(review): this links an unconditional jump to m_labels[i]; the
// surrounding lines are elided in this extract, so confirm the intended
// rejoin target against the full file.
2547 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i]);
// Every reserved structure-stub slot must have been consumed.
2550 ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
// Top-level compile driver. Emits the prologue (stash the native return
// address into the call-frame header), runs the three passes (main, link,
// slow cases), copies the generated buffer, then performs every address
// fix-up against the final buffer: switch jump tables, exception-handler
// targets, call sites (plus the return-address -> vPC map), jsr absolute
// addresses, and property-access stub locations. Finally publishes the code
// pointer on the CodeBlock.
2553 void CTI::privateCompile()
2555 // Could use a popl_m, but would need to offset the following instruction if so.
2556 m_jit.popl_r(X86::ecx);
2557 emitPutToCallFrameHeader(X86::ecx, RegisterFile::ReturnPC)
2559 privateCompileMainPass();
2560 privateCompileLinkPass();
2561 privateCompileSlowCases();
// Every recorded jump should have been linked by the passes above.
2563 ASSERT(m_jmpTable.isEmpty());
2565 void* code = m_jit.copy();
2568 // Translate vPC offsets into addresses in JIT generated code, for switch tables.
2569 for (unsigned i = 0; i < m_switches.size(); ++i) {
2570 SwitchRecord record = m_switches[i];
2571 unsigned opcodeIndex = record.m_opcodeIndex;
2573 if (record.m_type != SwitchRecord::String) {
2574 ASSERT(record.m_type == SwitchRecord::Immediate || record.m_type == SwitchRecord::Character);
2575 ASSERT(record.m_jumpTable.m_simpleJumpTable->branchOffsets.size() == record.m_jumpTable.m_simpleJumpTable->ctiOffsets.size());
// Branch offsets are relative to the opcode following the 3-operand switch.
2577 record.m_jumpTable.m_simpleJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
2579 for (unsigned j = 0; j < record.m_jumpTable.m_simpleJumpTable->branchOffsets.size(); ++j) {
2580 unsigned offset = record.m_jumpTable.m_simpleJumpTable->branchOffsets[j];
// A zero branch offset marks an unpopulated table slot: fall back to default.
2581 record.m_jumpTable.m_simpleJumpTable->ctiOffsets[j] = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_simpleJumpTable->ctiDefault;
2584 ASSERT(record.m_type == SwitchRecord::String);
2586 record.m_jumpTable.m_stringJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
2588 StringJumpTable::StringOffsetTable::iterator end = record.m_jumpTable.m_stringJumpTable->offsetTable.end();
2589 for (StringJumpTable::StringOffsetTable::iterator it = record.m_jumpTable.m_stringJumpTable->offsetTable.begin(); it != end; ++it) {
2590 unsigned offset = it->second.branchOffset;
2591 it->second.ctiOffset = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_stringJumpTable->ctiDefault;
// Resolve exception-handler bytecode targets to native addresses.
2596 for (Vector<HandlerInfo>::iterator iter = m_codeBlock->exceptionHandlers.begin(); iter != m_codeBlock->exceptionHandlers.end(); ++iter)
2597 iter->nativeCode = m_jit.getRelocatedAddress(code, m_labels[iter->target]);
// Link each recorded call site and map its return address back to the vPC,
// so the runtime can recover the bytecode location from a native return.
2599 for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
2601 X86Assembler::link(code, iter->from, iter->to);
2602 m_codeBlock->ctiReturnAddressVPCMap.add(m_jit.getRelocatedAddress(code, iter->from), iter->opcodeIndex);
2605 // Link absolute addresses for jsr
2606 for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
2607 X86Assembler::linkAbsoluteAddress(code, iter->addrPosition, iter->target);
// Record final native locations of the property-access hot paths and their
// slow-case calls, for later inline-cache repatching.
2609 for (unsigned i = 0; i < m_codeBlock->structureIDInstructions.size(); ++i) {
2610 StructureStubInfo& info = m_codeBlock->structureIDInstructions[i];
2611 info.callReturnLocation = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].callReturnLocation);
2612 info.hotPathBegin = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].hotPathBegin);
2615 m_codeBlock->ctiCode = code;
// Generates a monomorphic get_by_id stub for a property found directly on the
// base object: verify the value in eax is a cell with exactly the expected
// StructureID, then load m_propertyStorage[cachedOffset] into eax. Both guard
// failures are linked to the generic cti_op_get_by_id_fail handler, the stub
// is recorded on the CodeBlock (for later deletion), and the original call
// site at returnAddress is repatched to enter this stub instead.
2618 void CTI::privateCompileGetByIdSelf(StructureID* structureID, size_t cachedOffset, void* returnAddress)
2620 // Check eax is an object of the right StructureID.
2621 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2622 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2623 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2624 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2626 // Checks out okay! - getDirectOffset
2627 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2628 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
2631 void* code = m_jit.copy();
2634 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2635 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2637 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2639 ctiRepatchCallByReturnAddress(returnAddress, code);
// Generates a get_by_id stub for a property found on the direct prototype:
// guard that eax is a cell with the expected StructureID and that the
// prototype's StructureID is unchanged, then load the cached slot from the
// prototype's property storage. Two variants: with CTI_REPATCH_PIC the stub
// links failures back into the original slow case and jumps back into the
// hot path on success; otherwise it behaves like the self stub (fail ->
// cti_op_get_by_id_fail, call site repatched to the stub).
2642 void CTI::privateCompileGetByIdProto(StructureID* structureID, StructureID* prototypeStructureID, size_t cachedOffset, void* returnAddress)
2644 #if USE(CTI_REPATCH_PIC)
2645 StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
2647 // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
2648 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2650 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
2651 // referencing the prototype object - let's speculatively load its table nice and early!)
2652 JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
2653 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2654 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2656 // check eax is an object of the right StructureID.
2657 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2658 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2659 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2660 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2662 // Check the prototype object's StructureID had not changed.
2663 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2664 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
2665 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
2667 // Checks out okay! - getDirectOffset
2668 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::ecx);
2670 X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
2672 void* code = m_jit.copy();
2675 // Use the repatch information to link the failure cases back to the original slow case routine.
2676 void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
2677 X86Assembler::link(code, failureCases1, slowCaseBegin);
2678 X86Assembler::link(code, failureCases2, slowCaseBegin);
2679 X86Assembler::link(code, failureCases3, slowCaseBegin);
2681 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
2682 intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
2683 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
2685 // Track the stub we have created so that it will be deleted later.
2686 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2688 // Finally repatch the jump to the slow case back in the hot path to jump here instead.
2689 // FIXME: should revert this repatching, on failure.
2690 intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
2691 X86Assembler::repatchBranchOffset(jmpLocation, code);
// Non-PIC variant below (the '#else' line itself is elided in this extract -
// confirm against the full file): same guards, but the result lands directly
// in eax and failures go to the generic fail handler.
2693 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
2694 // referencing the prototype object - let's speculatively load its table nice and early!)
2695 JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
2696 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2697 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2699 // check eax is an object of the right StructureID.
2700 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2701 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2702 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2703 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2705 // Check the prototype object's StructureID had not changed.
2706 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2707 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
2708 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
2710 // Checks out okay! - getDirectOffset
2711 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
2715 void* code = m_jit.copy();
2718 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2719 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2720 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2722 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2724 ctiRepatchCallByReturnAddress(returnAddress, code);
// Compiles a property-access stub for a get_by_id whose property lives `count`
// hops up the base object's prototype chain.  The stub guards the base cell's
// StructureID and every intermediate prototype's StructureID; on any mismatch it
// falls back to cti_op_get_by_id_fail.  On success it loads the cached slot from
// the final prototype's property storage into eax.  The original call site is
// repatched to enter this stub directly.
2728 void CTI::privateCompileGetByIdChain(StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset, void* returnAddress)
// Every guard below appends its unlinked branch here; all of them are linked to
// the generic failure handler once the code has been copied out.
2732 Vector<X86Assembler::JmpSrc> bucketsOfFail;
2734 // Check eax is an object of the right StructureID.
// First guard: the immediate-tag test rejects non-cell values in eax.
2735 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2736 bucketsOfFail.append(m_jit.emitUnlinkedJne());
2737 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2738 bucketsOfFail.append(m_jit.emitUnlinkedJne());
// Walk the prototype chain at compile time; `chainEntries[i]` is the StructureID
// expected at each hop, and `protoObject` tracks the concrete prototype object.
2740 StructureID* currStructureID = structureID;
2741 RefPtr<StructureID>* chainEntries = chain->head();
2742 JSObject* protoObject = 0;
2743 for (unsigned i = 0; i<count; ++i) {
2744 protoObject = static_cast<JSObject*>(currStructureID->prototypeForLookup(m_exec));
2745 currStructureID = chainEntries[i].get();
2747 // Check the prototype object's StructureID had not changed.
// The guard compares against the prototype's StructureID slot by absolute
// address, so the prototype object itself is baked into the stub.
2748 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2749 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(currStructureID), static_cast<void*>(protoStructureIDAddress));
2750 bucketsOfFail.append(m_jit.emitUnlinkedJne());
2752 ASSERT(protoObject);
// All guards passed: load the final prototype's property storage pointer, then
// the cached slot, into eax (the CTI result register).
2754 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2755 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2756 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
2759 bucketsOfFail.append(m_jit.emitUnlinkedJmp());
2761 void* code = m_jit.copy();
// Link every recorded failure branch to the generic (non-cached) handler.
2764 for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
2765 X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
// Track the stub so the CodeBlock can delete it later, then repatch the caller
// to jump straight into it.
2767 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2769 ctiRepatchCallByReturnAddress(returnAddress, code);
// Compiles a stub for a put_by_id that replaces an existing property (no
// StructureID transition).  Guards that eax holds a cell with the expected
// StructureID, then stores the value (in edx) into the object's property
// storage at the cached slot.  Guard failures go to cti_op_put_by_id_fail.
2772 void CTI::privateCompilePutByIdReplace(StructureID* structureID, size_t cachedOffset, void* returnAddress)
2774 // check eax is an object of the right StructureID.
2775 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2776 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2777 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2778 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2780 // checks out okay! - putDirectOffset
// Load the property storage pointer (clobbering eax, no longer needed), then
// write edx into the cached slot.
2781 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2782 m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
2785 void* code = m_jit.copy();
// Both guard branches fall back to the generic put_by_id failure handler.
2788 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
2789 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
// Record the stub for later deletion and repatch the caller to use it.
2791 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2793 ctiRepatchCallByReturnAddress(returnAddress, code);
// Out-of-line helper invoked from JIT-compiled put_by_id transition stubs when
// the transition requires property-storage reallocation: performs the
// StructureID transition on the base object, then writes the new value into
// the (possibly reallocated) storage at the cached slot.
2798 static JSValue* transitionObject(StructureID* newStructureID, size_t cachedOffset, JSObject* baseObject, JSValue* value)
2800 baseObject->transitionTo(newStructureID);
2801 baseObject->putDirectOffset(cachedOffset, value);
// Returns true when transitioning between these two StructureIDs changes the
// property storage capacity, i.e. the put cannot be done with a simple inline
// store and must call out to transitionObject() to reallocate storage.
2807 static inline bool transitionWillNeedStorageRealloc(StructureID* oldStructureID, StructureID* newStructureID)
2809 return oldStructureID->propertyStorageCapacity() != newStructureID->propertyStorageCapacity();
// Compiles a stub for a put_by_id that adds a new property, transitioning the
// object from oldStructureID to newStructureID.  The stub guards the base
// cell's StructureID and (via sIDC) the entire prototype chain, then either
// performs the transition inline (same storage capacity) or calls out to
// transitionObject() when storage must be reallocated.
2812 void CTI::privateCompilePutByIdTransition(StructureID* oldStructureID, StructureID* newStructureID, size_t cachedOffset, StructureIDChain* sIDC, void* returnAddress)
2814 Vector<X86Assembler::JmpSrc, 16> failureCases;
2815 // check eax is an object of the right StructureID.
2816 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2817 failureCases.append(m_jit.emitUnlinkedJne());
2818 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(oldStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2819 failureCases.append(m_jit.emitUnlinkedJne());
// Branches taken when the chain walk legitimately reaches null (end of the
// prototype chain) — these jump past the remaining guards to the fast path.
2820 Vector<X86Assembler::JmpSrc> successCases;
// ecx = baseObject->m_structureID; verify the base is an ObjectType cell and
// load its prototype so the chain guards below can run.
2823 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
2824 // proto(ecx) = baseObject->structureID()->prototype()
2825 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
2826 failureCases.append(m_jit.emitUnlinkedJne());
2827 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
2829 // ecx = baseObject->m_structureID
// Emit a guard per prototype in the chain: stop successfully at null,
// otherwise require the expected StructureID and ObjectType, then advance ecx
// to the next prototype.
2830 for (RefPtr<StructureID>* it = sIDC->head(); *it; ++it) {
2831 // null check the prototype
2832 m_jit.cmpl_i32r(reinterpret_cast<intptr_t> (jsNull()), X86::ecx);
2833 successCases.append(m_jit.emitUnlinkedJe());
2835 // Check the structure id
2836 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(it->get()), OBJECT_OFFSET(JSCell, m_structureID), X86::ecx);
2837 failureCases.append(m_jit.emitUnlinkedJne());
2839 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::ecx, X86::ecx);
2840 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
2841 failureCases.append(m_jit.emitUnlinkedJne());
2842 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
// NOTE(review): this Jne consumes flags from a comparison not visible at this
// point in the listing (it follows the loop exit) — verify which compare it
// pairs with before touching the surrounding code.
2845 failureCases.append(m_jit.emitUnlinkedJne());
// All success (null-prototype) branches land here, at the start of the store.
2846 for (unsigned i = 0; i < successCases.size(); ++i)
2847 m_jit.link(successCases[i], m_jit.label());
2849 X86Assembler::JmpSrc callTarget;
2850 // Fast case, don't need to do any heavy lifting, so don't bother making a call.
2851 if (!transitionWillNeedStorageRealloc(oldStructureID, newStructureID)) {
2852 // Assumes m_refCount can be decremented easily, refcount decrement is safe as
2853 // codeblock should ensure oldStructureID->m_refCount > 0
// NOTE(review): these inc/dec the 32-bit word at the StructureID's own address,
// i.e. they assume m_refCount is the first member of StructureID — confirm.
2854 m_jit.subl_i8m(1, reinterpret_cast<void*>(oldStructureID));
2855 m_jit.addl_i8m(1, reinterpret_cast<void*>(newStructureID));
// Swing the cell's StructureID to the new one, then store the value (edx)
// into the existing property storage at the cached slot.
2856 m_jit.movl_i32m(reinterpret_cast<uint32_t>(newStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2859 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2860 m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
2862 // Slow case transition -- we're going to need to quite a bit of work,
2863 // so just make a call
// cdecl: args pushed right-to-left, so the callee receives
// (newStructureID, cachedOffset, baseObject, value) — matching transitionObject.
2864 m_jit.pushl_r(X86::edx);
2865 m_jit.pushl_r(X86::eax);
2866 m_jit.movl_i32r(cachedOffset, X86::eax);
2867 m_jit.pushl_r(X86::eax);
2868 m_jit.movl_i32r(reinterpret_cast<uint32_t>(newStructureID), X86::eax);
2869 m_jit.pushl_r(X86::eax);
2870 callTarget = m_jit.emitCall();
// Pop the four pushed arguments.
2871 m_jit.addl_i32r(4 * sizeof(void*), X86::esp);
// Failure path: relink every guard here, restore the CTI argument register
// state expected by the trampoline, and jump to the generic failure handler.
2875 X86Assembler::JmpSrc failureJump;
2876 if (failureCases.size()) {
2877 for (unsigned i = 0; i < failureCases.size(); ++i)
2878 m_jit.link(failureCases[i], m_jit.label());
2879 m_jit.emitRestoreArgumentReferenceForTrampoline();
2880 failureJump = m_jit.emitUnlinkedJmp();
2883 void* code = m_jit.copy();
2886 if (failureCases.size())
2887 X86Assembler::link(code, failureJump, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
// Only the realloc (slow) variant emitted a call; link it to transitionObject.
2889 if (transitionWillNeedStorageRealloc(oldStructureID, newStructureID))
2890 X86Assembler::link(code, callTarget, reinterpret_cast<void*>(transitionObject));
// Track the stub for deletion and repatch the caller to enter it directly.
2892 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2894 ctiRepatchCallByReturnAddress(returnAddress, code);
// Compiles the shared trampoline for reading JSArray::length: verifies eax is
// a cell whose vtable pointer matches JSArray's, loads the length from the
// array's storage, and re-tags it as an immediate number in eax.  All failure
// paths go to cti_op_get_by_id_fail.
2899 void* CTI::privateCompileArrayLengthTrampoline()
2900 // Check eax is an array
2901 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2902 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
// Compare the cell's vtable pointer (at offset 0) against the JSArray vptr.
2903 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
2904 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2906 // Checks out okay! - get the length from the storage
2907 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::eax);
2908 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::eax, X86::eax);
// Encode as an immediate number: length*2 + 1 (appears to be the JSImmediate
// integer encoding); the jo branch catches lengths too large to encode.
2910 m_jit.addl_rr(X86::eax, X86::eax);
2911 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
2912 m_jit.addl_i8r(1, X86::eax);
2916 void* code = m_jit.copy();
2919 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2920 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2921 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
// Compiles the shared trampoline for reading JSString::length: verifies eax is
// a cell whose vtable pointer matches JSString's, loads the length from the
// string's UString rep, and re-tags it as an immediate number in eax.  All
// failure paths go to cti_op_get_by_id_fail.
2927 void* CTI::privateCompileStringLengthTrampoline()
2928 // Check eax is a string
2929 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2930 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
// Compare the cell's vtable pointer (at offset 0) against the JSString vptr.
2931 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsStringVptr), X86::eax);
2932 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2934 // Checks out okay! - get the length from the Ustring.
2935 m_jit.movl_mr(OBJECT_OFFSET(JSString, m_value) + OBJECT_OFFSET(UString, m_rep), X86::eax, X86::eax);
2936 m_jit.movl_mr(OBJECT_OFFSET(UString::Rep, len), X86::eax, X86::eax);
// Encode as an immediate number: len*2 + 1 (appears to be the JSImmediate
// integer encoding); the jo branch catches lengths too large to encode.
2938 m_jit.addl_rr(X86::eax, X86::eax);
2939 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
2940 m_jit.addl_i8r(1, X86::eax);
2944 void* code = m_jit.copy();
2947 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2948 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2949 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
// Repatches an inline-cached get_by_id "self" access in place: rewrites the
// StructureID immediate and the property-storage displacement baked into the
// hot path, so subsequent hits take the fast path with no stub call.
2953 void CTI::patchGetByIdSelf(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
2955 StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
2957 // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
2958 // Should probably go to Machine::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
2959 ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_get_by_id_generic));
2961 // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
2962 X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
2963 X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdStructureID, reinterpret_cast<uint32_t>(structureID));
// Repatches an inline-cached put_by_id replace in place: rewrites the
// StructureID immediate and the property-storage displacement baked into the
// hot path, so subsequent hits take the fast path with no stub call.
2966 void CTI::patchPutByIdReplace(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
2968 StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
2970 // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
2971 // Should probably go to Machine::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
2972 ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_put_by_id_generic));
2974 // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
2975 X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
2976 X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdStructureID, reinterpret_cast<uint32_t>(structureID));
// Compiles a PIC stub for array.length at a specific get_by_id call site.
// Unlike the shared trampoline, this stub computes into ecx (keeping eax
// intact for the slow path), links its failure branches back to the call
// site's own slow case, and on success jumps back into the hot path at the
// point that performs the store to the destination register.
2979 void CTI::privateCompilePatchGetArrayLength(void* returnAddress)
2981 StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
2983 // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
2984 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2986 // Check eax is an array
2987 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2988 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
// Compare the cell's vtable pointer (at offset 0) against the JSArray vptr.
2989 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
2990 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2992 // Checks out okay! - get the length from the storage
// Use ecx so eax still holds the base value if we must fall back to the slow case.
2993 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
2994 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::ecx, X86::ecx);
// Encode as an immediate number (length*2 + 1); overflow means the length
// cannot be represented, so bail to the slow case.
2996 m_jit.addl_rr(X86::ecx, X86::ecx);
2997 X86Assembler::JmpSrc failureClobberedECX = m_jit.emitUnlinkedJo();
2998 m_jit.addl_i8r(1, X86::ecx);
3000 X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
// The overflow path has clobbered ecx, so restore the argument reference
// before routing to the slow case along with the other failures.
3002 m_jit.link(failureClobberedECX, m_jit.label());
3003 m_jit.emitRestoreArgumentReference();
3004 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJmp();
3006 void* code = m_jit.copy();
3009 // Use the repatch information to link the failure cases back to the original slow case routine.
3010 void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
3011 X86Assembler::link(code, failureCases1, slowCaseBegin);
3012 X86Assembler::link(code, failureCases2, slowCaseBegin);
3013 X86Assembler::link(code, failureCases3, slowCaseBegin);
3015 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
3016 intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
3017 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
3019 // Track the stub we have created so that it will be deleted later.
3020 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
3022 // Finally repatch the jump to slow case back in the hot path to jump here instead.
3023 // FIXME: should revert this repatching, on failure.
3024 intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
3025 X86Assembler::repatchBranchOffset(jmpLocation, code);
// Emits code to read register `index` of a JSVariableObject into `dst`:
// variableObject->d (data pointer) -> d->registers -> registers[index].
// `dst` is used as the scratch register for each intermediate load.
3028 void CTI::emitGetVariableObjectRegister(X86Assembler::RegisterID variableObject, int index, X86Assembler::RegisterID dst)
3030 m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, dst);
3031 m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), dst, dst);
3032 m_jit.movl_mr(index * sizeof(Register), dst, dst);
// Emits code to write `src` into register `index` of a JSVariableObject:
// variableObject->d -> d->registers, then store to registers[index].
// Note: `variableObject` is clobbered (reused as the scratch for the chain of
// loads); callers must not rely on it afterwards.
3035 void CTI::emitPutVariableObjectRegister(X86Assembler::RegisterID src, X86Assembler::RegisterID variableObject, int index)
3037 m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, variableObject);
3038 m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), variableObject, variableObject);
3039 m_jit.movl_rm(src, index * sizeof(Register), variableObject);
// Compiles a regular expression to native x86 code via WREC.  Emits a
// fast-call prologue, a match attempt starting at the current position, and a
// retry loop that advances the start position on failure until it passes the
// subject length.  On success the output vector (if non-null) receives the
// match start/end; on failure eax is set to -1.  Reports pattern-size and
// parse errors through error_ptr, and the subpattern count through
// numSubpatterns_ptr.
3044 void* CTI::compileRegExp(ExecState* exec, const UString& pattern, unsigned* numSubpatterns_ptr, const char** error_ptr, bool ignoreCase, bool multiline)
3046 // TODO: better error messages
3047 if (pattern.size() > MaxPatternSize) {
3048 *error_ptr = "regular expression too large";
3052 X86Assembler jit(exec->machine()->jitCodeBuffer());
3053 WRECParser parser(pattern, ignoreCase, multiline, jit);
3055 jit.emitConvertToFastCall();
3057 // Preserve regs & initialize outputRegister.
3058 jit.pushl_r(WRECGenerator::outputRegister);
3059 jit.pushl_r(WRECGenerator::currentValueRegister);
3060 // push pos onto the stack, both to preserve and as a parameter available to parseDisjunction
3061 jit.pushl_r(WRECGenerator::currentPositionRegister);
3062 // load output pointer
// (Continuation of a load whose opening line is not shown here — presumably a
// movl_mr fetching the output-vector argument from the stack into
// outputRegister; verify against the full source.)
3067 , X86::esp, WRECGenerator::outputRegister);
3069 // restart point on match fail.
3070 WRECGenerator::JmpDst nextLabel = jit.label();
3072 // (1) Parse Disjunction:
3074 // Parsing the disjunction should fully consume the pattern.
3075 JmpSrcVector failures;
3076 parser.parseDisjunction(failures);
// NOTE(review): per the comment above, a well-formed parse ends AT the end of
// the pattern — flagging malformedPattern when isEndOfPattern() is true looks
// inverted (expected !parser.isEndOfPattern()); verify against WREC sources.
3077 if (parser.isEndOfPattern()) {
3078 parser.m_err = WRECParser::Error_malformedPattern;
3081 // TODO: better error messages
3082 *error_ptr = "TODO: better error messages";
3087 // Set return value & pop registers from the stack.
// If no output vector was supplied, skip storing match bounds.
3089 jit.testl_rr(WRECGenerator::outputRegister, WRECGenerator::outputRegister);
3090 WRECGenerator::JmpSrc noOutput = jit.emitUnlinkedJe();
// Match succeeded with output: store end position at output[1], pop the saved
// start position into output[0], then restore the preserved registers.
3092 jit.movl_rm(WRECGenerator::currentPositionRegister, 4, WRECGenerator::outputRegister);
3093 jit.popl_r(X86::eax);
3094 jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
3095 jit.popl_r(WRECGenerator::currentValueRegister);
3096 jit.popl_r(WRECGenerator::outputRegister);
// Match succeeded without output vector: same epilogue, minus the end store.
3099 jit.link(noOutput, jit.label());
3101 jit.popl_r(X86::eax);
3102 jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
3103 jit.popl_r(WRECGenerator::currentValueRegister);
3104 jit.popl_r(WRECGenerator::outputRegister);
3108 // All fails link to here. Progress the start point & if it is within scope, loop.
3109 // Otherwise, return fail value.
3110 WRECGenerator::JmpDst here = jit.label();
3111 for (unsigned i = 0; i < failures.size(); ++i)
3112 jit.link(failures[i], here);
// Reload the saved start position from the stack, advance it by one, write it
// back, and retry the match while it is still <= the subject length.
3115 jit.movl_mr(X86::esp, WRECGenerator::currentPositionRegister);
3116 jit.addl_i8r(1, WRECGenerator::currentPositionRegister);
3117 jit.movl_rm(WRECGenerator::currentPositionRegister, X86::esp);
3118 jit.cmpl_rr(WRECGenerator::lengthRegister, WRECGenerator::currentPositionRegister);
3119 jit.link(jit.emitUnlinkedJle(), nextLabel);
// Out of retries: discard the saved position, return -1, restore registers.
3121 jit.addl_i8r(4, X86::esp);
3123 jit.movl_i32r(-1, X86::eax);
3124 jit.popl_r(WRECGenerator::currentValueRegister);
3125 jit.popl_r(WRECGenerator::outputRegister);
3128 *numSubpatterns_ptr = parser.m_numSubpatterns;
3130 void* code = jit.copy();
3135 #endif // ENABLE(WREC)
3139 #endif // ENABLE(CTI)