2 * Copyright (C) 2008 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "CodeBlock.h"
34 #include "wrec/WREC.h"
// GCC/x86 entry and throw trampolines between C++ and JIT-generated code.
// ctiTrampoline: reserves 0x24 bytes of stack for the CTI argument area,
// seeds %esi (used elsewhere as the slow-script tick counter, see
// emitSlowScriptCheck) with 512, then calls the generated code whose pointer
// sits in the caller-pushed argument area.
40 #if COMPILER(GCC) && PLATFORM(X86)
42 ".globl _ctiTrampoline" "\n"
43 "_ctiTrampoline:" "\n"
46 "subl $0x24, %esp" "\n"
47 "movl $512, %esi" "\n"
48 "call *0x30(%esp)" "\n" // 0x30 = 0x0C * 4, 0x0C = CTI_ARGS_code
49 "addl $0x24, %esp" "\n"
// ctiVMThrowTrampoline: loads the ExecState from the CTI argument area and
// (per the cmpl below) checks a field at offset 8 before tail-calling the
// mangled Machine::cti_vm_throw(void*) to unwind out of JIT code.
56 ".globl _ctiVMThrowTrampoline" "\n"
57 "_ctiVMThrowTrampoline:" "\n"
59 "movl 0x34(%esp), %ecx" "\n" // 0x34 = 0x0D * 4, 0x0D = CTI_ARGS_exec
60 "cmpl $0, 8(%ecx)" "\n"
65 "call __ZN3JSC7Machine12cti_vm_throwEPv" "\n"
66 "addl $0x24, %esp" "\n"
// MSVC counterparts of the GCC inline-asm trampolines above, written as
// __declspec(naked) functions so the compiler emits no prologue/epilogue of
// its own. The parameter list mirrors the CTI argument-area layout.
76 __declspec(naked) JSValue* ctiTrampoline(void* code, ExecState* exec, RegisterFile* registerFile, Register* r, ScopeChainNode* scopeChain, CodeBlock* codeBlock, JSValue** exception, Profiler**)
92 __declspec(naked) void ctiVMThrowTrampoline()
96 call JSC::Machine::cti_vm_throw;
109 // get arg puts an arg from the SF register array into a h/w register
110 ALWAYS_INLINE void CTI::emitGetArg(unsigned src, X86Assembler::RegisterID dst)
112 // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
// Constant registers are known at compile time, so emit the JSValue* as an
// immediate; otherwise load from the register file, whose base pointer is
// kept in %edi (see the "edi := r" loads elsewhere in this file).
113 if (src < m_codeBlock->constantRegisters.size()) {
114 JSValue* js = m_codeBlock->constantRegisters[src].jsValue(m_exec);
115 m_jit.movl_i32r(reinterpret_cast<unsigned>(js), dst);
117 m_jit.movl_mr(src * sizeof(Register), X86::edi, dst);
120 // get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
121 ALWAYS_INLINE void CTI::emitGetPutArg(unsigned src, unsigned offset, X86Assembler::RegisterID scratch)
// A compile-time constant can be stored to the stack slot directly as an
// immediate; otherwise bounce the value through 'scratch'. The extra
// sizeof(void*) skips the return-address slot at the top of the stack.
123 if (src < m_codeBlock->constantRegisters.size()) {
124 JSValue* js = m_codeBlock->constantRegisters[src].jsValue(m_exec);
125 m_jit.movl_i32m(reinterpret_cast<unsigned>(js), offset + sizeof(void*), X86::esp);
127 m_jit.movl_mr(src * sizeof(Register), X86::edi, scratch);
128 m_jit.movl_rm(scratch, offset + sizeof(void*), X86::esp);
132 // puts an arg onto the stack, as an arg to a context threaded function.
133 ALWAYS_INLINE void CTI::emitPutArg(X86Assembler::RegisterID src, unsigned offset)
// offset + sizeof(void*) skips the return-address slot on the stack.
135 m_jit.movl_rm(src, offset + sizeof(void*), X86::esp);
// Stores a compile-time-known 32-bit value into an outgoing argument slot,
// skipping the return-address slot like emitPutArg.
138 ALWAYS_INLINE void CTI::emitPutArgConstant(unsigned value, unsigned offset)
140 m_jit.movl_i32m(value, offset + sizeof(void*), X86::esp);
// Returns the JSValue* if operand 'src' is a constant register holding an
// immediate number, otherwise 0. Used to fold constant operands into
// immediate-form instructions (see e.g. op_loop_if_less).
143 ALWAYS_INLINE JSValue* CTI::getConstantImmediateNumericArg(unsigned src)
145 if (src < m_codeBlock->constantRegisters.size()) {
146 JSValue* js = m_codeBlock->constantRegisters[src].jsValue(m_exec);
147 return JSImmediate::isNumber(js) ? js : 0;
// Writes a compile-time-known pointer into CTI parameter slot 'name'
// (an index into the pointer-sized argument area addressed off %esp).
152 ALWAYS_INLINE void CTI::emitPutCTIParam(void* value, unsigned name)
154 m_jit.movl_i32m(reinterpret_cast<intptr_t>(value), name * sizeof(void*), X86::esp);
// Register variant of emitPutCTIParam: stores 'from' into CTI parameter
// slot 'name' on the stack.
157 ALWAYS_INLINE void CTI::emitPutCTIParam(X86Assembler::RegisterID from, unsigned name)
159 m_jit.movl_rm(from, name * sizeof(void*), X86::esp);
// Loads CTI parameter slot 'name' from the stack into register 'to'.
162 ALWAYS_INLINE void CTI::emitGetCTIParam(unsigned name, X86Assembler::RegisterID to)
164 m_jit.movl_mr(name * sizeof(void*), X86::esp, to);
// Stores 'from' into the given call-frame-header entry. The header lives
// below the locals, so the slot is addressed at a negative offset from the
// register-file pointer in %edi.
167 ALWAYS_INLINE void CTI::emitPutToCallFrameHeader(X86Assembler::RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
169 m_jit.movl_rm(from, -((m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize) - entry) * sizeof(Register), X86::edi);
// Counterpart of emitPutToCallFrameHeader: loads a call-frame-header entry
// (negative offset from %edi) into register 'to'.
172 ALWAYS_INLINE void CTI::emitGetFromCallFrameHeader(RegisterFile::CallFrameHeaderEntry entry, X86Assembler::RegisterID to)
174 m_jit.movl_mr(-((m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize) - entry) * sizeof(Register), X86::edi, to);
// Stores an opcode result from 'from' (default X86::eax at the declaration,
// presumably - TODO confirm against the header) into virtual register 'dst'.
177 ALWAYS_INLINE void CTI::emitPutResult(unsigned dst, X86Assembler::RegisterID from)
179 m_jit.movl_rm(from, dst * sizeof(Register), X86::edi);
180 // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
// inCalledCode: sampling-tool flag toggled around JIT->C helper calls
// (see the emitCall overloads below).
183 #if ENABLE(SAMPLING_TOOL)
184 unsigned inCalledCode = 0;
187 void ctiSetReturnAddress(void** where, void* what)
// Rewrites the 32-bit relative operand of a call instruction: 'where' is the
// return address (the byte just after the call), so index [-1] of it viewed
// as void** covers the operand bytes, which receive the displacement
// (what - where) relative to the return address.
192 void ctiRepatchCallByReturnAddress(void* where, void* what)
194 (static_cast<void**>(where))[-1] = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(what) - reinterpret_cast<uintptr_t>(where));
// Two variants of emitDebugExceptionCheck; the preprocessor conditional
// selecting between them is not visible in this extract (presumably the
// first is the release no-op - TODO confirm). The debug variant loads the
// ExecState, compares ExecState::m_exception against 0, and skips ahead
// when no exception is pending.
199 ALWAYS_INLINE void CTI::emitDebugExceptionCheck()
205 ALWAYS_INLINE void CTI::emitDebugExceptionCheck()
207 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
208 m_jit.cmpl_i32m(0, OBJECT_OFFSET(ExecState, m_exception), X86::ecx);
209 X86Assembler::JmpSrc noException = m_jit.emitUnlinkedJe();
211 m_jit.link(noException, m_jit.label());
// Debug aid: prints a one-character type classification for each constant
// operand of the current opcode to stderr. 'i' immediate number,
// 'b' boolean, 'u' undefined, 'n' null, 's' string, 'o' object,
// '?' anything else; '*' (the apparent default, set on elided lines) means
// the operand is not a constant register.
214 void CTI::printOpcodeOperandTypes(unsigned src1, unsigned src2)
217 if (src1 < m_codeBlock->constantRegisters.size()) {
218 JSValue* js = m_codeBlock->constantRegisters[src1].jsValue(m_exec);
220 JSImmediate::isImmediate(js) ?
221 (JSImmediate::isNumber(js) ? 'i' :
222 JSImmediate::isBoolean(js) ? 'b' :
223 js->isUndefined() ? 'u' :
224 js->isNull() ? 'n' : '?')
226 (js->isString() ? 's' :
227 js->isObject() ? 'o' :
231 if (src2 < m_codeBlock->constantRegisters.size()) {
232 JSValue* js = m_codeBlock->constantRegisters[src2].jsValue(m_exec);
234 JSImmediate::isImmediate(js) ?
235 (JSImmediate::isNumber(js) ? 'i' :
236 JSImmediate::isBoolean(js) ? 'b' :
237 js->isUndefined() ? 'u' :
238 js->isNull() ? 'n' : '?')
240 (js->isString() ? 's' :
241 js->isObject() ? 'o' :
// NOTE(review): bitwise '|' rather than logical '||' - harmless here since
// both sides are cheap, side-effect-free char comparisons.
244 if ((which1 != '*') | (which2 != '*'))
245 fprintf(stderr, "Types %c %c\n", which1, which2);
// Emits a call to a CTIHelper_j helper, recording it in m_calls so the link
// phase can bind the target, and bracketing it with sampling-tool flag
// updates plus a debug-only exception check.
250 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_j helper)
252 #if ENABLE(SAMPLING_TOOL)
253 m_jit.movl_i32m(1, &inCalledCode);
255 X86Assembler::JmpSrc call = m_jit.emitCall();
256 m_calls.append(CallRecord(call, helper, opcodeIndex));
257 emitDebugExceptionCheck();
258 #if ENABLE(SAMPLING_TOOL)
259 m_jit.movl_i32m(0, &inCalledCode);
// CTIHelper_p overload of emitCall; identical structure to the _j overload.
265 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_p helper)
267 #if ENABLE(SAMPLING_TOOL)
268 m_jit.movl_i32m(1, &inCalledCode);
270 X86Assembler::JmpSrc call = m_jit.emitCall();
271 m_calls.append(CallRecord(call, helper, opcodeIndex));
272 emitDebugExceptionCheck();
273 #if ENABLE(SAMPLING_TOOL)
274 m_jit.movl_i32m(0, &inCalledCode);
// CTIHelper_b overload of emitCall; identical structure to the _j overload.
280 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_b helper)
282 #if ENABLE(SAMPLING_TOOL)
283 m_jit.movl_i32m(1, &inCalledCode);
285 X86Assembler::JmpSrc call = m_jit.emitCall();
286 m_calls.append(CallRecord(call, helper, opcodeIndex));
287 emitDebugExceptionCheck();
288 #if ENABLE(SAMPLING_TOOL)
289 m_jit.movl_i32m(0, &inCalledCode);
// CTIHelper_v overload of emitCall; identical structure to the _j overload.
295 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_v helper)
297 #if ENABLE(SAMPLING_TOOL)
298 m_jit.movl_i32m(1, &inCalledCode);
300 X86Assembler::JmpSrc call = m_jit.emitCall();
301 m_calls.append(CallRecord(call, helper, opcodeIndex));
302 emitDebugExceptionCheck();
303 #if ENABLE(SAMPLING_TOOL)
304 m_jit.movl_i32m(0, &inCalledCode);
// CTIHelper_s overload of emitCall; identical structure to the _j overload.
// NOTE(review): these five overloads are copy-paste identical apart from the
// helper type - a template over the helper type would remove the duplication.
310 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_s helper)
312 #if ENABLE(SAMPLING_TOOL)
313 m_jit.movl_i32m(1, &inCalledCode);
315 X86Assembler::JmpSrc call = m_jit.emitCall();
316 m_calls.append(CallRecord(call, helper, opcodeIndex));
317 emitDebugExceptionCheck();
318 #if ENABLE(SAMPLING_TOOL)
319 m_jit.movl_i32m(0, &inCalledCode);
// Tests the immediate tag bits of 'reg'; a nonzero result means the value is
// an immediate rather than a cell pointer, so branch to the slow case.
325 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotJSCell(X86Assembler::RegisterID reg, unsigned opcodeIndex)
327 m_jit.testl_i32r(JSImmediate::TagMask, reg);
328 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), opcodeIndex));
// Tests the integer tag bit of 'reg'; if it is clear the value is not an
// immediate integer, so branch to the slow case (je = bit was zero).
331 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImm(X86Assembler::RegisterID reg, unsigned opcodeIndex)
333 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, reg);
334 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), opcodeIndex));
// Checks two registers at once: AND the values into %ecx so the integer tag
// bit survives only if set in both, then reuse the single-register check.
// Clobbers %ecx.
337 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImms(X86Assembler::RegisterID reg1, X86Assembler::RegisterID reg2, unsigned opcodeIndex)
339 m_jit.movl_rr(reg1, X86::ecx);
340 m_jit.andl_rr(reg2, X86::ecx);
341 emitJumpSlowCaseIfNotImm(X86::ecx, opcodeIndex);
// Strips the integer tag bit from a compile-time-known immediate number,
// yielding the raw bits used in fast-path arithmetic (see op_add/op_mul).
344 ALWAYS_INLINE unsigned CTI::getDeTaggedConstantImmediate(JSValue* imm)
346 ASSERT(JSImmediate::isNumber(imm));
347 return reinterpret_cast<unsigned>(imm) & ~JSImmediate::TagBitTypeInteger;
// Removes the integer tag from the value in 'reg'.
350 ALWAYS_INLINE void CTI::emitFastArithDeTagImmediate(X86Assembler::RegisterID reg)
352 // op_mod relies on this being a sub - setting zf if result is 0.
353 m_jit.subl_i8r(JSImmediate::TagBitTypeInteger, reg);
// Restores the integer tag bit by addition; the caller must know the bit is
// currently clear (contrast the 'or' variant below).
356 ALWAYS_INLINE void CTI::emitFastArithReTagImmediate(X86Assembler::RegisterID reg)
358 m_jit.addl_i8r(JSImmediate::TagBitTypeInteger, reg);
// Sets the integer tag bit with 'or', which is safe whether or not the bit
// is already set.
361 ALWAYS_INLINE void CTI::emitFastArithPotentiallyReTagImmediate(X86Assembler::RegisterID reg)
363 m_jit.orl_i32r(JSImmediate::TagBitTypeInteger, reg);
// Converts a tagged immediate to a machine integer via arithmetic right
// shift by one (the tag occupies the low bit).
366 ALWAYS_INLINE void CTI::emitFastArithImmToInt(X86Assembler::RegisterID reg)
368 m_jit.sarl_i8r(1, reg);
// Converts a machine integer back to a tagged immediate: doubling (add to
// itself) shifts left by one, branching to the slow case on signed overflow
// before the tag bit is re-added.
371 ALWAYS_INLINE void CTI::emitFastArithIntToImmOrSlowCase(X86Assembler::RegisterID reg, unsigned opcodeIndex)
373 m_jit.addl_rr(reg, reg);
374 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), opcodeIndex));
375 emitFastArithReTagImmediate(reg);
// Unchecked variant of emitFastArithIntToImmOrSlowCase - only for values the
// caller knows cannot overflow when shifted left by one.
378 ALWAYS_INLINE void CTI::emitFastArithIntToImmNoCheck(X86Assembler::RegisterID reg)
380 m_jit.addl_rr(reg, reg);
381 emitFastArithReTagImmediate(reg);
// Constructor: binds the assembler to the machine's shared JIT code buffer
// and pre-sizes the per-instruction label table and the structure-stub
// compilation info from the code block (codeBlock may be null, e.g. when
// compiling stand-alone stubs, hence the guards).
384 CTI::CTI(Machine* machine, ExecState* exec, CodeBlock* codeBlock)
385 : m_jit(machine->jitCodeBuffer())
388 , m_codeBlock(codeBlock)
389 , m_labels(codeBlock ? codeBlock->instructions.size() : 0)
390 , m_structureStubCompilationInfo(codeBlock ? codeBlock->structureIDInstructions.size() : 0)
// Boilerplate for a binary opcode with no fast path: push both operands as
// helper arguments, call cti_<name>, store the helper's result.
394 #define CTI_COMPILE_BINARY_OP(name) \
396 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
397 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
398 emitCall(i, Machine::cti_##name); \
399 emitPutResult(instruction[i + 1].u.operand); \
// Single-operand sibling of CTI_COMPILE_BINARY_OP.
404 #define CTI_COMPILE_UNARY_OP(name) \
406 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
407 emitCall(i, Machine::cti_##name); \
408 emitPutResult(instruction[i + 1].u.operand); \
// Sampling-tool global tracking which opcode is currently executing;
// written from generated code in privateCompileMainPass.
413 #if ENABLE(SAMPLING_TOOL)
414 OpcodeID currentOpcodeID = static_cast<OpcodeID>(-1);
// Compiles op_call / op_call_eval / op_construct. Emits: argument setup for
// the helper, the new callee call frame header, a fast inline path for
// calling JIT-compiled JSFunctions, a helper-call fallback for host
// functions, and post-return bookkeeping (scope chain / call frame restore).
417 void CTI::compileOpCall(Instruction* instruction, unsigned i, CompileOpCallType type)
419 int dst = instruction[i + 1].u.operand;
420 int firstArg = instruction[i + 4].u.operand;
421 int argCount = instruction[i + 5].u.operand;
// Argument slots 4..16 are the same for construct and call; they differ only
// in what goes into slot 4 (constructor's "this" operand vs call's thisVal,
// which may be the global this when the marker says it was omitted).
423 if (type == OpConstruct) {
424 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 16);
425 emitPutArgConstant(argCount, 12);
426 emitPutArgConstant(firstArg, 8);
427 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
429 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 16);
430 emitPutArgConstant(argCount, 12);
431 emitPutArgConstant(firstArg, 8);
432 // FIXME: should this be loaded dynamically off m_exec?
433 int thisVal = instruction[i + 3].u.operand;
434 if (thisVal == missingThisObjectMarker()) {
435 emitPutArgConstant(reinterpret_cast<unsigned>(m_exec->globalThisValue()), 4);
437 emitGetPutArg(thisVal, 4, X86::ecx);
// Eval path: try cti_op_call_eval first; impossibleValue() in %eax signals
// "was not actually eval", in which case fall through to the normal call.
440 X86Assembler::JmpSrc wasEval;
441 if (type == OpCallEval) {
442 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
443 emitCall(i, Machine::cti_op_call_eval);
444 m_jit.emitRestoreArgumentReference();
446 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
448 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(JSImmediate::impossibleValue()), X86::eax);
449 wasEval = m_jit.emitUnlinkedJne();
451 // this reloads the first arg into ecx (checked just below).
452 emitGetArg(instruction[i + 2].u.operand, X86::ecx);
454 // this sets up the first arg, and explicitly leaves the value in ecx (checked just below).
455 emitGetArg(instruction[i + 2].u.operand, X86::ecx);
456 emitPutArg(X86::ecx, 0);
459 // initializeCallFrame!
// Fill in the callee call-frame header fields just below firstArg.
460 m_jit.movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), (firstArg - RegisterFile::CallFrameHeaderSize + RegisterFile::CallerCodeBlock) * sizeof(Register), X86::edi);
461 m_jit.movl_i32m(reinterpret_cast<unsigned>(instruction + i), (firstArg - RegisterFile::CallFrameHeaderSize + RegisterFile::ReturnVPC) * sizeof(Register), X86::edi);
462 emitGetCTIParam(CTI_ARGS_scopeChain, X86::edx);
463 m_jit.movl_rm(X86::edx, (firstArg - RegisterFile::CallFrameHeaderSize + RegisterFile::CallerScopeChain) * sizeof(Register), X86::edi);
464 m_jit.movl_rm(X86::edi, (firstArg - RegisterFile::CallFrameHeaderSize + RegisterFile::CallerRegisters) * sizeof(Register), X86::edi);
465 m_jit.movl_i32m(dst, (firstArg - RegisterFile::CallFrameHeaderSize + RegisterFile::ReturnValueRegister) * sizeof(Register), X86::edi);
466 m_jit.movl_i32m(firstArg, (firstArg - RegisterFile::CallFrameHeaderSize + RegisterFile::ArgumentStartRegister) * sizeof(Register), X86::edi);
467 m_jit.movl_i32m(argCount, (firstArg - RegisterFile::CallFrameHeaderSize + RegisterFile::ArgumentCount) * sizeof(Register), X86::edi);
468 m_jit.movl_rm(X86::ecx, (firstArg - RegisterFile::CallFrameHeaderSize + RegisterFile::Callee) * sizeof(Register), X86::edi);
469 m_jit.movl_i32m(0, (firstArg - RegisterFile::CallFrameHeaderSize + RegisterFile::OptionalCalleeActivation) * sizeof(Register), X86::edi);
470 // CTIReturnEIP (set in callee)
472 // Fast check for JS function.
473 m_jit.testl_i32r(JSImmediate::TagMask, X86::ecx);
474 X86Assembler::JmpSrc isNotObject = m_jit.emitUnlinkedJne();
475 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsFunctionVptr), X86::ecx);
476 X86Assembler::JmpSrc isJSFunction = m_jit.emitUnlinkedJe();
477 m_jit.link(isNotObject, m_jit.label());
479 // This handles host functions
480 emitCall(i, ((type == OpConstruct) ? Machine::cti_op_construct_NotJSConstruct : Machine::cti_op_call_NotJSFunction));
481 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
483 X86Assembler::JmpSrc wasNotJSFunction = m_jit.emitUnlinkedJmp();
484 m_jit.link(isJSFunction, m_jit.label());
486 // This handles JSFunctions
487 emitCall(i, ((type == OpConstruct) ? Machine::cti_op_construct_JSConstruct : Machine::cti_op_call_JSFunction));
488 // Check the ctiCode has been generated - if not, this is handled in a slow case.
489 m_jit.testl_rr(X86::eax, X86::eax);
490 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
491 m_jit.call_r(X86::eax);
493 // In the interpreter the following actions are performed by op_ret:
495 // Store the scope chain - returned by op_ret in %edx (see below) - to ExecState::m_scopeChain and CTI_ARGS_scopeChain on the stack.
496 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
497 emitPutCTIParam(X86::edx, CTI_ARGS_scopeChain);
498 m_jit.movl_rm(X86::edx, OBJECT_OFFSET(ExecState, m_scopeChain), X86::ecx);
499 // Restore ExecState::m_callFrame.
500 m_jit.leal_mr(-(m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize) * sizeof(Register), X86::edi, X86::edx);
501 m_jit.movl_rm(X86::edx, OBJECT_OFFSET(ExecState, m_callFrame), X86::ecx);
502 // Restore CTI_ARGS_codeBlock.
503 emitPutCTIParam(m_codeBlock, CTI_ARGS_codeBlock);
// Both the host-function path and the eval fast path rejoin here.
505 X86Assembler::JmpDst end = m_jit.label();
506 m_jit.link(wasNotJSFunction, end);
507 if (type == OpCallEval)
508 m_jit.link(wasEval, end);
// Emitted at loop back-edges: decrements the tick counter in %esi and, when
// it hits zero, calls the timeout-check helper and reloads the counter from
// Machine::m_ticksUntilNextTimeoutCheck (reached via exec->m_globalData->machine).
513 void CTI::emitSlowScriptCheck(unsigned opcodeIndex)
515 m_jit.subl_i8r(1, X86::esi);
516 X86Assembler::JmpSrc skipTimeout = m_jit.emitUnlinkedJne();
517 emitCall(opcodeIndex, Machine::cti_timeout_check);
519 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
520 m_jit.movl_mr(OBJECT_OFFSET(ExecState, m_globalData), X86::ecx, X86::ecx);
521 m_jit.movl_mr(OBJECT_OFFSET(JSGlobalData, machine), X86::ecx, X86::ecx);
522 m_jit.movl_mr(OBJECT_OFFSET(Machine, m_ticksUntilNextTimeoutCheck), X86::ecx, X86::esi);
523 m_jit.link(skipTimeout, m_jit.label());
526 void CTI::privateCompileMainPass()
528 Instruction* instruction = m_codeBlock->instructions.begin();
529 unsigned instructionCount = m_codeBlock->instructions.size();
531 unsigned structureIDInstructionIndex = 0;
533 for (unsigned i = 0; i < instructionCount; ) {
534 m_labels[i] = m_jit.label();
536 #if ENABLE(SAMPLING_TOOL)
537 m_jit.movl_i32m(m_machine->getOpcodeID(instruction[i].u.opcode), ¤tOpcodeID);
540 ASSERT_WITH_MESSAGE(m_machine->isOpcode(instruction[i].u.opcode), "privateCompileMainPass gone bad @ %d", i);
541 m_jit.emitRestoreArgumentReference();
542 switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
544 unsigned src = instruction[i + 2].u.operand;
545 if (src < m_codeBlock->constantRegisters.size())
546 m_jit.movl_i32r(reinterpret_cast<unsigned>(m_codeBlock->constantRegisters[src].jsValue(m_exec)), X86::edx);
548 emitGetArg(src, X86::edx);
549 emitPutResult(instruction[i + 1].u.operand, X86::edx);
554 unsigned dst = instruction[i + 1].u.operand;
555 unsigned src1 = instruction[i + 2].u.operand;
556 unsigned src2 = instruction[i + 3].u.operand;
557 if (src2 < m_codeBlock->constantRegisters.size()) {
558 JSValue* value = m_codeBlock->constantRegisters[src2].jsValue(m_exec);
559 if (JSImmediate::isNumber(value)) {
560 emitGetArg(src1, X86::eax);
561 emitJumpSlowCaseIfNotImm(X86::eax, i);
562 m_jit.addl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
563 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
568 } else if (!(src1 < m_codeBlock->constantRegisters.size())) {
569 emitGetArg(src1, X86::eax);
570 emitGetArg(src2, X86::edx);
571 emitJumpSlowCaseIfNotImms(X86::eax, X86::edx, i);
572 emitFastArithDeTagImmediate(X86::eax);
573 m_jit.addl_rr(X86::edx, X86::eax);
574 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
579 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
580 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
581 emitCall(i, Machine::cti_op_add);
582 emitPutResult(instruction[i + 1].u.operand);
587 if (m_codeBlock->needsFullScopeChain)
588 emitCall(i, Machine::cti_op_end);
589 emitGetArg(instruction[i + 1].u.operand, X86::eax);
590 #if ENABLE(SAMPLING_TOOL)
591 m_jit.movl_i32m(-1, ¤tOpcodeID);
593 m_jit.pushl_m(-((m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize) - RegisterFile::CTIReturnEIP) * sizeof(Register), X86::edi);
599 unsigned target = instruction[i + 1].u.operand;
600 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
605 int srcDst = instruction[i + 1].u.operand;
606 emitGetArg(srcDst, X86::eax);
607 emitJumpSlowCaseIfNotImm(X86::eax, i);
608 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
609 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
610 emitPutResult(srcDst, X86::eax);
615 emitSlowScriptCheck(i);
617 unsigned target = instruction[i + 1].u.operand;
618 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
622 case op_loop_if_less: {
623 emitSlowScriptCheck(i);
625 unsigned target = instruction[i + 3].u.operand;
626 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
628 emitGetArg(instruction[i + 1].u.operand, X86::edx);
629 emitJumpSlowCaseIfNotImm(X86::edx, i);
630 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
631 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
633 emitGetArg(instruction[i + 1].u.operand, X86::eax);
634 emitGetArg(instruction[i + 2].u.operand, X86::edx);
635 emitJumpSlowCaseIfNotImm(X86::eax, i);
636 emitJumpSlowCaseIfNotImm(X86::edx, i);
637 m_jit.cmpl_rr(X86::edx, X86::eax);
638 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
643 case op_loop_if_lesseq: {
644 emitSlowScriptCheck(i);
646 unsigned target = instruction[i + 3].u.operand;
647 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
649 emitGetArg(instruction[i + 1].u.operand, X86::edx);
650 emitJumpSlowCaseIfNotImm(X86::edx, i);
651 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
652 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
654 emitGetArg(instruction[i + 1].u.operand, X86::eax);
655 emitGetArg(instruction[i + 2].u.operand, X86::edx);
656 emitJumpSlowCaseIfNotImm(X86::eax, i);
657 emitJumpSlowCaseIfNotImm(X86::edx, i);
658 m_jit.cmpl_rr(X86::edx, X86::eax);
659 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
664 case op_new_object: {
665 emitCall(i, Machine::cti_op_new_object);
666 emitPutResult(instruction[i + 1].u.operand);
671 // In order to be able to repatch both the StructureID, and the object offset, we store one pointer,
672 // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
673 // such that the StructureID & offset are always at the same distance from this.
675 emitGetArg(instruction[i + 1].u.operand, X86::eax);
676 emitGetArg(instruction[i + 3].u.operand, X86::edx);
678 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
679 X86Assembler::JmpDst hotPathBegin = m_jit.label();
680 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
681 ++structureIDInstructionIndex;
683 // Jump to a slow case if either the base object is an immediate, or if the StructureID does not match.
684 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
685 // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
686 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
687 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdStructureID);
688 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
690 // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
691 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
692 m_jit.movl_rm(X86::edx, repatchGetByIdDefaultOffset, X86::eax);
693 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdPropertyMapOffset);
699 // As for put_by_id, get_by_id requires the offset of the StructureID and the offset of the access to be repatched.
700 // Additionally, for get_by_id we need to repatch the offset of the branch to the slow case (we repatch this to jump
701 // to array-length / prototype access trampolines, and finally we also use the property-map access offset as a label
702 // to jump back to if one of these trampolines finds a match.
704 emitGetArg(instruction[i + 2].u.operand, X86::eax);
706 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
708 X86Assembler::JmpDst hotPathBegin = m_jit.label();
709 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
710 ++structureIDInstructionIndex;
712 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
713 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
714 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdStructureID);
715 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
716 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdBranchToSlowCase);
718 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
719 m_jit.movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::ecx);
720 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdPropertyMapOffset);
721 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
726 case op_instanceof: {
727 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
728 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
729 emitGetPutArg(instruction[i + 4].u.operand, 8, X86::ecx);
730 emitCall(i, Machine::cti_op_instanceof);
731 emitPutResult(instruction[i + 1].u.operand);
736 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
737 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
738 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
739 emitCall(i, Machine::cti_op_del_by_id);
740 emitPutResult(instruction[i + 1].u.operand);
745 unsigned dst = instruction[i + 1].u.operand;
746 unsigned src1 = instruction[i + 2].u.operand;
747 unsigned src2 = instruction[i + 3].u.operand;
748 if (src1 < m_codeBlock->constantRegisters.size() || src2 < m_codeBlock->constantRegisters.size()) {
749 unsigned constant = src1;
750 unsigned nonconstant = src2;
751 if (!(src1 < m_codeBlock->constantRegisters.size())) {
755 JSValue* value = m_codeBlock->constantRegisters[constant].jsValue(m_exec);
756 if (JSImmediate::isNumber(value)) {
757 emitGetArg(nonconstant, X86::eax);
758 emitJumpSlowCaseIfNotImm(X86::eax, i);
759 emitFastArithImmToInt(X86::eax);
760 m_jit.imull_i32r( X86::eax, getDeTaggedConstantImmediate(value), X86::eax);
761 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
762 emitFastArithPotentiallyReTagImmediate(X86::eax);
769 emitGetArg(src1, X86::eax);
770 emitGetArg(src2, X86::edx);
771 emitJumpSlowCaseIfNotImms(X86::eax, X86::edx, i);
772 emitFastArithDeTagImmediate(X86::eax);
773 emitFastArithImmToInt(X86::edx);
774 m_jit.imull_rr(X86::edx, X86::eax);
775 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
776 emitFastArithPotentiallyReTagImmediate(X86::eax);
782 FuncDeclNode* func = (m_codeBlock->functions[instruction[i + 2].u.operand]).get();
783 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
784 emitCall(i, Machine::cti_op_new_func);
785 emitPutResult(instruction[i + 1].u.operand);
790 compileOpCall(instruction, i);
794 case op_get_global_var: {
795 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 2].u.jsCell);
796 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
797 emitGetVariableObjectRegister(X86::eax, instruction[i + 3].u.operand, X86::eax);
798 emitPutResult(instruction[i + 1].u.operand, X86::eax);
802 case op_put_global_var: {
803 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 1].u.jsCell);
804 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
805 emitGetArg(instruction[i + 3].u.operand, X86::edx);
806 emitPutVariableObjectRegister(X86::edx, X86::eax, instruction[i + 2].u.operand);
810 case op_get_scoped_var: {
811 int skip = instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain;
813 emitGetCTIParam(CTI_ARGS_scopeChain, X86::eax);
815 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::eax, X86::eax);
817 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::eax, X86::eax);
818 emitGetVariableObjectRegister(X86::eax, instruction[i + 2].u.operand, X86::eax);
819 emitPutResult(instruction[i + 1].u.operand);
823 case op_put_scoped_var: {
824 int skip = instruction[i + 2].u.operand + m_codeBlock->needsFullScopeChain;
826 emitGetCTIParam(CTI_ARGS_scopeChain, X86::edx);
827 emitGetArg(instruction[i + 3].u.operand, X86::eax);
829 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::edx, X86::edx);
831 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::edx, X86::edx);
832 emitPutVariableObjectRegister(X86::eax, X86::edx, instruction[i + 1].u.operand);
837 // Check for an activation - if there is one, jump to the hook below.
838 m_jit.cmpl_i32m(0, -(m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize - RegisterFile::OptionalCalleeActivation) * sizeof(Register), X86::edi);
839 X86Assembler::JmpSrc activation = m_jit.emitUnlinkedJne();
840 X86Assembler::JmpDst activated = m_jit.label();
842 // Check for a profiler - if there is one, jump to the hook below.
843 emitGetCTIParam(CTI_ARGS_profilerReference, X86::eax);
844 m_jit.cmpl_i32m(0, X86::eax);
845 X86Assembler::JmpSrc profile = m_jit.emitUnlinkedJne();
846 X86Assembler::JmpDst profiled = m_jit.label();
848 // We could JIT generate the deref, only calling out to C when the refcount hits zero.
849 if (m_codeBlock->needsFullScopeChain)
850 emitCall(i, Machine::cti_op_ret_scopeChain);
852 // Return the result in %eax, and the caller scope chain in %edx (this is read from the callee call frame,
853 // but is only assigned to ExecState::m_scopeChain if returning to a JSFunction).
854 emitGetArg(instruction[i + 1].u.operand, X86::eax);
855 m_jit.movl_mr(-(m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize - RegisterFile::CallerScopeChain) * sizeof(Register), X86::edi, X86::edx);
856 // Restore the machine return address from the callframe, roll the callframe back to the caller callframe,
857 // and preserve a copy of r on the stack at CTI_ARGS_r.
858 m_jit.movl_mr(-(m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize - RegisterFile::CTIReturnEIP) * sizeof(Register), X86::edi, X86::ecx);
859 m_jit.movl_mr(-(m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize - RegisterFile::CallerRegisters) * sizeof(Register), X86::edi, X86::edi);
860 emitPutCTIParam(X86::edi, CTI_ARGS_r);
862 m_jit.pushl_r(X86::ecx);
866 m_jit.link(activation, m_jit.label());
867 emitCall(i, Machine::cti_op_ret_activation);
868 m_jit.link(m_jit.emitUnlinkedJmp(), activated);
871 m_jit.link(profile, m_jit.label());
872 emitCall(i, Machine::cti_op_ret_profiler);
873 m_jit.link(m_jit.emitUnlinkedJmp(), profiled);
879 m_jit.leal_mr(sizeof(Register) * instruction[i + 2].u.operand, X86::edi, X86::edx);
880 emitPutArg(X86::edx, 0);
881 emitPutArgConstant(instruction[i + 3].u.operand, 4);
882 emitCall(i, Machine::cti_op_new_array);
883 emitPutResult(instruction[i + 1].u.operand);
888 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
889 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
890 emitCall(i, Machine::cti_op_resolve);
891 emitPutResult(instruction[i + 1].u.operand);
896 compileOpCall(instruction, i, OpConstruct);
900 case op_construct_verify: {
901 emitGetArg(instruction[i + 1].u.operand, X86::eax);
903 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
904 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJne();
905 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
906 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_type), X86::ecx);
907 X86Assembler::JmpSrc isObject = m_jit.emitUnlinkedJe();
909 m_jit.link(isImmediate, m_jit.label());
910 emitGetArg(instruction[i + 2].u.operand, X86::ecx);
911 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
912 m_jit.link(isObject, m_jit.label());
917 case op_get_by_val: {
918 emitGetArg(instruction[i + 2].u.operand, X86::eax);
919 emitGetArg(instruction[i + 3].u.operand, X86::edx);
920 emitJumpSlowCaseIfNotImm(X86::edx, i);
921 emitFastArithImmToInt(X86::edx);
922 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
923 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
924 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
925 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
927 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
928 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
929 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
930 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
932 // Get the value from the vector
933 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::eax);
934 emitPutResult(instruction[i + 1].u.operand);
938 case op_resolve_func: {
939 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
940 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
941 emitCall(i, Machine::cti_op_resolve_func);
942 emitPutResult(instruction[i + 1].u.operand);
943 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
944 emitPutResult(instruction[i + 2].u.operand);
949 emitGetArg(instruction[i + 2].u.operand, X86::eax);
950 emitGetArg(instruction[i + 3].u.operand, X86::edx);
951 emitJumpSlowCaseIfNotImms(X86::eax, X86::edx, i);
952 m_jit.subl_rr(X86::edx, X86::eax);
953 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
954 emitFastArithReTagImmediate(X86::eax);
955 emitPutResult(instruction[i + 1].u.operand);
959 case op_put_by_val: {
960 emitGetArg(instruction[i + 1].u.operand, X86::eax);
961 emitGetArg(instruction[i + 2].u.operand, X86::edx);
962 emitJumpSlowCaseIfNotImm(X86::edx, i);
963 emitFastArithImmToInt(X86::edx);
964 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
965 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
966 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
967 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
969 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
970 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
971 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
972 X86Assembler::JmpSrc inFastVector = m_jit.emitUnlinkedJa();
973 // No; oh well, check if the access is within the vector - if so, we may still be okay.
974 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
975 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
977 // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
978 // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff.
979 m_jit.cmpl_i8m(0, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
980 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
982 // All good - put the value into the array.
983 m_jit.link(inFastVector, m_jit.label());
984 emitGetArg(instruction[i + 3].u.operand, X86::eax);
985 m_jit.movl_rm(X86::eax, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
989 CTI_COMPILE_BINARY_OP(op_lesseq)
990 case op_loop_if_true: {
991 emitSlowScriptCheck(i);
993 unsigned target = instruction[i + 2].u.operand;
994 emitGetArg(instruction[i + 1].u.operand, X86::eax);
996 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
997 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
998 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
999 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
1001 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1002 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1003 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1004 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1006 m_jit.link(isZero, m_jit.label());
1010 case op_resolve_base: {
1011 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1012 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1013 emitCall(i, Machine::cti_op_resolve_base);
1014 emitPutResult(instruction[i + 1].u.operand);
1019 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1020 emitCall(i, Machine::cti_op_negate);
1021 emitPutResult(instruction[i + 1].u.operand);
1025 case op_resolve_skip: {
1026 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1027 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1028 emitPutArgConstant(instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain, 4);
1029 emitCall(i, Machine::cti_op_resolve_skip);
1030 emitPutResult(instruction[i + 1].u.operand);
1034 case op_resolve_global: {
1036 unsigned globalObject = reinterpret_cast<unsigned>(instruction[i + 2].u.jsCell);
1037 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1038 void* structureIDAddr = reinterpret_cast<void*>(instruction + i + 4);
1039 void* offsetAddr = reinterpret_cast<void*>(instruction + i + 5);
1041 // Check StructureID of global object
1042 m_jit.movl_i32r(globalObject, X86::eax);
1043 m_jit.movl_mr(structureIDAddr, X86::edx);
1044 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1045 X86Assembler::JmpSrc slowCase = m_jit.emitUnlinkedJne(); // StructureIDs don't match
1046 m_slowCases.append(SlowCaseEntry(slowCase, i));
1048 // Load cached property
1049 m_jit.movl_mr(OBJECT_OFFSET(JSGlobalObject, m_propertyStorage), X86::eax, X86::eax);
1050 m_jit.movl_mr(offsetAddr, X86::edx);
1051 m_jit.movl_mr(0, X86::eax, X86::edx, sizeof(JSValue*), X86::eax);
1052 emitPutResult(instruction[i + 1].u.operand);
1053 X86Assembler::JmpSrc end = m_jit.emitUnlinkedJmp();
1056 m_jit.link(slowCase, m_jit.label());
1057 emitPutArgConstant(globalObject, 0);
1058 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1059 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 8);
1060 emitCall(i, Machine::cti_op_resolve_global);
1061 emitPutResult(instruction[i + 1].u.operand);
1062 m_jit.link(end, m_jit.label());
1064 ++structureIDInstructionIndex;
1067 CTI_COMPILE_BINARY_OP(op_div)
1069 int srcDst = instruction[i + 1].u.operand;
1070 emitGetArg(srcDst, X86::eax);
1071 emitJumpSlowCaseIfNotImm(X86::eax, i);
1072 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
1073 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1074 emitPutResult(srcDst, X86::eax);
1079 unsigned target = instruction[i + 3].u.operand;
1080 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1082 emitGetArg(instruction[i + 1].u.operand, X86::edx);
1083 emitJumpSlowCaseIfNotImm(X86::edx, i);
1084 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
1085 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
1087 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1088 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1089 emitJumpSlowCaseIfNotImm(X86::eax, i);
1090 emitJumpSlowCaseIfNotImm(X86::edx, i);
1091 m_jit.cmpl_rr(X86::edx, X86::eax);
1092 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
1098 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1099 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
1100 m_jit.testl_i32r(JSImmediate::FullTagTypeMask, X86::eax); // i8?
1101 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1102 m_jit.xorl_i8r((JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue), X86::eax);
1103 emitPutResult(instruction[i + 1].u.operand);
1108 unsigned target = instruction[i + 2].u.operand;
1109 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1111 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1112 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1113 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1114 X86Assembler::JmpSrc isNonZero = m_jit.emitUnlinkedJne();
1116 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1117 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1118 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1119 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1121 m_jit.link(isNonZero, m_jit.label());
1126 int srcDst = instruction[i + 2].u.operand;
1127 emitGetArg(srcDst, X86::eax);
1128 m_jit.movl_rr(X86::eax, X86::edx);
1129 emitJumpSlowCaseIfNotImm(X86::eax, i);
1130 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1131 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1132 emitPutResult(srcDst, X86::edx);
1133 emitPutResult(instruction[i + 1].u.operand);
1137 case op_unexpected_load: {
1138 JSValue* v = m_codeBlock->unexpectedConstants[instruction[i + 2].u.operand];
1139 m_jit.movl_i32r(reinterpret_cast<unsigned>(v), X86::eax);
1140 emitPutResult(instruction[i + 1].u.operand);
1145 int retAddrDst = instruction[i + 1].u.operand;
1146 int target = instruction[i + 2].u.operand;
1147 m_jit.movl_i32m(0, sizeof(Register) * retAddrDst, X86::edi);
1148 X86Assembler::JmpDst addrPosition = m_jit.label();
1149 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1150 X86Assembler::JmpDst sretTarget = m_jit.label();
1151 m_jsrSites.append(JSRInfo(addrPosition, sretTarget));
1156 m_jit.jmp_m(sizeof(Register) * instruction[i + 1].u.operand, X86::edi);
1160 CTI_COMPILE_BINARY_OP(op_eq)
1162 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1163 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1164 emitJumpSlowCaseIfNotImm(X86::eax, i);
1165 emitJumpSlowCaseIfNotImm(X86::ecx, i);
1166 emitFastArithImmToInt(X86::eax);
1167 emitFastArithImmToInt(X86::ecx);
1168 m_jit.shll_CLr(X86::eax);
1169 emitFastArithIntToImmOrSlowCase(X86::eax, i);
1170 emitPutResult(instruction[i + 1].u.operand);
1175 unsigned src1 = instruction[i + 2].u.operand;
1176 unsigned src2 = instruction[i + 3].u.operand;
1177 unsigned dst = instruction[i + 1].u.operand;
1178 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
1179 emitGetArg(src2, X86::eax);
1180 emitJumpSlowCaseIfNotImm(X86::eax, i);
1181 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax); // FIXME: make it more obvious this is relying on the format of JSImmediate
1183 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
1184 emitGetArg(src1, X86::eax);
1185 emitJumpSlowCaseIfNotImm(X86::eax, i);
1186 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax);
1189 emitGetArg(src1, X86::eax);
1190 emitGetArg(src2, X86::edx);
1191 m_jit.andl_rr(X86::edx, X86::eax);
1192 emitJumpSlowCaseIfNotImm(X86::eax, i);
1199 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1200 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1201 emitJumpSlowCaseIfNotImm(X86::eax, i);
1202 emitJumpSlowCaseIfNotImm(X86::ecx, i);
1203 emitFastArithImmToInt(X86::ecx);
1204 m_jit.sarl_CLr(X86::eax);
1205 emitFastArithPotentiallyReTagImmediate(X86::eax);
1206 emitPutResult(instruction[i + 1].u.operand);
1211 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1212 emitJumpSlowCaseIfNotImm(X86::eax, i);
1213 m_jit.xorl_i8r(~JSImmediate::TagBitTypeInteger, X86::eax);
1214 emitPutResult(instruction[i + 1].u.operand);
1218 case op_resolve_with_base: {
1219 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1220 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1221 emitCall(i, Machine::cti_op_resolve_with_base);
1222 emitPutResult(instruction[i + 1].u.operand);
1223 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
1224 emitPutResult(instruction[i + 2].u.operand);
1228 case op_new_func_exp: {
1229 FuncExprNode* func = (m_codeBlock->functionExpressions[instruction[i + 2].u.operand]).get();
1230 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
1231 emitCall(i, Machine::cti_op_new_func_exp);
1232 emitPutResult(instruction[i + 1].u.operand);
1237 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1238 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1239 emitJumpSlowCaseIfNotImm(X86::eax, i);
1240 emitJumpSlowCaseIfNotImm(X86::ecx, i);
1241 emitFastArithDeTagImmediate(X86::eax);
1242 emitFastArithDeTagImmediate(X86::ecx);
1243 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i)); // This is checking if the last detag resulted in a value 0.
1245 m_jit.idivl_r(X86::ecx);
1246 emitFastArithReTagImmediate(X86::edx);
1247 m_jit.movl_rr(X86::edx, X86::eax);
1248 emitPutResult(instruction[i + 1].u.operand);
1253 unsigned target = instruction[i + 2].u.operand;
1254 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1256 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1257 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
1258 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1259 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
1261 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1262 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1263 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1264 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1266 m_jit.link(isZero, m_jit.label());
1270 CTI_COMPILE_BINARY_OP(op_less)
1271 CTI_COMPILE_BINARY_OP(op_neq)
1273 int srcDst = instruction[i + 2].u.operand;
1274 emitGetArg(srcDst, X86::eax);
1275 m_jit.movl_rr(X86::eax, X86::edx);
1276 emitJumpSlowCaseIfNotImm(X86::eax, i);
1277 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1278 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1279 emitPutResult(srcDst, X86::edx);
1280 emitPutResult(instruction[i + 1].u.operand);
1284 CTI_COMPILE_BINARY_OP(op_urshift)
1286 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1287 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1288 emitJumpSlowCaseIfNotImms(X86::eax, X86::edx, i);
1289 m_jit.xorl_rr(X86::edx, X86::eax);
1290 emitFastArithReTagImmediate(X86::eax);
1291 emitPutResult(instruction[i + 1].u.operand);
1295 case op_new_regexp: {
1296 RegExp* regExp = m_codeBlock->regexps[instruction[i + 2].u.operand].get();
1297 emitPutArgConstant(reinterpret_cast<unsigned>(regExp), 0);
1298 emitCall(i, Machine::cti_op_new_regexp);
1299 emitPutResult(instruction[i + 1].u.operand);
1304 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1305 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1306 emitJumpSlowCaseIfNotImms(X86::eax, X86::edx, i);
1307 m_jit.orl_rr(X86::edx, X86::eax);
1308 emitPutResult(instruction[i + 1].u.operand);
1312 case op_call_eval: {
1313 compileOpCall(instruction, i, OpCallEval);
1318 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1319 emitCall(i, Machine::cti_op_throw);
1320 m_jit.addl_i8r(0x24, X86::esp);
1321 m_jit.popl_r(X86::edi);
1322 m_jit.popl_r(X86::esi);
1327 case op_get_pnames: {
1328 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1329 emitCall(i, Machine::cti_op_get_pnames);
1330 emitPutResult(instruction[i + 1].u.operand);
1334 case op_next_pname: {
1335 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1336 unsigned target = instruction[i + 3].u.operand;
1337 emitCall(i, Machine::cti_op_next_pname);
1338 m_jit.testl_rr(X86::eax, X86::eax);
1339 X86Assembler::JmpSrc endOfIter = m_jit.emitUnlinkedJe();
1340 emitPutResult(instruction[i + 1].u.operand);
1341 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 3 + target));
1342 m_jit.link(endOfIter, m_jit.label());
1346 case op_push_scope: {
1347 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1348 emitCall(i, Machine::cti_op_push_scope);
1352 case op_pop_scope: {
1353 emitCall(i, Machine::cti_op_pop_scope);
1357 CTI_COMPILE_UNARY_OP(op_typeof)
1358 CTI_COMPILE_UNARY_OP(op_is_undefined)
1359 CTI_COMPILE_UNARY_OP(op_is_boolean)
1360 CTI_COMPILE_UNARY_OP(op_is_number)
1361 CTI_COMPILE_UNARY_OP(op_is_string)
1362 CTI_COMPILE_UNARY_OP(op_is_object)
1363 CTI_COMPILE_UNARY_OP(op_is_function)
1364 CTI_COMPILE_BINARY_OP(op_stricteq)
1365 CTI_COMPILE_BINARY_OP(op_nstricteq)
1366 case op_to_jsnumber: {
1367 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1368 emitCall(i, Machine::cti_op_to_jsnumber);
1369 emitPutResult(instruction[i + 1].u.operand);
1374 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1375 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1376 emitCall(i, Machine::cti_op_in);
1377 emitPutResult(instruction[i + 1].u.operand);
1381 case op_push_new_scope: {
1382 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1383 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1384 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1385 emitCall(i, Machine::cti_op_push_new_scope);
1386 emitPutResult(instruction[i + 1].u.operand);
1391 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
1392 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
1393 m_jit.movl_mr(OBJECT_OFFSET(ExecState, m_exception), X86::ecx, X86::eax);
1394 m_jit.movl_i32m(0, OBJECT_OFFSET(ExecState, m_exception), X86::ecx);
1395 emitPutResult(instruction[i + 1].u.operand);
1399 case op_jmp_scopes: {
1400 unsigned count = instruction[i + 1].u.operand;
1401 emitPutArgConstant(count, 0);
1402 emitCall(i, Machine::cti_op_jmp_scopes);
1403 unsigned target = instruction[i + 2].u.operand;
1404 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1408 case op_put_by_index: {
1409 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1410 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1411 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1412 emitCall(i, Machine::cti_op_put_by_index);
1416 case op_switch_imm: {
1417 unsigned tableIndex = instruction[i + 1].u.operand;
1418 unsigned defaultOffset = instruction[i + 2].u.operand;
1419 unsigned scrutinee = instruction[i + 3].u.operand;
1421 // create jump table for switch destinations, track this switch statement.
1422 SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTables[tableIndex];
1423 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Immediate));
1424 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
1426 emitGetPutArg(scrutinee, 0, X86::ecx);
1427 emitPutArgConstant(tableIndex, 4);
1428 emitCall(i, Machine::cti_op_switch_imm);
1429 m_jit.jmp_r(X86::eax);
1433 case op_switch_char: {
1434 unsigned tableIndex = instruction[i + 1].u.operand;
1435 unsigned defaultOffset = instruction[i + 2].u.operand;
1436 unsigned scrutinee = instruction[i + 3].u.operand;
1438 // create jump table for switch destinations, track this switch statement.
1439 SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTables[tableIndex];
1440 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Character));
1441 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
1443 emitGetPutArg(scrutinee, 0, X86::ecx);
1444 emitPutArgConstant(tableIndex, 4);
1445 emitCall(i, Machine::cti_op_switch_char);
1446 m_jit.jmp_r(X86::eax);
1450 case op_switch_string: {
1451 unsigned tableIndex = instruction[i + 1].u.operand;
1452 unsigned defaultOffset = instruction[i + 2].u.operand;
1453 unsigned scrutinee = instruction[i + 3].u.operand;
1455 // create jump table for switch destinations, track this switch statement.
1456 StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTables[tableIndex];
1457 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset));
1459 emitGetPutArg(scrutinee, 0, X86::ecx);
1460 emitPutArgConstant(tableIndex, 4);
1461 emitCall(i, Machine::cti_op_switch_string);
1462 m_jit.jmp_r(X86::eax);
1466 case op_del_by_val: {
1467 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1468 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1469 emitCall(i, Machine::cti_op_del_by_val);
1470 emitPutResult(instruction[i + 1].u.operand);
1474 case op_put_getter: {
1475 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1476 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1477 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1478 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1479 emitCall(i, Machine::cti_op_put_getter);
1483 case op_put_setter: {
1484 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1485 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1486 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1487 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1488 emitCall(i, Machine::cti_op_put_setter);
1492 case op_new_error: {
1493 JSValue* message = m_codeBlock->unexpectedConstants[instruction[i + 3].u.operand];
1494 emitPutArgConstant(instruction[i + 2].u.operand, 0);
1495 emitPutArgConstant(reinterpret_cast<unsigned>(message), 4);
1496 emitPutArgConstant(m_codeBlock->lineNumberForVPC(&instruction[i]), 8);
1497 emitCall(i, Machine::cti_op_new_error);
1498 emitPutResult(instruction[i + 1].u.operand);
1503 emitPutArgConstant(instruction[i + 1].u.operand, 0);
1504 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1505 emitPutArgConstant(instruction[i + 3].u.operand, 8);
1506 emitCall(i, Machine::cti_op_debug);
1511 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1512 emitCall(i, Machine::cti_op_eq_null);
1513 emitPutResult(instruction[i + 1].u.operand);
1518 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1519 emitCall(i, Machine::cti_op_neq_null);
1520 emitPutResult(instruction[i + 1].u.operand);
1524 case op_get_array_length:
1525 case op_get_by_id_chain:
1526 case op_get_by_id_generic:
1527 case op_get_by_id_proto:
1528 case op_get_by_id_self:
1529 case op_get_string_length:
1530 case op_put_by_id_generic:
1531 case op_put_by_id_replace:
1532 case op_put_by_id_transition:
1533 ASSERT_NOT_REACHED();
1537 ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
// Link pass: after the main pass has emitted code for every bytecode offset,
// walk the jump table and link each recorded jump (JmpSrc) to the label
// generated for its destination bytecode index (m_labels[to]).
1541 void CTI::privateCompileLinkPass()
1543     unsigned jmpTableCount = m_jmpTable.size();
1544     for (unsigned i = 0; i < jmpTableCount; ++i)
1545         m_jit.link(m_jmpTable[i].from, m_labels[m_jmpTable[i].to]);
// Generic slow-case codegen for a binary operator: link the recorded slow-case
// jump to here, pass the two source operands (vPC[i + 2], vPC[i + 3]) as call
// arguments, call the matching Machine::cti_<name> helper, and store the
// returned JSValue* into the destination register (vPC[i + 1]).
// NOTE: no comments may be placed between the continuation lines below — the
// trailing backslashes must remain the last character on each line.
1549 #define CTI_COMPILE_BINARY_OP_SLOW_CASE(name) \
1551         m_jit.link(iter->from, m_jit.label()); \
1552         emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
1553         emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
1554         emitCall(i, Machine::cti_##name); \
1555         emitPutResult(instruction[i + 1].u.operand); \
1560 void CTI::privateCompileSlowCases()
1562 unsigned structureIDInstructionIndex = 0;
1564 Instruction* instruction = m_codeBlock->instructions.begin();
1565 for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end(); ++iter) {
1566 unsigned i = iter->to;
1567 m_jit.emitRestoreArgumentReference();
1568 switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
1570 unsigned dst = instruction[i + 1].u.operand;
1571 unsigned src2 = instruction[i + 3].u.operand;
1572 if (src2 < m_codeBlock->constantRegisters.size()) {
1573 JSValue* value = m_codeBlock->constantRegisters[src2].jsValue(m_exec);
1574 if (JSImmediate::isNumber(value)) {
1575 X86Assembler::JmpSrc notImm = iter->from;
1576 m_jit.link((++iter)->from, m_jit.label());
1577 m_jit.subl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
1578 m_jit.link(notImm, m_jit.label());
1579 emitPutArg(X86::eax, 0);
1580 emitGetPutArg(src2, 4, X86::ecx);
1581 emitCall(i, Machine::cti_op_add);
1588 ASSERT(!(static_cast<unsigned>(instruction[i + 2].u.operand) < m_codeBlock->constantRegisters.size()));
1590 X86Assembler::JmpSrc notImm = iter->from;
1591 m_jit.link((++iter)->from, m_jit.label());
1592 m_jit.subl_rr(X86::edx, X86::eax);
1593 emitFastArithReTagImmediate(X86::eax);
1594 m_jit.link(notImm, m_jit.label());
1595 emitPutArg(X86::eax, 0);
1596 emitPutArg(X86::edx, 4);
1597 emitCall(i, Machine::cti_op_add);
1602 case op_get_by_val: {
1603 // The slow case that handles accesses to arrays (below) may jump back up to here.
1604 X86Assembler::JmpDst beginGetByValSlow = m_jit.label();
1606 X86Assembler::JmpSrc notImm = iter->from;
1607 m_jit.link((++iter)->from, m_jit.label());
1608 m_jit.link((++iter)->from, m_jit.label());
1609 emitFastArithIntToImmNoCheck(X86::edx);
1610 m_jit.link(notImm, m_jit.label());
1611 emitPutArg(X86::eax, 0);
1612 emitPutArg(X86::edx, 4);
1613 emitCall(i, Machine::cti_op_get_by_val);
1614 emitPutResult(instruction[i + 1].u.operand);
1615 m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
1617 // This is the slow case that handles accesses to arrays above the fast cut-off.
1618 // First, check if this is an access to the vector
1619 m_jit.link((++iter)->from, m_jit.label());
1620 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
1621 m_jit.link(m_jit.emitUnlinkedJbe(), beginGetByValSlow);
1623 // okay, missed the fast region, but it is still in the vector. Get the value.
1624 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::ecx);
1625 // Check whether the value loaded is zero; if so we need to return undefined.
1626 m_jit.testl_rr(X86::ecx, X86::ecx);
1627 m_jit.link(m_jit.emitUnlinkedJe(), beginGetByValSlow);
1628 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
1634 X86Assembler::JmpSrc notImm = iter->from;
1635 m_jit.link((++iter)->from, m_jit.label());
1636 m_jit.addl_rr(X86::edx, X86::eax);
1637 m_jit.link(notImm, m_jit.label());
1638 emitPutArg(X86::eax, 0);
1639 emitPutArg(X86::edx, 4);
1640 emitCall(i, Machine::cti_op_sub);
1641 emitPutResult(instruction[i + 1].u.operand);
1646 m_jit.link(iter->from, m_jit.label());
1647 m_jit.link((++iter)->from, m_jit.label());
1648 emitPutArg(X86::eax, 0);
1649 emitPutArg(X86::ecx, 4);
1650 emitCall(i, Machine::cti_op_rshift);
1651 emitPutResult(instruction[i + 1].u.operand);
1656 X86Assembler::JmpSrc notImm1 = iter->from;
1657 X86Assembler::JmpSrc notImm2 = (++iter)->from;
1658 m_jit.link((++iter)->from, m_jit.label());
1659 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1660 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1661 m_jit.link(notImm1, m_jit.label());
1662 m_jit.link(notImm2, m_jit.label());
1663 emitPutArg(X86::eax, 0);
1664 emitPutArg(X86::ecx, 4);
1665 emitCall(i, Machine::cti_op_lshift);
1666 emitPutResult(instruction[i + 1].u.operand);
1670 case op_loop_if_less: {
1671 emitSlowScriptCheck(i);
1673 unsigned target = instruction[i + 3].u.operand;
1674 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1676 m_jit.link(iter->from, m_jit.label());
1677 emitPutArg(X86::edx, 0);
1678 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
1679 emitCall(i, Machine::cti_op_loop_if_less);
1680 m_jit.testl_rr(X86::eax, X86::eax);
1681 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
1683 m_jit.link(iter->from, m_jit.label());
1684 m_jit.link((++iter)->from, m_jit.label());
1685 emitPutArg(X86::eax, 0);
1686 emitPutArg(X86::edx, 4);
1687 emitCall(i, Machine::cti_op_loop_if_less);
1688 m_jit.testl_rr(X86::eax, X86::eax);
1689 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
1694 case op_put_by_id: {
1695 m_jit.link(iter->from, m_jit.label());
1696 m_jit.link((++iter)->from, m_jit.label());
1698 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1699 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1700 emitPutArg(X86::eax, 0);
1701 emitPutArg(X86::edx, 8);
1702 X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_put_by_id);
1704 // Track the location of the call; this will be used to recover repatch information.
1705 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
1706 m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
1707 ++structureIDInstructionIndex;
1712 case op_get_by_id: {
1713 // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
1714 // so that we only need track one pointer into the slow case code - we track a pointer to the location
1715 // of the call (which we can use to look up the repatch information), but should an array-length or
1716 // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
1717 // the distance from the call to the head of the slow case.
1719 m_jit.link(iter->from, m_jit.label());
1720 m_jit.link((++iter)->from, m_jit.label());
1723 X86Assembler::JmpDst coldPathBegin = m_jit.label();
1725 emitPutArg(X86::eax, 0);
1726 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1727 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1728 X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_get_by_id);
1729 ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
1730 emitPutResult(instruction[i + 1].u.operand);
1732 // Track the location of the call; this will be used to recover repatch information.
1733 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
1734 m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
1735 ++structureIDInstructionIndex;
1740 case op_resolve_global: {
1741 ++structureIDInstructionIndex;
1745 case op_loop_if_lesseq: {
1746 emitSlowScriptCheck(i);
1748 unsigned target = instruction[i + 3].u.operand;
1749 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1751 m_jit.link(iter->from, m_jit.label());
1752 emitPutArg(X86::edx, 0);
1753 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
1754 emitCall(i, Machine::cti_op_loop_if_lesseq);
1755 m_jit.testl_rr(X86::eax, X86::eax);
1756 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
1758 m_jit.link(iter->from, m_jit.label());
1759 m_jit.link((++iter)->from, m_jit.label());
1760 emitPutArg(X86::eax, 0);
1761 emitPutArg(X86::edx, 4);
1762 emitCall(i, Machine::cti_op_loop_if_lesseq);
1763 m_jit.testl_rr(X86::eax, X86::eax);
1764 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
1770 unsigned srcDst = instruction[i + 1].u.operand;
1771 X86Assembler::JmpSrc notImm = iter->from;
1772 m_jit.link((++iter)->from, m_jit.label());
            // Tail of the op_pre_inc slow case: the fast path speculatively added a
            // de-tagged 1 to eax, so subtract it back out before calling the C++ helper.
            m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
            m_jit.link(notImm, m_jit.label());
            emitPutArg(X86::eax, 0);
            emitCall(i, Machine::cti_op_pre_inc);
            emitPutResult(srcDst);
        case op_put_by_val: {
            // Normal slow cases - either is not an immediate imm, or is an array.
            X86Assembler::JmpSrc notImm = iter->from;
            m_jit.link((++iter)->from, m_jit.label());
            m_jit.link((++iter)->from, m_jit.label());
            // The fast path de-tagged the subscript in edx; restore the immediate tag
            // before handing it to the C++ helper (notImm joins below, still tagged).
            emitFastArithIntToImmNoCheck(X86::edx);
            m_jit.link(notImm, m_jit.label());
            emitGetArg(instruction[i + 3].u.operand, X86::ecx);
            emitPutArg(X86::eax, 0);  // arg 0: base value
            emitPutArg(X86::edx, 4);  // arg 1: subscript
            emitPutArg(X86::ecx, 8);  // arg 2: value to store
            emitCall(i, Machine::cti_op_put_by_val);
            // Rejoin the fast path at the instruction after this opcode (length 4).
            m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
            // slow cases for immediate int accesses to arrays
            m_jit.link((++iter)->from, m_jit.label());
            m_jit.link((++iter)->from, m_jit.label());
            emitGetArg(instruction[i + 3].u.operand, X86::ecx);
            emitPutArg(X86::eax, 0);
            emitPutArg(X86::edx, 4);
            emitPutArg(X86::ecx, 8);
            emitCall(i, Machine::cti_op_put_by_val_array);
        case op_loop_if_true: {
            // Loop back-edge: give the slow-script watchdog a chance to interrupt.
            emitSlowScriptCheck(i);
            m_jit.link(iter->from, m_jit.label());
            emitPutArg(X86::eax, 0);
            emitCall(i, Machine::cti_op_jtrue);
            // Helper returns non-zero for "true": take the branch on jne.
            m_jit.testl_rr(X86::eax, X86::eax);
            unsigned target = instruction[i + 2].u.operand;
            m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
            // op_pre_dec slow case: the fast path speculatively subtracted a
            // de-tagged 1, so add it back before calling the helper.
            unsigned srcDst = instruction[i + 1].u.operand;
            X86Assembler::JmpSrc notImm = iter->from;
            m_jit.link((++iter)->from, m_jit.label());
            m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
            m_jit.link(notImm, m_jit.label());
            emitPutArg(X86::eax, 0);
            emitCall(i, Machine::cti_op_pre_dec);
            emitPutResult(srcDst);
            // Compare-and-branch slow case (constant right operand variant first).
            unsigned target = instruction[i + 3].u.operand;
            JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
            m_jit.link(iter->from, m_jit.label());
            emitPutArg(X86::edx, 0);
            emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
            emitCall(i, Machine::cti_op_jless);
            // Branch taken when cti_op_jless reports false (zero) - inverted sense.
            m_jit.testl_rr(X86::eax, X86::eax);
            m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
            // Non-constant variant: both operands failed the immediate check.
            m_jit.link(iter->from, m_jit.label());
            m_jit.link((++iter)->from, m_jit.label());
            emitPutArg(X86::eax, 0);
            emitPutArg(X86::edx, 4);
            emitCall(i, Machine::cti_op_jless);
            m_jit.testl_rr(X86::eax, X86::eax);
            m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
            // op_not slow case: undo the fast path's bool-tag xor before the call.
            m_jit.link(iter->from, m_jit.label());
            m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
            emitPutArg(X86::eax, 0);
            emitCall(i, Machine::cti_op_not);
            emitPutResult(instruction[i + 1].u.operand);
            // Jump-if-false slow case, implemented via cti_op_jtrue with the branch
            // sense inverted (je on a zero/"false" result).
            m_jit.link(iter->from, m_jit.label());
            emitPutArg(X86::eax, 0);
            emitCall(i, Machine::cti_op_jtrue);
            m_jit.testl_rr(X86::eax, X86::eax);
            unsigned target = instruction[i + 2].u.operand;
            m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 2 + target]); // inverted!
            // op_post_inc slow case: helper returns the pre-increment value in eax
            // and the incremented value via the CTI_ARGS_2ndResult slot.
            unsigned srcDst = instruction[i + 2].u.operand;
            m_jit.link(iter->from, m_jit.label());
            m_jit.link((++iter)->from, m_jit.label());
            emitPutArg(X86::eax, 0);
            emitCall(i, Machine::cti_op_post_inc);
            emitPutResult(instruction[i + 1].u.operand);
            emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
            emitPutResult(srcDst);
            // op_bitnot slow case.
            m_jit.link(iter->from, m_jit.label());
            emitPutArg(X86::eax, 0);
            emitCall(i, Machine::cti_op_bitnot);
            emitPutResult(instruction[i + 1].u.operand);
            // op_bitand slow case: three variants depending on which operand (if
            // any) is a constant immediate; register assignments mirror the fast path.
            unsigned src1 = instruction[i + 2].u.operand;
            unsigned src2 = instruction[i + 3].u.operand;
            unsigned dst = instruction[i + 1].u.operand;
            if (getConstantImmediateNumericArg(src1)) {
                m_jit.link(iter->from, m_jit.label());
                emitGetPutArg(src1, 0, X86::ecx);
                emitPutArg(X86::eax, 4);
                emitCall(i, Machine::cti_op_bitand);
            } else if (getConstantImmediateNumericArg(src2)) {
                m_jit.link(iter->from, m_jit.label());
                emitPutArg(X86::eax, 0);
                emitGetPutArg(src2, 4, X86::ecx);
                emitCall(i, Machine::cti_op_bitand);
            m_jit.link(iter->from, m_jit.label());
            emitGetPutArg(src1, 0, X86::ecx);
            emitPutArg(X86::edx, 4);
            emitCall(i, Machine::cti_op_bitand);
            // Jump-if-true slow case (non-inverted: jne on non-zero result).
            m_jit.link(iter->from, m_jit.label());
            emitPutArg(X86::eax, 0);
            emitCall(i, Machine::cti_op_jtrue);
            m_jit.testl_rr(X86::eax, X86::eax);
            unsigned target = instruction[i + 2].u.operand;
            m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
            // op_post_dec slow case: mirrors op_post_inc above.
            unsigned srcDst = instruction[i + 2].u.operand;
            m_jit.link(iter->from, m_jit.label());
            m_jit.link((++iter)->from, m_jit.label());
            emitPutArg(X86::eax, 0);
            emitCall(i, Machine::cti_op_post_dec);
            emitPutResult(instruction[i + 1].u.operand);
            emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
            emitPutResult(srcDst);
            // op_bitxor slow case.
            m_jit.link(iter->from, m_jit.label());
            emitPutArg(X86::eax, 0);
            emitPutArg(X86::edx, 4);
            emitCall(i, Machine::cti_op_bitxor);
            emitPutResult(instruction[i + 1].u.operand);
            // op_bitor slow case.
            m_jit.link(iter->from, m_jit.label());
            emitPutArg(X86::eax, 0);
            emitPutArg(X86::edx, 4);
            emitCall(i, Machine::cti_op_bitor);
            emitPutResult(instruction[i + 1].u.operand);
            // op_mod slow case: the fast path de-tagged both operands, so re-tag
            // eax/ecx on the path that took the divide-related bail-out; the two
            // not-immediate bail-outs join afterwards with tags still intact.
            X86Assembler::JmpSrc notImm1 = iter->from;
            X86Assembler::JmpSrc notImm2 = (++iter)->from;
            m_jit.link((++iter)->from, m_jit.label());
            emitFastArithReTagImmediate(X86::eax);
            emitFastArithReTagImmediate(X86::ecx);
            m_jit.link(notImm1, m_jit.label());
            m_jit.link(notImm2, m_jit.label());
            emitPutArg(X86::eax, 0);
            emitPutArg(X86::ecx, 4);
            emitCall(i, Machine::cti_op_mod);
            emitPutResult(instruction[i + 1].u.operand);
            CTI_COMPILE_BINARY_OP_SLOW_CASE(op_mul);
        case op_construct: {
            m_jit.link(iter->from, m_jit.label());
            // Set up the argument pointer for the compile helper call below.
            m_jit.emitRestoreArgumentReference();
            // We jump to this slow case if the ctiCode for the codeBlock has not yet been generated; compile it now.
            emitCall(i, Machine::cti_vm_compile);
            // cti_vm_compile returns the freshly generated code address in eax.
            m_jit.call_r(X86::eax);
            // Instead of checking for 0 we could initialize the CodeBlock::ctiCode to point to a trampoline that would trigger the translation.
            // In the interpreter the following actions are performed by op_ret:
            // Store the scope chain - returned by op_ret in %edx (see below) - to ExecState::m_scopeChain and CTI_ARGS_scopeChain on the stack.
            emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
            emitPutCTIParam(X86::edx, CTI_ARGS_scopeChain);
            m_jit.movl_rm(X86::edx, OBJECT_OFFSET(ExecState, m_scopeChain), X86::ecx);
            // Restore ExecState::m_callFrame.
            m_jit.leal_mr(-(m_codeBlock->numLocals + RegisterFile::CallFrameHeaderSize) * sizeof(Register), X86::edi, X86::edx);
            m_jit.movl_rm(X86::edx, OBJECT_OFFSET(ExecState, m_callFrame), X86::ecx);
            // Restore CTI_ARGS_codeBlock.
            emitPutCTIParam(m_codeBlock, CTI_ARGS_codeBlock);
            emitPutResult(instruction[i + 1].u.operand);
            // Any opcode without a slow-case handler must never reach here.
            ASSERT_NOT_REACHED();
        // Rejoin the fast path at the start of the current opcode.
        m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i]);
    // Every recorded structure-ID instruction must have been consumed.
    ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
// Top-level compilation driver: emits prologue, runs the main/link/slow-case
// passes, copies the generated buffer into executable memory, then resolves
// switch tables, exception handlers, call records and structure stubs against
// the final code address.
void CTI::privateCompile()
    // Could use a popl_m, but would need to offset the following instruction if so.
    m_jit.popl_r(X86::ecx);
    emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
    // Stash the native return address into the call frame header.
    emitPutToCallFrameHeader(X86::ecx, RegisterFile::CTIReturnEIP);
    // Lazy copy of the scopeChain
    X86Assembler::JmpSrc callToUpdateScopeChain;
    if ((m_codeBlock->codeType == FunctionCode) && m_codeBlock->needsFullScopeChain) {
        m_jit.emitRestoreArgumentReference();
        // Linked below to Machine::cti_vm_updateScopeChain once code is copied.
        callToUpdateScopeChain = m_jit.emitCall();
    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();
    // All intra-function jumps must have been resolved by the link pass.
    ASSERT(m_jmpTable.isEmpty());
    void* code = m_jit.copy();
    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (unsigned i = 0; i < m_switches.size(); ++i) {
        SwitchRecord record = m_switches[i];
        unsigned opcodeIndex = record.m_opcodeIndex;
        if (record.m_type != SwitchRecord::String) {
            ASSERT(record.m_type == SwitchRecord::Immediate || record.m_type == SwitchRecord::Character);
            ASSERT(record.m_jumpTable.m_simpleJumpTable->branchOffsets.size() == record.m_jumpTable.m_simpleJumpTable->ctiOffsets.size());
            record.m_jumpTable.m_simpleJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
            // A zero branch offset means "use the default target".
            for (unsigned j = 0; j < record.m_jumpTable.m_simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.m_jumpTable.m_simpleJumpTable->branchOffsets[j];
                record.m_jumpTable.m_simpleJumpTable->ctiOffsets[j] = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_simpleJumpTable->ctiDefault;
            ASSERT(record.m_type == SwitchRecord::String);
            record.m_jumpTable.m_stringJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
            StringJumpTable::StringOffsetTable::iterator end = record.m_jumpTable.m_stringJumpTable->offsetTable.end();
            for (StringJumpTable::StringOffsetTable::iterator it = record.m_jumpTable.m_stringJumpTable->offsetTable.begin(); it != end; ++it) {
                unsigned offset = it->second.branchOffset;
                it->second.ctiOffset = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_stringJumpTable->ctiDefault;
    // Resolve exception handler entry points to native addresses.
    for (Vector<HandlerInfo>::iterator iter = m_codeBlock->exceptionHandlers.begin(); iter != m_codeBlock->exceptionHandlers.end(); ++iter)
        iter->nativeCode = m_jit.getRelocatedAddress(code, m_labels[iter->target]);
    // FIXME: There doesn't seem to be a way to hint to a hashmap that it should make a certain capacity available;
    // could be faster if we could do something like this:
    // m_codeBlock->ctiReturnAddressVPCMap.grow(m_calls.size());
    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
        X86Assembler::link(code, iter->from, iter->to);
        // Map each call's native return address back to its bytecode index.
        m_codeBlock->ctiReturnAddressVPCMap.add(m_jit.getRelocatedAddress(code, iter->from), iter->opcodeIndex);
    if ((m_codeBlock->codeType == FunctionCode) && m_codeBlock->needsFullScopeChain)
        X86Assembler::link(code, callToUpdateScopeChain, (void*)Machine::cti_vm_updateScopeChain);
    // Link absolute addresses for jsr
    for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
        X86Assembler::linkAbsoluteAddress(code, iter->addrPosition, iter->target);
    // Record property-access stub locations used later by the repatching code.
    for (unsigned i = 0; i < m_codeBlock->structureIDInstructions.size(); ++i) {
        StructureStubInfo& info = m_codeBlock->structureIDInstructions[i];
        info.callReturnLocation = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].callReturnLocation);
        info.hotPathBegin = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].hotPathBegin);
    m_codeBlock->ctiCode = code;
// Generates a monomorphic "self" stub for a cached get_by_id: if eax holds a
// cell with the expected StructureID, load the property straight out of its
// property storage at the cached offset; otherwise fall back to the generic
// cti_op_get_by_id_fail helper.
void CTI::privateCompileGetByIdSelf(StructureID* structureID, size_t cachedOffset, void* returnAddress)
    // Check eax is an object of the right StructureID.
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);  // tag bits set => immediate, not a cell
    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
    // Checks out okay! - getDirectOffset
    m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
    void* code = m_jit.copy();
    // Both guard failures bail to the generic fail routine.
    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    // Track the stub so the CodeBlock can delete it later.
    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
    // Redirect the original call site to the new stub.
    ctiRepatchCallByReturnAddress(returnAddress, code);
// Generates a stub for a get_by_id that found the property on the direct
// prototype: guards the base object's StructureID and the prototype's
// StructureID, then loads from the prototype's property storage. Two build
// variants exist - the repatching-PIC form and a stand-alone stub form.
void CTI::privateCompileGetByIdProto(StructureID* structureID, StructureID* prototypeStructureID, size_t cachedOffset, void* returnAddress)
#if USE(CTI_REPATCH_PIC)
    StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
    // We don't want to repatch more than once - in future go to cti_op_get_by_id_fail.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    // check eax is an object of the right StructureID.
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
    // Check the prototype object's StructureID had not changed.
    StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
    X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
    // Checks out okay! - getDirectOffset
    m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::ecx);
    X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
    void* code = m_jit.copy();
    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
    X86Assembler::link(code, failureCases1, slowCaseBegin);
    X86Assembler::link(code, failureCases2, slowCaseBegin);
    X86Assembler::link(code, failureCases3, slowCaseBegin);
    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
    // Track the stub we have created so that it will be deleted later.
    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
    // Finally repatch the jump to slow case back in the hot path to jump here instead.
    // FIXME: should revert this repatching, on failure.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
    // Non-repatching variant (presumably the !USE(CTI_REPATCH_PIC) branch):
    // identical guards, but failures bail to cti_op_get_by_id_fail directly.
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    // check eax is an object of the right StructureID.
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
    // Check the prototype object's StructureID had not changed.
    StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
    X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
    // Checks out okay! - getDirectOffset
    m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    void* code = m_jit.copy();
    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
    ctiRepatchCallByReturnAddress(returnAddress, code);
// Generates a stub for a get_by_id resolved `count` links up the prototype
// chain: guards the base object's StructureID and every intermediate
// prototype's StructureID, then loads the property from the final prototype's
// storage. All guard failures funnel to cti_op_get_by_id_fail.
void CTI::privateCompileGetByIdChain(StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset, void* returnAddress)
    Vector<X86Assembler::JmpSrc> bucketsOfFail;
    // Check eax is an object of the right StructureID.
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    bucketsOfFail.append(m_jit.emitUnlinkedJne());
    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
    bucketsOfFail.append(m_jit.emitUnlinkedJne());
    // Walk the recorded chain, guarding each prototype's StructureID in turn.
    StructureID* currStructureID = structureID;
    RefPtr<StructureID>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i<count; ++i) {
        protoObject = static_cast<JSObject*>(currStructureID->prototypeForLookup(m_exec));
        currStructureID = chainEntries[i].get();
        // Check the prototype object's StructureID had not changed.
        StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
        m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(currStructureID), static_cast<void*>(protoStructureIDAddress));
        bucketsOfFail.append(m_jit.emitUnlinkedJne());
    ASSERT(protoObject);
    // Load the property from the last prototype's out-of-line storage.
    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
    m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
    m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
    bucketsOfFail.append(m_jit.emitUnlinkedJmp());
    void* code = m_jit.copy();
    for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
        X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    // Track the stub for later deletion, then point the call site at it.
    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
    ctiRepatchCallByReturnAddress(returnAddress, code);
// Generates a stub for a put_by_id that replaces an existing property (no
// structure transition): guard the StructureID, then store edx (the value)
// into the property storage at the cached offset.
void CTI::privateCompilePutByIdReplace(StructureID* structureID, size_t cachedOffset, void* returnAddress)
    // check eax is an object of the right StructureID.
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
    // checks out okay! - putDirectOffset
    m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
    void* code = m_jit.copy();
    // Guard failures bail to the generic put_by_id fail routine.
    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
    // Track the stub for later deletion, then point the call site at it.
    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
    ctiRepatchCallByReturnAddress(returnAddress, code);
// Out-of-line helper called from the put_by_id transition stub when the
// transition needs property-storage reallocation: performs the structure
// transition, grows the storage if the old structure had exactly filled the
// inline capacity, then writes the value at the cached offset.
static JSValue* SFX_CALL transitionObject(StructureID* newStructureID, size_t cachedOffset, JSObject* baseObject, JSValue* value)
    StructureID* oldStructureID = newStructureID->previousID();
    baseObject->transitionTo(newStructureID);
    // If the old structure had filled the inline slots, this put spills into
    // out-of-line storage - allocate it now, preserving existing properties.
    if (oldStructureID->propertyMap().storageSize() == JSObject::inlineStorageCapacity)
        baseObject->allocatePropertyStorage(oldStructureID->propertyMap().storageSize(), oldStructureID->propertyMap().size());
    baseObject->putDirectOffset(cachedOffset, value);
// Decides whether a structure transition requires reallocating the object's
// property storage (in which case the stub must call transitionObject above
// rather than storing inline). Based on whether the old storage size sits at,
// below, or above the inline capacity, and whether the map size changes.
static inline bool transitionWillNeedStorageRealloc(StructureID* oldStructureID, StructureID* newStructureID)
    // At exactly inline capacity: the next property forces out-of-line storage.
    if (oldStructureID->propertyMap().storageSize() == JSObject::inlineStorageCapacity)
    // Still within inline storage: no reallocation needed.
    if (oldStructureID->propertyMap().storageSize() < JSObject::inlineStorageCapacity)
    // Already out-of-line: realloc only if the map size actually changes.
    if (oldStructureID->propertyMap().size() != newStructureID->propertyMap().size())
// Generates a stub for a put_by_id that transitions the object from
// oldStructureID to newStructureID: guards the base structure and walks the
// recorded prototype chain (sIDC) verifying nothing was shadowed, then either
// performs the transition inline (refcount twiddle + structure swap + store)
// or calls out to transitionObject when storage must be reallocated.
void CTI::privateCompilePutByIdTransition(StructureID* oldStructureID, StructureID* newStructureID, size_t cachedOffset, StructureIDChain* sIDC, void* returnAddress)
    Vector<X86Assembler::JmpSrc, 16> failureCases;
    // check eax is an object of the right StructureID.
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    failureCases.append(m_jit.emitUnlinkedJne());
    m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(oldStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
    failureCases.append(m_jit.emitUnlinkedJne());
    Vector<X86Assembler::JmpSrc> successCases;
    m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
    // proto(ecx) = baseObject->structureID()->prototype()
    m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_type), X86::ecx);
    failureCases.append(m_jit.emitUnlinkedJne());
    m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
    // ecx = baseObject->m_structureID
    for (RefPtr<StructureID>* it = sIDC->head(); *it; ++it) {
        // null check the prototype
        m_jit.cmpl_i32r(reinterpret_cast<intptr_t> (jsNull()), X86::ecx);
        successCases.append(m_jit.emitUnlinkedJe());
        // Check the structure id
        m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(it->get()), OBJECT_OFFSET(JSCell, m_structureID), X86::ecx);
        failureCases.append(m_jit.emitUnlinkedJne());
        // Advance ecx to this prototype's own prototype for the next iteration.
        m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::ecx, X86::ecx);
        m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_type), X86::ecx);
        failureCases.append(m_jit.emitUnlinkedJne());
        m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
    failureCases.append(m_jit.emitUnlinkedJne());
    // All early-outs from the chain walk (hit null prototype) land here.
    for (unsigned i = 0; i < successCases.size(); ++i)
        m_jit.link(successCases[i], m_jit.label());
    X86Assembler::JmpSrc callTarget;
    // Fast case, don't need to do any heavy lifting, so don't bother making a call.
    if (!transitionWillNeedStorageRealloc(oldStructureID, newStructureID)) {
        // Assumes m_refCount can be decremented easily, refcount decrement is safe as
        // codeblock should ensure oldStructureID->m_refCount > 0
        m_jit.subl_i8m(1, reinterpret_cast<void*>(oldStructureID));
        m_jit.addl_i8m(1, reinterpret_cast<void*>(newStructureID));
        m_jit.movl_i32m(reinterpret_cast<uint32_t>(newStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
        // write the value
        m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
        m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
        // Slow case transition -- we're going to need to quite a bit of work,
        // so just make a call
        // Push args right-to-left for the SFX_CALL to transitionObject:
        // (newStructureID, cachedOffset, baseObject, value).
        m_jit.pushl_r(X86::edx);
        m_jit.pushl_r(X86::eax);
        m_jit.movl_i32r(cachedOffset, X86::eax);
        m_jit.pushl_r(X86::eax);
        m_jit.movl_i32r(reinterpret_cast<uint32_t>(newStructureID), X86::eax);
        m_jit.pushl_r(X86::eax);
        callTarget = m_jit.emitCall();
        // Caller cleans up the four pushed arguments.
        m_jit.addl_i32r(4 * sizeof(void*), X86::esp);
    void* code = m_jit.copy();
    for (unsigned i = 0; i < failureCases.size(); ++i)
        X86Assembler::link(code, failureCases[i], reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
    if (transitionWillNeedStorageRealloc(oldStructureID, newStructureID))
        X86Assembler::link(code, callTarget, reinterpret_cast<void*>(transitionObject));
    // Track the stub for later deletion, then point the call site at it.
    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
    ctiRepatchCallByReturnAddress(returnAddress, code);
// Builds the shared trampoline for reading JSArray::length on a get_by_id:
// vptr-checks that eax is a JSArray, loads the length, and tags it as an
// immediate integer (value*2 + 1). Overflow of the tagging doubles bails out.
void* CTI::privateCompileArrayLengthTrampoline()
    // Check eax is an array
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
    m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
    // Checks out okay! - get the length from the storage
    m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::eax);
    m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::eax, X86::eax);
    // Re-tag as a JSImmediate int: eax = length * 2 + 1; jo catches lengths
    // too large to represent as an immediate.
    m_jit.addl_rr(X86::eax, X86::eax);
    X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
    m_jit.addl_i8r(1, X86::eax);
    void* code = m_jit.copy();
    // All failures fall back to the generic get_by_id fail routine.
    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
// Builds the shared trampoline for reading JSString::length on a get_by_id:
// vptr-checks that eax is a JSString, loads the UString rep's length, and
// tags it as an immediate integer; mirrors the array-length trampoline above.
void* CTI::privateCompileStringLengthTrampoline()
    // Check eax is a string
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
    m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsStringVptr), X86::eax);
    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
    // Checks out okay! - get the length from the Ustring.
    m_jit.movl_mr(OBJECT_OFFSET(JSString, m_value) + OBJECT_OFFSET(UString, m_rep), X86::eax, X86::eax);
    m_jit.movl_mr(OBJECT_OFFSET(UString::Rep, len), X86::eax, X86::eax);
    // Re-tag as a JSImmediate int: eax = len * 2 + 1; jo catches overflow.
    m_jit.addl_rr(X86::eax, X86::eax);
    X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
    m_jit.addl_i8r(1, X86::eax);
    void* code = m_jit.copy();
    // All failures fall back to the generic get_by_id fail routine.
    X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
// Repatches an existing get_by_id hot path in place (no new stub): rewrites
// the inline displacement and structure-compare immediate at the recorded
// offsets from hotPathBegin, and redirects the slow-case call to the generic
// handler so we never repatch twice.
void CTI::patchGetByIdSelf(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
    StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
    // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
    // Should probably go to Machine::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_get_by_id_generic));
    // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
    X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
    X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdStructureID, reinterpret_cast<uint32_t>(structureID));
// Repatches an existing put_by_id (replace) hot path in place: rewrites the
// inline store displacement and structure-compare immediate, and redirects
// the slow-case call to the generic handler so we never repatch twice.
void CTI::patchPutByIdReplace(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
    StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
    // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to Machine::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_put_by_id_generic));
    // Repatch the offset into the property map to load from, then repatch the StructureID to look for.
    X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
    X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdStructureID, reinterpret_cast<uint32_t>(structureID));
// Compiles a PIC stub for array.length reached through a get_by_id site:
// vptr-checks eax is a JSArray, loads and immediate-tags the length into ecx,
// then rejoins the hot path; failures jump back to the original slow case.
void CTI::privateCompilePatchGetArrayLength(void* returnAddress)
    StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
    // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
    ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
    // Check eax is an array
    m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
    X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
    m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
    X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
    // Checks out okay! - get the length from the storage
    m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
    m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::ecx, X86::ecx);
    // Re-tag as a JSImmediate int: ecx = length * 2 + 1; jo catches overflow.
    m_jit.addl_rr(X86::ecx, X86::ecx);
    X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
    m_jit.addl_i8r(1, X86::ecx);
    X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
    void* code = m_jit.copy();
    // Use the repatch information to link the failure cases back to the original slow case routine.
    void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
    X86Assembler::link(code, failureCases1, slowCaseBegin);
    X86Assembler::link(code, failureCases2, slowCaseBegin);
    X86Assembler::link(code, failureCases3, slowCaseBegin);
    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
    X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
    // Track the stub we have created so that it will be deleted later.
    m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
    // Finally repatch the jump to slow case back in the hot path to jump here instead.
    // FIXME: should revert this repatching, on failure.
    intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
    X86Assembler::repatchBranchOffset(jmpLocation, code);
// Emits code to load register `index` of a JSVariableObject into dst:
// dereferences variableObject->d, then d->registers, then the indexed slot.
void CTI::emitGetVariableObjectRegister(X86Assembler::RegisterID variableObject, int index, X86Assembler::RegisterID dst)
    m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, dst);
    m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), dst, dst);
    m_jit.movl_mr(index * sizeof(Register), dst, dst);
// Emits code to store src into register `index` of a JSVariableObject.
// Note: clobbers variableObject, reusing it to hold the registers pointer.
void CTI::emitPutVariableObjectRegister(X86Assembler::RegisterID src, X86Assembler::RegisterID variableObject, int index)
    m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, variableObject);
    m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), variableObject, variableObject);
    m_jit.movl_rm(src, index * sizeof(Register), variableObject);
// WREC entry point: compiles `pattern` to native x86 code. On success returns
// the compiled matcher and sets *numSubpatterns_ptr; on failure sets
// *error_ptr. The generated matcher retries the disjunction at successive
// start positions until a match or end of input, returning -1 on no match.
void* CTI::compileRegExp(ExecState* exec, const UString& pattern, unsigned* numSubpatterns_ptr, const char** error_ptr, bool ignoreCase, bool multiline)
    // TODO: better error messages
    if (pattern.size() > MaxPatternSize) {
        *error_ptr = "regular expression too large";
    X86Assembler jit(exec->machine()->jitCodeBuffer());
    WRECParser parser(pattern, ignoreCase, multiline, jit);
    jit.emitConvertToFastCall();
    // Preserve regs & initialize outputRegister.
    jit.pushl_r(WRECGenerator::outputRegister);
    jit.pushl_r(WRECGenerator::currentValueRegister);
    // push pos onto the stack, both to preserve and as a parameter available to parseDisjunction
    jit.pushl_r(WRECGenerator::currentPositionRegister);
    // load output pointer
          , X86::esp, WRECGenerator::outputRegister);
    // restart point on match fail.
    WRECGenerator::JmpDst nextLabel = jit.label();
    // (1) Parse Disjunction:
    //     Parsing the disjunction should fully consume the pattern.
    JmpSrcVector failures;
    parser.parseDisjunction(failures);
    // NOTE(review): the error is flagged when the parser *is* at end-of-pattern,
    // which looks inverted relative to the comment above ("should fully consume
    // the pattern") - confirm against WRECParser::isEndOfPattern semantics.
    if (parser.isEndOfPattern()) {
        parser.m_err = WRECParser::Error_malformedPattern;
        // TODO: better error messages
        *error_ptr = "TODO: better error messages";
    // Set return value & pop registers from the stack.
    jit.testl_rr(WRECGenerator::outputRegister, WRECGenerator::outputRegister);
    WRECGenerator::JmpSrc noOutput = jit.emitUnlinkedJe();
    // Output buffer present: record match end, then start, and restore regs.
    jit.movl_rm(WRECGenerator::currentPositionRegister, 4, WRECGenerator::outputRegister);
    jit.popl_r(X86::eax);
    jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
    jit.popl_r(WRECGenerator::currentValueRegister);
    jit.popl_r(WRECGenerator::outputRegister);
    // No output buffer: just restore the saved registers.
    jit.link(noOutput, jit.label());
    jit.popl_r(X86::eax);
    jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
    jit.popl_r(WRECGenerator::currentValueRegister);
    jit.popl_r(WRECGenerator::outputRegister);
    // All fails link to here. Progress the start point & if it is within scope, loop.
    // Otherwise, return fail value.
    WRECGenerator::JmpDst here = jit.label();
    for (unsigned i = 0; i < failures.size(); ++i)
        jit.link(failures[i], here);
    // Bump the saved start position (kept on the stack) and retry while it is
    // still <= the input length.
    jit.movl_mr(X86::esp, WRECGenerator::currentPositionRegister);
    jit.addl_i8r(1, WRECGenerator::currentPositionRegister);
    jit.movl_rm(WRECGenerator::currentPositionRegister, X86::esp);
    jit.cmpl_rr(WRECGenerator::lengthRegister, WRECGenerator::currentPositionRegister);
    jit.link(jit.emitUnlinkedJle(), nextLabel);
    // Out of retries: discard the saved position, return -1, restore regs.
    jit.addl_i8r(4, X86::esp);
    jit.movl_i32r(-1, X86::eax);
    jit.popl_r(WRECGenerator::currentValueRegister);
    jit.popl_r(WRECGenerator::outputRegister);
    *numSubpatterns_ptr = parser.m_numSubpatterns;
    void* code = jit.copy();
2612 #endif // ENABLE(WREC)
2616 #endif // ENABLE(CTI)