/*
 * Copyright (C) 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "ThunkGenerators.h"

#include "CodeBlock.h"
#include "DFGSpeculativeJIT.h"
#include "JITOperations.h"
#include "JSArrayIterator.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "JSCInlines.h"
#include "SpecializedThunkJIT.h"
#include <wtf/InlineASM.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/StringImpl.h>

#if ENABLE(JIT)

namespace JSC {
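
// emitPointerValidation emits debug-only sanity checks on a pointer we are about to call
// or jump through: trap if it is null, then perform a harmless load through it so that a
// bogus pointer faults here, at the point of validation, rather than at some
// harder-to-diagnose later use.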
inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
{
#if !ASSERT_DISABLED
    CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
    jit.breakpoint();
    isNonZero.link(&jit);
    jit.pushToSave(pointerGPR);
    jit.load8(pointerGPR, pointerGPR);
    jit.popToRestore(pointerGPR);
#else
    UNUSED_PARAM(jit);
    UNUSED_PARAM(pointerGPR);
#endif
}
// We will jump here if the JIT code tries to make a call, but the
// linking helper (C++ code) decides to throw an exception instead.
MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm)
{
    CCallHelpers jit(vm);

    // The call pushed a return address, so we need to pop it back off to re-align the stack,
    // even though we won't use it.
    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);

    jit.setupArguments(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    jit.jumpToExceptionHandler();

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Throw exception from call slow path thunk"));
}
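
// slowPathFor emits the common shell used by the call linking thunks below: build a
// frame, record the top call frame, call the given C++ JIT operation, and then jump to
// whatever machine code pointer that operation returns.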
static void slowPathFor(
    CCallHelpers& jit, VM* vm, P_JITOperation_E slowPathFunction)
{
    jit.emitFunctionPrologue();
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.setupArgumentsExecState();
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);

    // This slow call will return the address of one of the following:
    // 1) The exception throwing thunk.
    // 2) The thunk that returns a host call's return value.
    // 3) The function to call.
    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.emitFunctionEpilogue();
    jit.jump(GPRInfo::returnValueGPR);
}
static MacroAssemblerCodeRef linkForThunkGenerator(
    VM* vm, CodeSpecializationKind kind, RegisterPreservationMode registers)
{
    // The return address is on the stack or in the link register. We will hence
    // save the return address to the call frame while we make a C++ function call
    // to perform linking and lazy compilation if necessary. We expect the callee
    // to be in regT0/regT1 (payload/tag), the CallFrame to have already
    // been adjusted, and all other registers to be available for use.

    CCallHelpers jit(vm);

    slowPathFor(jit, vm, operationLinkFor(kind, registers));

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Link %s%s slow path thunk", kind == CodeForCall ? "call" : "construct", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}

MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForCall, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef linkConstructThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForConstruct, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef linkCallThatPreservesRegsThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForCall, MustPreserveRegisters);
}

MacroAssemblerCodeRef linkConstructThatPreservesRegsThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForConstruct, MustPreserveRegisters);
}
static MacroAssemblerCodeRef linkClosureCallForThunkGenerator(
    VM* vm, RegisterPreservationMode registers)
{
    CCallHelpers jit(vm);

    slowPathFor(jit, vm, operationLinkClosureCallFor(registers));

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Link closure call%s slow path thunk", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}

// For closure optimizations, we only include calls, since if you're using closures for
// object construction then you're going to lose big time anyway.
MacroAssemblerCodeRef linkClosureCallThunkGenerator(VM* vm)
{
    return linkClosureCallForThunkGenerator(vm, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef linkClosureCallThatPreservesRegsThunkGenerator(VM* vm)
{
    return linkClosureCallForThunkGenerator(vm, MustPreserveRegisters);
}
static MacroAssemblerCodeRef virtualForThunkGenerator(
    VM* vm, CodeSpecializationKind kind, RegisterPreservationMode registers)
{
    // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate JIT operation.

    CCallHelpers jit(vm);

    CCallHelpers::JumpList slowCase;

    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.

#if USE(JSVALUE64)
    jit.move(CCallHelpers::TrustedImm64(TagMask), GPRInfo::regT2);

    slowCase.append(
        jit.branchTest64(
            CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::regT2));
#else
    slowCase.append(
        jit.branch32(
            CCallHelpers::NotEqual, GPRInfo::regT1,
            CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
    AssemblyHelpers::emitLoadStructure(jit, GPRInfo::regT0, GPRInfo::regT2, GPRInfo::regT1);
    slowCase.append(
        jit.branchPtr(
            CCallHelpers::NotEqual,
            CCallHelpers::Address(GPRInfo::regT2, Structure::classInfoOffset()),
            CCallHelpers::TrustedImmPtr(JSFunction::info())));

    // Now we know we have a JSFunction.

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
        GPRInfo::regT2);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT2, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind, registers)),
        GPRInfo::regT2);
    slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT2));

    // Now we know that we have a CodeBlock, and we're committed to making a fast
    // call.

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfScopeChain()),
        GPRInfo::regT1);
#if USE(JSVALUE64)
    jit.emitPutToCallFrameHeaderBeforePrologue(GPRInfo::regT1, JSStack::ScopeChain);
#else
    jit.emitPutPayloadToCallFrameHeaderBeforePrologue(GPRInfo::regT1, JSStack::ScopeChain);
    jit.emitPutTagToCallFrameHeaderBeforePrologue(CCallHelpers::TrustedImm32(JSValue::CellTag),
        JSStack::ScopeChain);
#endif

    // Make a tail call. This will return back to JIT code.
    emitPointerValidation(jit, GPRInfo::regT2);
    jit.jump(GPRInfo::regT2);

    slowCase.link(&jit);

    // Here we don't know anything, so revert to the full slow path.

    slowPathFor(jit, vm, operationVirtualFor(kind, registers));

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Virtual %s%s slow path thunk", kind == CodeForCall ? "call" : "construct", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}
MacroAssemblerCodeRef virtualCallThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForCall, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef virtualConstructThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForConstruct, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef virtualCallThatPreservesRegsThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForCall, MustPreserveRegisters);
}

MacroAssemblerCodeRef virtualConstructThatPreservesRegsThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForConstruct, MustPreserveRegisters);
}
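
// nativeForGenerator builds the trampoline used to enter a host (C/C++) function from
// JS. It materializes the ExecState* argument according to the platform calling
// convention, calls through the NativeExecutable's function pointer, and checks for a
// pending exception before returning.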
enum ThunkEntryType { EnterViaCall, EnterViaJump };

static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall)
{
    int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);

    JSInterfaceJIT jit(vm);

    if (entryType == EnterViaCall)
        jit.emitFunctionPrologue();

    jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    // Calling convention:       f(ecx, edx, ...);
    // Host function signature:  f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); // Align stack after prologue.

    // Call the function.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);

#elif CPU(X86_64) && !OS(WINDOWS)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    // Calling convention:       f(edi, esi, edx, ecx, ...);
    // Host function signature:  f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

#elif CPU(X86_64) && OS(WINDOWS)
    // Calling convention:       f(ecx, edx, r8, r9, ...);
    // Host function signature:  f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    // Leave space for the callee parameter home addresses and align the stack.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

#elif CPU(ARM64)
    COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT1, prev_callframe_not_trampled_by_T1);
    COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT3, prev_callframe_not_trampled_by_T3);
    COMPILE_ASSERT(ARM64Registers::x0 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_0);
    COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1);
    COMPILE_ASSERT(ARM64Registers::x2 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_2);

    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetCallerFrameFromCallFrameHeaderPtr(ARM64Registers::x3);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, ARM64Registers::x3);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    // Host function signature:  f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARM64Registers::x1);
    jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2);
    jit.call(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction));
#elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
    // Load caller frame's scope chain into this callframe so that whatever we call can get to its global data.
    jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT2);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

#if CPU(MIPS)
    // Allocate stack space for (unused) 16 bytes (8-byte aligned) for 4 arguments.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif

    // Calling convention is f(argumentGPR0, argumentGPR1, ...).
    // Host function signature is f(ExecState*).
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::argumentGPR1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

#if CPU(MIPS)
    // Restore the stack space reserved for the arguments.
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif
#else
#error "JIT not supported on this platform."
    UNUSED_PARAM(executableOffsetToFunction);
#endif

    // Check for an exception.
#if USE(JSVALUE64)
    jit.load64(vm->addressOfException(), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
#else
    JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::AbsoluteAddress(reinterpret_cast<char*>(vm->addressOfException()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
        JSInterfaceJIT::TrustedImm32(JSValue::EmptyValueTag));
#endif

    jit.emitFunctionEpilogue();
    jit.ret();

    // Handle an exception.
    exceptionHandler.link(&jit);

    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(-12), JSInterfaceJIT::stackPointerRegister);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::regT0);
    jit.push(JSInterfaceJIT::regT0);
#else
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::argumentGPR0);
#endif
    jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), JSInterfaceJIT::regT3);
    jit.call(JSInterfaceJIT::regT3);
#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif

    jit.jumpToExceptionHandler();

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("native %s%s trampoline", entryType == EnterViaJump ? "Tail " : "", toCString(kind).data()));
}
MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForCall);
}

MacroAssemblerCodeRef nativeTailCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForCall, EnterViaJump);
}

MacroAssemblerCodeRef nativeConstructGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForConstruct);
}
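
// arityFixup is entered when a callee has been invoked with fewer arguments than it
// declares: it slides the just-constructed frame down the stack and fills the newly
// exposed argument slots with undefined, keeping the frame stack-aligned, before
// returning through the return thunk supplied by the caller.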
MacroAssemblerCodeRef arityFixup(VM* vm)
{
    JSInterfaceJIT jit(vm);

    // We enter with fixup count, in aligned stack units, in regT0 and the return thunk in
    // regT5 on 32-bit and regT7 on 64-bit.
#if USE(JSVALUE64)
#if CPU(X86_64)
    jit.pop(JSInterfaceJIT::regT4);
#endif
    jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0);
    jit.neg64(JSInterfaceJIT::regT0);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT6);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);

    // Move the current frame down regT0 number of slots.
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load64(JSInterfaceJIT::regT6, JSInterfaceJIT::regT1);
    jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);

    // Fill in regT0 - 1 missing arg slots with undefined.
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT1);
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);

    // Adjust call frame register and stack pointer to account for missing args.
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1);
    jit.lshift64(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister);

    // Save the original return PC.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1);
    jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));

    // Install the new return PC.
    jit.storePtr(GPRInfo::regT7, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()));

#if CPU(X86_64)
    jit.push(JSInterfaceJIT::regT4);
#endif
    jit.ret();
#else // USE(JSVALUE64)
#if CPU(X86)
    jit.pop(JSInterfaceJIT::regT4);
#endif
    jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0);
    jit.neg32(JSInterfaceJIT::regT0);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);

    // Move the current frame down regT0 number of slots.
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load32(JSInterfaceJIT::regT3, JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, 4), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);

    // Fill in regT0 - 1 missing arg slots with undefined.
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);

    // Adjust call frame register and stack pointer to account for missing args.
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1);
    jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister);

    // Save the original return PC.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1);
    jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));

    // Install the new return PC.
    jit.storePtr(GPRInfo::regT5, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()));

#if CPU(X86)
    jit.push(JSInterfaceJIT::regT4);
#endif
    jit.ret();
#endif // USE(JSVALUE64)

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("fixup arity"));
}
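
// The generators below produce specialized thunks for a handful of hot built-ins
// (String.prototype.charCodeAt/charAt, String.fromCharCode, several Math functions, and
// the array iterator). Each thunk handles only the common cases inline and bails out to
// the ordinary native call for anything it does not handle.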
static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
{
    // Load the string.
    jit.loadJSStringArgument(*vm, SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);

    // Load string length to regT2, and start the process of loading the data pointer into regT0.
    jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0));

    // Load the index.
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index.

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large.
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));

    // Load the character.
    SpecializedThunkJIT::JumpList is16Bit;
    SpecializedThunkJIT::JumpList cont8Bit;
    // Load the string flags.
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::flagsOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::dataOffset()), SpecializedThunkJIT::regT0);
    is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
    cont8Bit.link(&jit);
}

static void charToString(SpecializedThunkJIT& jit, VM* vm, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
{
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::TrustedImm32(0x100)));
    jit.move(MacroAssembler::TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), scratch);
    jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
    jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
}
MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charCodeAt");
}

MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charAt");
}

MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    // Load the char code.
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "fromCharCode");
}

MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointSqrt())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "sqrt");
}
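
// The defineUnaryDoubleOpWrapper machinery below wraps a C math function (exp, log,
// floor, ceil, jsRound) in a tiny assembly shim with a bare double-in/double-out register
// convention, so the specialized thunks can call it without building a full C ABI call.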
#define UnaryDoubleOpWrapper(function) function##Wrapper
enum MathThunkCallingConvention { };
typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
extern "C" {

double jsRound(double) REFERENCED_FROM_ASM;
double jsRound(double d)
{
    double integer = ceil(d);
    return integer - (integer - d > 0.5);
}

}

#if CPU(X86_64) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "pushq %rax\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "popq %rcx\n" \
        "ret\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "subl $20, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $20, %esp\n" \
        "ret\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM_THUMB2) && COMPILER(GCC) && PLATFORM(IOS)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        ".thumb\n" \
        ".thumb_func " THUMB_FUNC_PARAM(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "push {lr}\n" \
        "vmov r0, r1, d0\n" \
        "blx " GLOBAL_REFERENCE(function) "\n" \
        "vmov d0, r0, r1\n" \
        "pop {lr}\n" \
        "bx lr\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM64)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "b " GLOBAL_REFERENCE(function) "\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#else

#define defineUnaryDoubleOpWrapper(function) \
    static MathThunk UnaryDoubleOpWrapper(function) = 0
#endif
defineUnaryDoubleOpWrapper(jsRound);
defineUnaryDoubleOpWrapper(exp);
defineUnaryDoubleOpWrapper(log);
defineUnaryDoubleOpWrapper(floor);
defineUnaryDoubleOpWrapper(ceil);
static const double oneConstant = 1.0;
static const double negativeHalfConstant = -0.5;
static const double zeroConstant = 0.0;
static const double halfConstant = 0.5;
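
// These constants live in static storage so the generated thunks can load them with a
// plain address-based loadDouble() rather than materializing double immediates in code.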
MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntJump;
    if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
#if CPU(ARM64)
    SpecializedThunkJIT::JumpList doubleResult;
    jit.floorDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
#else
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
#endif // CPU(ARM64)
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
}
MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
#if CPU(ARM64)
    jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
#else
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
#endif // CPU(ARM64)
    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "ceil");
}
MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.loadDouble(&halfConstant, SpecializedThunkJIT::fpRegT1);
        jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "round");
}
MacroAssemblerCodeRef expThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(exp))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "exp");
}

MacroAssemblerCodeRef logThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(log))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "log");
}
MacroAssemblerCodeRef absThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointAbs())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
    jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branch32(MacroAssembler::Equal, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1 << 31)));
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    // Shame about the double int conversion here.
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "abs");
}
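
// Math.pow fast path: non-negative int32 exponents are computed with an inline
// square-and-multiply loop, and a non-integer exponent of exactly -0.5 (with base > 1)
// is computed as 1 / sqrt(base); everything else bails out to the generic native call.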
MacroAssemblerCodeRef powThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 2);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDouble(&oneConstant, SpecializedThunkJIT::fpRegT1);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    MacroAssembler::Jump nonIntExponent;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
    jit.appendFailure(jit.branch32(MacroAssembler::LessThan, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(0)));

    MacroAssembler::Jump exponentIsZero = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0);
    MacroAssembler::Label startLoop(jit.label());

    MacroAssembler::Jump exponentIsEven = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1));
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    exponentIsEven.link(&jit);
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.rshift32(MacroAssembler::TrustedImm32(1), SpecializedThunkJIT::regT0);
    jit.branchTest32(MacroAssembler::NonZero, SpecializedThunkJIT::regT0).linkTo(startLoop, &jit);

    exponentIsZero.link(&jit);

    {
        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    }

    if (jit.supportsFloatingPointSqrt()) {
        nonIntExponent.link(&jit);
        jit.loadDouble(&negativeHalfConstant, SpecializedThunkJIT::fpRegT3);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
        jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.divDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);

        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    } else
        jit.appendFailure(nonIntExponent);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "pow");
}
MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 2);
    MacroAssembler::Jump nonIntArg0Jump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
    SpecializedThunkJIT::Label doneLoadingArg0(&jit);
    MacroAssembler::Jump nonIntArg1Jump;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT1, nonIntArg1Jump);
    SpecializedThunkJIT::Label doneLoadingArg1(&jit);
    jit.mul32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.returnInt32(SpecializedThunkJIT::regT0);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg0Jump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg0Jump);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg1Jump.link(&jit);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg1Jump);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "imul");
}
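
// The array iterator next() fast path: it checks that |this| really is a JSArrayIterator,
// that the iteration kind has an inline implementation, and that the index is within the
// butterfly's public length, then performs inline loads for int32, contiguous/undecided,
// and double indexing shapes. Everything else falls back to the generic native call.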
static MacroAssemblerCodeRef arrayIteratorNextThunkGenerator(VM* vm, ArrayIterationKind kind)
{
    typedef SpecializedThunkJIT::TrustedImm32 TrustedImm32;
    typedef SpecializedThunkJIT::TrustedImmPtr TrustedImmPtr;
    typedef SpecializedThunkJIT::Address Address;
    typedef SpecializedThunkJIT::BaseIndex BaseIndex;
    typedef SpecializedThunkJIT::Jump Jump;

    SpecializedThunkJIT jit(vm);
    // Make sure we're being called on an array iterator, and load m_iteratedObject and m_nextIndex into regT0 and regT1, respectively.
    jit.loadArgumentWithSpecificClass(JSArrayIterator::info(), SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT4, SpecializedThunkJIT::regT1);

    // Early exit if we don't have a thunk for this form of iteration.
    jit.appendFailure(jit.branch32(SpecializedThunkJIT::AboveOrEqual, Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfIterationKind()), TrustedImm32(ArrayIterateKeyValue)));

    jit.loadPtr(Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfIteratedObject()), SpecializedThunkJIT::regT0);

    jit.load32(Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()), SpecializedThunkJIT::regT1);

    // Pull out the butterfly from iteratedObject.
    jit.load8(Address(SpecializedThunkJIT::regT0, JSCell::indexingTypeOffset()), SpecializedThunkJIT::regT3);
    jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);

    jit.and32(TrustedImm32(IndexingShapeMask), SpecializedThunkJIT::regT3);

    Jump notDone = jit.branch32(SpecializedThunkJIT::Below, SpecializedThunkJIT::regT1, Address(SpecializedThunkJIT::regT2, Butterfly::offsetOfPublicLength()));
    // Return the termination signal to indicate that we've finished.
    jit.move(TrustedImmPtr(vm->iterationTerminator.get()), SpecializedThunkJIT::regT0);
    jit.returnJSCell(SpecializedThunkJIT::regT0);

    notDone.link(&jit);

    if (kind == ArrayIterateKey) {
        jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
        jit.returnInt32(SpecializedThunkJIT::regT1);
        return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "array-iterator-next-key");
    }
    ASSERT(kind == ArrayIterateValue);

    // Okay, now we're returning a value so make sure we're inside the vector size.
    jit.appendFailure(jit.branch32(SpecializedThunkJIT::AboveOrEqual, SpecializedThunkJIT::regT1, Address(SpecializedThunkJIT::regT2, Butterfly::offsetOfVectorLength())));

    // So now we perform inline loads for int32, value/undecided, and double storage.
    Jump undecidedStorage = jit.branch32(SpecializedThunkJIT::Equal, SpecializedThunkJIT::regT3, TrustedImm32(UndecidedShape));
    Jump notContiguousStorage = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(ContiguousShape));

    undecidedStorage.link(&jit);

    jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);

#if USE(JSVALUE64)
    jit.load64(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight), SpecializedThunkJIT::regT0);
    Jump notHole = jit.branchTest64(SpecializedThunkJIT::NonZero, SpecializedThunkJIT::regT0);
    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT0);
    notHole.link(&jit);
    jit.addPtr(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
    jit.returnJSValue(SpecializedThunkJIT::regT0);
#else
    jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfTag()), SpecializedThunkJIT::regT3);
    Jump notHole = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(JSValue::EmptyValueTag));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT1);
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT0);
    jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
    jit.returnJSValue(SpecializedThunkJIT::regT0, JSInterfaceJIT::regT1);
    notHole.link(&jit);
    jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfPayload()), SpecializedThunkJIT::regT0);
    jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
    jit.move(SpecializedThunkJIT::regT3, SpecializedThunkJIT::regT1);
    jit.returnJSValue(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
#endif
    notContiguousStorage.link(&jit);

    Jump notInt32Storage = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(Int32Shape));
    jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
    jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfPayload()), SpecializedThunkJIT::regT0);
    jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
    jit.returnInt32(SpecializedThunkJIT::regT0);
    notInt32Storage.link(&jit);

    jit.appendFailure(jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(DoubleShape)));
    jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
    jit.loadDouble(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight), SpecializedThunkJIT::fpRegT0);
    jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "array-iterator-next-value");
}
MacroAssemblerCodeRef arrayIteratorNextKeyThunkGenerator(VM* vm)
{
    return arrayIteratorNextThunkGenerator(vm, ArrayIterateKey);
}

MacroAssemblerCodeRef arrayIteratorNextValueThunkGenerator(VM* vm)
{
    return arrayIteratorNextThunkGenerator(vm, ArrayIterateValue);
}

} // namespace JSC

#endif // ENABLE(JIT)