/*
 * Copyright (C) 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "ThunkGenerators.h"

#include "CodeBlock.h"
#include "DFGSpeculativeJIT.h"
#include "JITOperations.h"
#include "JSArrayIterator.h"
#include "MathCommon.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "JSCInlines.h"
#include "SpecializedThunkJIT.h"
#include <wtf/InlineASM.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/StringImpl.h>

#if ENABLE(JIT)

namespace JSC {

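// Sanity check for a code pointer we are about to call or jump to: a null pointer aborts with
// TGInvalidPointer, and a bogus non-null pointer is dereferenced here so that it crashes near the
// thunk that produced it rather than at some later call site.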
inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
{
    if (ASSERT_DISABLED)
        return;
    CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
    jit.abortWithReason(TGInvalidPointer);
    isNonZero.link(&jit);
    jit.pushToSave(pointerGPR);
    jit.load8(pointerGPR, pointerGPR);
    jit.popToRestore(pointerGPR);
}

// We will jump here if the JIT code tries to make a call, but the
// linking helper (C++ code) decides to throw an exception instead.
MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm)
{
    CCallHelpers jit(vm);

    // The call pushed a return address, so we need to pop it back off to re-align the stack,
    // even though we won't use it.
    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);

    jit.setupArguments(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    jit.jumpToExceptionHandler();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Throw exception from call slow path thunk"));
}

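// Shared tail used by the call-linking thunks below: set up a frame, call the given slow-path
// operation with the CallLinkInfo that the JIT left in regT2, and then jump to whatever code
// pointer the operation returns (the callee, a host-call trampoline, or an exception thunk).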
static void slowPathFor(
    CCallHelpers& jit, VM* vm, P_JITOperation_ECli slowPathFunction)
{
    jit.emitFunctionPrologue();
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.setupArgumentsWithExecState(GPRInfo::regT2);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);

    // This slow call will return the address of one of the following:
    // 1) Exception throwing thunk.
    // 2) Host call return value returner thingy.
    // 3) The function to call.
    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.emitFunctionEpilogue();
    jit.jump(GPRInfo::returnValueGPR);
}

MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
{
    // The return address is on the stack or in the link register. We will hence
    // save the return address to the call frame while we make a C++ function call
    // to perform linking and lazy compilation if necessary. We expect the callee
    // to be in regT0/regT1 (payload/tag), the CallFrame to have already
    // been adjusted, and all other registers to be available for use.

    CCallHelpers jit(vm);

    slowPathFor(jit, vm, operationLinkCall);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Link call slow path thunk"));
}

// For closure optimizations, we only include calls, since if you're using closures for
// object construction then you're going to lose big time anyway.
MacroAssemblerCodeRef linkPolymorphicCallThunkGenerator(VM* vm)
{
    CCallHelpers jit(vm);

    slowPathFor(jit, vm, operationLinkPolymorphicCall);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Link polymorphic call slow path thunk"));
}

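// Fallback used when a call site could not be linked to a single callee: check that the callee
// really is a JSFunction that already has compiled code and tail-call it directly; otherwise fall
// through to operationVirtualCall.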
MacroAssemblerCodeRef virtualThunkFor(VM* vm, CallLinkInfo& callLinkInfo)
{
    // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate JIT operation.

    CCallHelpers jit(vm);

    CCallHelpers::JumpList slowCase;

    // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the
    // slow path execution for the profiler.
    jit.add32(
        CCallHelpers::TrustedImm32(1),
        CCallHelpers::Address(GPRInfo::regT2, CallLinkInfo::offsetOfSlowPathCount()));

    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.

#if USE(JSVALUE64)
    jit.move(CCallHelpers::TrustedImm64(TagMask), GPRInfo::regT4);

    slowCase.append(
        jit.branchTest64(
            CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::regT4));
#else
    slowCase.append(
        jit.branch32(
            CCallHelpers::NotEqual, GPRInfo::regT1,
            CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
    AssemblyHelpers::emitLoadStructure(jit, GPRInfo::regT0, GPRInfo::regT4, GPRInfo::regT1);
    slowCase.append(
        jit.branchPtr(
            CCallHelpers::NotEqual,
            CCallHelpers::Address(GPRInfo::regT4, Structure::classInfoOffset()),
            CCallHelpers::TrustedImmPtr(JSFunction::info())));

    // Now we know we have a JSFunction.

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
        GPRInfo::regT4);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor(
                callLinkInfo.specializationKind(), callLinkInfo.registerPreservationMode())),
        GPRInfo::regT4);
    slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4));

    // Now we know that we have a CodeBlock, and we're committed to making a fast
    // call.

    // Make a tail call. This will return back to JIT code.
    emitPointerValidation(jit, GPRInfo::regT4);
    jit.jump(GPRInfo::regT4);

    slowCase.link(&jit);

    // Here we don't know anything, so revert to the full slow path.

    slowPathFor(jit, vm, operationVirtualCall);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Virtual %s%s slow path thunk at CodePtr(%p)",
            callLinkInfo.specializationKind() == CodeForCall ? "call" : "construct",
            callLinkInfo.registerPreservationMode() == MustPreserveRegisters ? " that preserves registers" : "",
            callLinkInfo.callReturnLocation().dataLocation()));
}

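// nativeForGenerator() builds the trampoline that transitions from JS into a host (C/C++)
// function. The body is selected per target ABI: the ExecState is passed in the first argument
// register, the NativeFunction pointer is loaded out of the callee's executable, and any
// exception left behind by the host call is routed to operationVMHandleException.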
enum ThunkEntryType { EnterViaCall, EnterViaJump };

static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall)
{
    int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);

    JSInterfaceJIT jit(vm);

    if (entryType == EnterViaCall)
        jit.emitFunctionPrologue();

    jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86)
    // Calling convention:      f(ecx, edx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); // Align stack after prologue.

    // call the function
    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);

#elif CPU(X86_64)
#if !OS(WINDOWS)
    // Calling convention:      f(edi, esi, edx, ecx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));
#else
    // Calling convention:      f(ecx, edx, r8, r9, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    // Leave space for the callee parameter home addresses.
    // At this point the stack is aligned to 16 bytes, but if this changes at some point, we need to emit code to align it.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

#elif CPU(ARM64)
    COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT1, prev_callframe_not_trampled_by_T1);
    COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT3, prev_callframe_not_trampled_by_T3);
    COMPILE_ASSERT(ARM64Registers::x0 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_0);
    COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1);
    COMPILE_ASSERT(ARM64Registers::x2 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_2);

    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARM64Registers::x1);
    jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2);
    jit.call(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction));
#elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
#if CPU(MIPS)
    // Allocate stack space for (unused) 16 bytes (8-byte aligned) for 4 arguments.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif

    // Calling convention is f(argumentGPR0, argumentGPR1, ...).
    // Host function signature is f(ExecState*).
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::argumentGPR1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

#if CPU(MIPS)
    // Restore stack space
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif
#else
#error "JIT not supported on this platform."
    UNUSED_PARAM(executableOffsetToFunction);
    abortWithReason(TGNotSupported);
#endif

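    // At this point the host function has returned. Its EncodedJSValue result is already in the
    // C/C++ return register(s), so nothing needs to be moved before returning to JS; we only have
    // to check whether the call left an exception behind.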
    // Check for an exception
#if USE(JSVALUE64)
    jit.load64(vm->addressOfException(), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
#else
    JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::AbsoluteAddress(vm->addressOfException()),
        JSInterfaceJIT::TrustedImm32(0));
#endif

    jit.emitFunctionEpilogue();
    // Return.
    jit.ret();

    // Handle an exception
    exceptionHandler.link(&jit);

    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(-12), JSInterfaceJIT::stackPointerRegister);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::regT0);
    jit.push(JSInterfaceJIT::regT0);
#else
#if OS(WINDOWS)
    // Allocate space on stack for the 4 parameter registers.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::argumentGPR0);
#endif
    jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), JSInterfaceJIT::regT3);
    jit.call(JSInterfaceJIT::regT3);
#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#elif OS(WINDOWS)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

    jit.jumpToExceptionHandler();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("native %s%s trampoline", entryType == EnterViaJump ? "Tail " : "", toCString(kind).data()));
}

MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForCall);
}

MacroAssemblerCodeRef nativeTailCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForCall, EnterViaJump);
}

MacroAssemblerCodeRef nativeConstructGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForConstruct);
}

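// Arity fixup: invoked when a function is called with fewer arguments than it declares. regT0
// holds the fixup amount in aligned stack units. The thunk slides the existing frame down by that
// many slots, fills the newly exposed argument slots with undefined, adjusts the call frame and
// stack pointers, and installs the return thunk passed in regT7 (64-bit) / regT5 (32-bit) as the
// frame's return PC, stashing the original return PC just above the moved frame.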
MacroAssemblerCodeRef arityFixupGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);

    // We enter with fixup count, in aligned stack units, in regT0 and the return thunk in
    // regT5 on 32-bit and regT7 on 64-bit.
#if USE(JSVALUE64)
#  if CPU(X86_64)
    jit.pop(JSInterfaceJIT::regT4);
#  endif
    jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0);
    jit.neg64(JSInterfaceJIT::regT0);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT6);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);

    // Move current frame down regT0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load64(JSInterfaceJIT::regT6, JSInterfaceJIT::regT1);
    jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);

    // Fill in regT0 - 1 missing arg slots with undefined
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT1);
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);

    // Adjust call frame register and stack pointer to account for missing args
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1);
    jit.lshift64(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister);

    // Save the original return PC.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1);
    jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));

    // Install the new return PC.
    jit.storePtr(GPRInfo::regT7, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()));

#  if CPU(X86_64)
    jit.push(JSInterfaceJIT::regT4);
#  endif
    jit.ret();
#else
#  if CPU(X86)
    jit.pop(JSInterfaceJIT::regT4);
#  endif
    jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0);
    jit.neg32(JSInterfaceJIT::regT0);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);

    // Move current frame down regT0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load32(JSInterfaceJIT::regT3, JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, 4), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);

    // Fill in regT0 - 1 missing arg slots with undefined
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);

    // Adjust call frame register and stack pointer to account for missing args
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1);
    jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister);

    // Save the original return PC.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1);
    jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));

    // Install the new return PC.
    jit.storePtr(GPRInfo::regT5, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()));

#  if CPU(X86)
    jit.push(JSInterfaceJIT::regT4);
#  endif
    jit.ret();
#endif

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("fixup arity"));
}

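// Return thunks used when the baseline JIT invokes a getter or setter through a regular JS call:
// they recover the true return address that was stashed in the outgoing argument area, pop the
// stack space that was reserved for that call, and jump back to it. The getter variant first
// moves the call's result into regT0 (and regT1 on 32-bit).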
MacroAssemblerCodeRef baselineGetterReturnThunkGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);

#if USE(JSVALUE64)
    jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0);
#else
    jit.setupResults(GPRInfo::regT0, GPRInfo::regT1);
#endif

    unsigned numberOfParameters = 0;
    numberOfParameters++; // The 'this' argument.
    numberOfParameters++; // The true return PC.

    unsigned numberOfRegsForCall =
        JSStack::CallFrameHeaderSize + numberOfParameters;

    unsigned numberOfBytesForCall =
        numberOfRegsForCall * sizeof(Register) - sizeof(CallerFrameAndPC);

    unsigned alignedNumberOfBytesForCall =
        WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);

    // The real return address is stored above the arguments. We passed one argument, which is
    // 'this'. So argument at index 1 is the return address.
    jit.loadPtr(
        AssemblyHelpers::Address(
            AssemblyHelpers::stackPointerRegister,
            (virtualRegisterForArgument(1).offset() - JSStack::CallerFrameAndPCSize) * sizeof(Register)),
        GPRInfo::regT2);

    jit.addPtr(
        AssemblyHelpers::TrustedImm32(alignedNumberOfBytesForCall),
        AssemblyHelpers::stackPointerRegister);

    jit.jump(GPRInfo::regT2);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("baseline getter return thunk"));
}

MacroAssemblerCodeRef baselineSetterReturnThunkGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);

    unsigned numberOfParameters = 0;
    numberOfParameters++; // The 'this' argument.
    numberOfParameters++; // The value to set.
    numberOfParameters++; // The true return PC.

    unsigned numberOfRegsForCall =
        JSStack::CallFrameHeaderSize + numberOfParameters;

    unsigned numberOfBytesForCall =
        numberOfRegsForCall * sizeof(Register) - sizeof(CallerFrameAndPC);

    unsigned alignedNumberOfBytesForCall =
        WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);

    // The real return address is stored above the arguments. We passed two arguments, so
    // the argument at index 2 is the return address.
    jit.loadPtr(
        AssemblyHelpers::Address(
            AssemblyHelpers::stackPointerRegister,
            (virtualRegisterForArgument(2).offset() - JSStack::CallerFrameAndPCSize) * sizeof(Register)),
        GPRInfo::regT2);

    jit.addPtr(
        AssemblyHelpers::TrustedImm32(alignedNumberOfBytesForCall),
        AssemblyHelpers::stackPointerRegister);

    jit.jump(GPRInfo::regT2);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("baseline setter return thunk"));
}

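// Shared helper for the String.prototype.charCodeAt / charAt thunks: loads the 'this' string and
// the character at the int32 index passed as argument 0 into regT0. A non-string receiver, a rope
// string, or an out-of-bounds index bails out through appendFailure() to the ordinary native
// implementation.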
static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
{
    // load string
    jit.loadJSStringArgument(*vm, SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0));

    // load index
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));

    // Load the character
    SpecializedThunkJIT::JumpList is16Bit;
    SpecializedThunkJIT::JumpList cont8Bit;
    // Load the string flags
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::flagsOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::dataOffset()), SpecializedThunkJIT::regT0);
    is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
    cont8Bit.link(&jit);
}

static void charToString(SpecializedThunkJIT& jit, VM* vm, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
{
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::TrustedImm32(0x100)));
    jit.move(MacroAssembler::TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), scratch);
    jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
    jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
}

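// The specialized thunks below are tiny hand-built fast paths for common built-ins. Every
// appendFailure() branch in these generators falls back to the native implementation that is
// passed to finalize() (ctiNativeTailCall).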
MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charCodeAt");
}

MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charAt");
}

MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    // load char code
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "fromCharCode");
}

MacroAssemblerCodeRef clz32ThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntArgJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArgJump);

    SpecializedThunkJIT::Label convertedArgumentReentry(&jit);
    jit.countLeadingZeros32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnInt32(SpecializedThunkJIT::regT1);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArgJump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(convertedArgumentReentry, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArgJump);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "clz32");
}

MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointSqrt())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "sqrt");
}

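// The wrappers below let JIT code call the C math functions (floor, ceil, exp, log, jsRound)
// through callDoubleToDoublePreservingReturn(). Each platform-specific stub adapts the C calling
// convention: it keeps the stack suitably aligned and moves the double argument/result between
// the FP register the JIT uses and wherever the C ABI expects it.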
#define UnaryDoubleOpWrapper(function) function##Wrapper
enum MathThunkCallingConvention { };
typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);

#if CPU(X86_64) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "pushq %rax\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "popq %rcx\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC) && OS(LINUX) && defined(__PIC__)
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "pushl %ebx\n" \
        "subl $20, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call __x86.get_pc_thunk.bx\n" \
        "addl $_GLOBAL_OFFSET_TABLE_, %ebx\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $20, %esp\n" \
        "popl %ebx\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "subl $20, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $20, %esp\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM_THUMB2) && COMPILER(GCC) && PLATFORM(IOS)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        ".thumb\n" \
        ".thumb_func " THUMB_FUNC_PARAM(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "push {lr}\n" \
        "vmov r0, r1, d0\n" \
        "blx " GLOBAL_REFERENCE(function) "\n" \
        "vmov d0, r0, r1\n" \
        "pop {lr}\n" \
        "bx lr\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM64)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "b " GLOBAL_REFERENCE(function) "\n" \
        ".previous" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(MSVC) && OS(WINDOWS)

// MSVC does not accept floor, etc, to be called directly from inline assembly, so we need to wrap these functions.
static double (_cdecl *floorFunction)(double) = floor;
static double (_cdecl *ceilFunction)(double) = ceil;
static double (_cdecl *expFunction)(double) = exp;
static double (_cdecl *logFunction)(double) = log;
static double (_cdecl *jsRoundFunction)(double) = jsRound;

#define defineUnaryDoubleOpWrapper(function) \
    extern "C" __declspec(naked) MathThunkCallingConvention function##Thunk(MathThunkCallingConvention) \
    { \
        __asm \
        { \
        __asm sub esp, 20 \
        __asm movsd mmword ptr [esp], xmm0 \
        __asm call function##Function \
        __asm fstp qword ptr [esp] \
        __asm movsd xmm0, mmword ptr [esp] \
        __asm add esp, 20 \
        __asm ret \
        } \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#else

#define defineUnaryDoubleOpWrapper(function) \
    static MathThunk UnaryDoubleOpWrapper(function) = 0
#endif

defineUnaryDoubleOpWrapper(jsRound);
defineUnaryDoubleOpWrapper(exp);
defineUnaryDoubleOpWrapper(log);
defineUnaryDoubleOpWrapper(floor);
defineUnaryDoubleOpWrapper(ceil);

static const double oneConstant = 1.0;
static const double negativeHalfConstant = -0.5;
static const double zeroConstant = 0.0;
static const double halfConstant = 0.5;

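// Math.floor fast path: int32 arguments are returned unchanged. For doubles, ARM64 uses its
// native floor instruction; other targets truncate positive doubles inline where the hardware
// allows, send negative or NaN inputs through the C floor() wrapper, and return results that do
// not fit in an int32 as boxed doubles.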
MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntJump;
    if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
#if CPU(ARM64)
    SpecializedThunkJIT::JumpList doubleResult;
    jit.floorDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
#else
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&zeroConstant), SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
#endif // CPU(ARM64)
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
}

MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
#if CPU(ARM64)
    jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
#else
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
#endif // CPU(ARM64)
    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "ceil");
}

MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&zeroConstant), SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), SpecializedThunkJIT::fpRegT1);
        jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "round");
}

MacroAssemblerCodeRef expThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(exp))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "exp");
}

MacroAssemblerCodeRef logThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(log))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "log");
}

MacroAssemblerCodeRef absThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointAbs())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
    jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branch32(MacroAssembler::Equal, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1 << 31)));
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    // Shame about the double int conversion here.
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "abs");
}

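// Math.pow fast path: a non-negative int32 exponent is evaluated by square-and-multiply, with
// fpRegT1 accumulating the product. An exponent of exactly -0.5 (with a base greater than one) is
// computed as 1 / sqrt(x). Everything else, including negative integer exponents, bails to the
// host implementation.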
MacroAssemblerCodeRef powThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 2);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), SpecializedThunkJIT::fpRegT1);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    MacroAssembler::Jump nonIntExponent;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
    jit.appendFailure(jit.branch32(MacroAssembler::LessThan, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(0)));

    MacroAssembler::Jump exponentIsZero = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0);
    MacroAssembler::Label startLoop(jit.label());

    MacroAssembler::Jump exponentIsEven = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1));
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    exponentIsEven.link(&jit);
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.rshift32(MacroAssembler::TrustedImm32(1), SpecializedThunkJIT::regT0);
    jit.branchTest32(MacroAssembler::NonZero, SpecializedThunkJIT::regT0).linkTo(startLoop, &jit);

    exponentIsZero.link(&jit);

    {
        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    }

    if (jit.supportsFloatingPointSqrt()) {
        nonIntExponent.link(&jit);
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&negativeHalfConstant), SpecializedThunkJIT::fpRegT3);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
        jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.divDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);

        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    } else
        jit.appendFailure(nonIntExponent);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "pow");
}

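// Math.imul fast path: multiply the two arguments as int32. Double arguments are truncated in
// place when the hardware supports it; otherwise any non-int32 argument bails to the host
// implementation.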
MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 2);
    MacroAssembler::Jump nonIntArg0Jump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
    SpecializedThunkJIT::Label doneLoadingArg0(&jit);
    MacroAssembler::Jump nonIntArg1Jump;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT1, nonIntArg1Jump);
    SpecializedThunkJIT::Label doneLoadingArg1(&jit);
    jit.mul32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.returnInt32(SpecializedThunkJIT::regT0);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg0Jump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg0Jump);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg1Jump.link(&jit);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg1Jump);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "imul");
}

} // namespace JSC

#endif // ENABLE(JIT)