/*
 * Copyright (C) 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "ThunkGenerators.h"

#include "CodeBlock.h"
#include "DFGSpeculativeJIT.h"
#include "JITOperations.h"
#include "JSArrayIterator.h"
#include "MathCommon.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "JSCInlines.h"
#include "SpecializedThunkJIT.h"
#include <wtf/InlineASM.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/StringImpl.h>

#if ENABLE(JIT)

namespace JSC {

inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
{
    if (ASSERT_DISABLED)
        return;
    CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
    jit.abortWithReason(TGInvalidPointer);
    isNonZero.link(&jit);
    jit.pushToSave(pointerGPR);
    jit.load8(pointerGPR, pointerGPR);
    jit.popToRestore(pointerGPR);
}

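// The thunks below are the shared slow paths for JS calls. emitPointerValidation() above is a
// debug-only sanity check: it aborts on a null code pointer and then performs a one-byte load
// through it, so a bogus pointer crashes here, close to its source, instead of at a later jump.
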
// We will jump here if the JIT code tries to make a call, but the
// linking helper (C++ code) decides to throw an exception instead.
MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm)
{
    CCallHelpers jit(vm);

    // The call pushed a return address, so we need to pop it back off to re-align the stack,
    // even though we won't use it.
    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);

    jit.copyCalleeSavesToVMCalleeSavesBuffer();

    jit.setupArguments(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    jit.jumpToExceptionHandler();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Throw exception from call slow path thunk"));
}

static void slowPathFor(
    CCallHelpers& jit, VM* vm, Sprt_JITOperation_ECli slowPathFunction)
{
    jit.emitFunctionPrologue();
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
#if OS(WINDOWS) && CPU(X86_64)
    // Windows X86_64 needs some space pointed to by arg0 for return types larger than 64 bits.
    // Other argument values are shifted by 1. Use space on the stack for our two return values.
    // Moving the stack down maxFrameExtentForSlowPathCall bytes gives us room for our 3 arguments
    // and space for the 16 byte return area.
    jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.move(GPRInfo::regT2, GPRInfo::argumentGPR2);
    jit.addPtr(CCallHelpers::TrustedImm32(32), CCallHelpers::stackPointerRegister, GPRInfo::argumentGPR0);
    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR, 8), GPRInfo::returnValueGPR2);
    jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR), GPRInfo::returnValueGPR);
    jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
#else
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.setupArgumentsWithExecState(GPRInfo::regT2);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
#endif

    // This slow call will return the address of one of the following:
    // 1) Exception throwing thunk.
    // 2) Host call return value returner thingy.
    // 3) The function to call.
    // The second return value GPR will hold a non-zero value for tail calls.

    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.emitFunctionEpilogue();

    RELEASE_ASSERT(reinterpret_cast<void*>(KeepTheFrame) == reinterpret_cast<void*>(0));
    CCallHelpers::Jump doNotTrash = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::returnValueGPR2);

    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
    jit.prepareForTailCallSlow(GPRInfo::returnValueGPR);

    doNotTrash.link(&jit);
    jit.jump(GPRInfo::returnValueGPR);
}

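// A rough sketch of the contract slowPathFor() implements, grounded in the comments above: the
// C++ slow path hands back two values, the machine code address to jump to in returnValueGPR and
// a flag in returnValueGPR2 that is non-zero when the call site is a tail call. For something
// like "function f() { return g(); }" the flag is non-zero, so the thunk restores the return
// address and runs the tail-call shuffler before jumping; otherwise it jumps with the frame kept.
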
MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
{
    // The return address is on the stack or in the link register. We will hence
    // save the return address to the call frame while we make a C++ function call
    // to perform linking and lazy compilation if necessary. We expect the callee
    // to be in regT0/regT1 (payload/tag), the CallFrame to have already
    // been adjusted, and all other registers to be available for use.
    CCallHelpers jit(vm);

    slowPathFor(jit, vm, operationLinkCall);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Link call slow path thunk"));
}

// For closure optimizations, we only include calls, since if you're using closures for
// object construction then you're going to lose big time anyway.
MacroAssemblerCodeRef linkPolymorphicCallThunkGenerator(VM* vm)
{
    CCallHelpers jit(vm);

    slowPathFor(jit, vm, operationLinkPolymorphicCall);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Link polymorphic call slow path thunk"));
}

// FIXME: We should distinguish between a megamorphic virtual call vs. a slow
// path virtual call so that we can enable fast tail calls for megamorphic
// virtual calls by using the shuffler.
// https://bugs.webkit.org/show_bug.cgi?id=148831
MacroAssemblerCodeRef virtualThunkFor(VM* vm, CallLinkInfo& callLinkInfo)
{
    // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate JIT operation.

    CCallHelpers jit(vm);

    CCallHelpers::JumpList slowCase;

    // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the
    // slow path execution for the profiler.
    jit.add32(
        CCallHelpers::TrustedImm32(1),
        CCallHelpers::Address(GPRInfo::regT2, CallLinkInfo::offsetOfSlowPathCount()));

    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.

#if USE(JSVALUE64)
    slowCase.append(
        jit.branchTest64(
            CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::tagMaskRegister));
#else
    slowCase.append(
        jit.branch32(
            CCallHelpers::NotEqual, GPRInfo::regT1,
            CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
    jit.emitLoadStructure(GPRInfo::regT0, GPRInfo::regT4, GPRInfo::regT1);
    slowCase.append(
        jit.branchPtr(
            CCallHelpers::NotEqual,
            CCallHelpers::Address(GPRInfo::regT4, Structure::classInfoOffset()),
            CCallHelpers::TrustedImmPtr(JSFunction::info())));

    // Now we know we have a JSFunction.

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
        GPRInfo::regT4);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor(
                callLinkInfo.specializationKind())),
        GPRInfo::regT4);
    slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4));

    // Now we know that we have a CodeBlock, and we're committed to making a fast
    // call.

    // Make a tail call. This will return back to JIT code.
    emitPointerValidation(jit, GPRInfo::regT4);
    if (callLinkInfo.isTailCall()) {
        jit.preserveReturnAddressAfterCall(GPRInfo::regT0);
        jit.prepareForTailCallSlow(GPRInfo::regT4);
    }
    jit.jump(GPRInfo::regT4);

    slowCase.link(&jit);

    // Here we don't know anything, so revert to the full slow path.

    slowPathFor(jit, vm, operationVirtualCall);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Virtual %s slow path thunk",
        callLinkInfo.callMode() == CallMode::Regular ? "call" : callLinkInfo.callMode() == CallMode::Tail ? "tail call" : "construct"));
}

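// The native call trampolines below bridge from JIT code into C/C++ host functions.
// EnterViaCall is the normal entry and emits its own prologue; EnterViaJump is used when a
// specialized thunk bails out and tail-calls into the native implementation, in which case the
// thunk has already pushed the tag registers, and they are restored on entry here.
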
enum ThunkEntryType { EnterViaCall, EnterViaJump };

static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall)
{
    // FIXME: This should be able to log ShadowChicken prologue packets.
    // https://bugs.webkit.org/show_bug.cgi?id=155689

    int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);

    JSInterfaceJIT jit(vm);

    if (entryType == EnterViaCall)
        jit.emitFunctionPrologue();
#if USE(JSVALUE64)
    else if (entryType == EnterViaJump) {
        // We're coming from a specialized thunk that has saved the prior tag registers' contents.
        // Restore them now.
#if CPU(ARM64)
        jit.popPair(JSInterfaceJIT::tagTypeNumberRegister, JSInterfaceJIT::tagMaskRegister);
#else
        jit.pop(JSInterfaceJIT::tagMaskRegister);
        jit.pop(JSInterfaceJIT::tagTypeNumberRegister);
#endif
    }
#endif

    jit.emitPutToCallFrameHeader(0, JSStack::CodeBlock);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86) && USE(JSVALUE32_64)
    // Calling convention: f(ecx, edx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); // Align stack after prologue.

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);

#elif CPU(X86_64)
#if !OS(WINDOWS)
    // Calling convention: f(edi, esi, edx, ecx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

#else
    // Calling convention: f(ecx, edx, r8, r9, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    // Leave space for the callee parameter home addresses.
    // At this point the stack is aligned to 16 bytes, but if this changes at some point, we need to emit code to align it.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

#elif CPU(ARM64)
    COMPILE_ASSERT(ARM64Registers::x0 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_0);
    COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1);
    COMPILE_ASSERT(ARM64Registers::x2 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_2);

    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARM64Registers::x1);
    jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2);
    jit.call(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction));
#elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
#if CPU(MIPS)
    // Allocate stack space for (unused) 16 bytes (8-byte aligned) for 4 arguments.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif

    // Calling convention is f(argumentGPR0, argumentGPR1, ...).
    // Host function signature is f(ExecState*).
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::argumentGPR1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

#if CPU(MIPS)
    // Restore stack space
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif
#else
#error "JIT not supported on this platform."
    UNUSED_PARAM(executableOffsetToFunction);
    abortWithReason(TGNotSupported);
#endif

    // Check for an exception
#if USE(JSVALUE64)
    jit.load64(vm->addressOfException(), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
#else
    JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::AbsoluteAddress(vm->addressOfException()),
        JSInterfaceJIT::TrustedImm32(0));
#endif

    jit.emitFunctionEpilogue();
    jit.ret();

    // Handle an exception
    exceptionHandler.link(&jit);

    jit.copyCalleeSavesToVMCalleeSavesBuffer();
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(-12), JSInterfaceJIT::stackPointerRegister);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT0);
    jit.push(JSInterfaceJIT::regT0);
#else
#if OS(WINDOWS)
    // Allocate space on stack for the 4 parameter registers.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);
#endif
    jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), JSInterfaceJIT::regT3);
    jit.call(JSInterfaceJIT::regT3);
#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#elif OS(WINDOWS)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

    jit.jumpToExceptionHandler();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("native %s%s trampoline", entryType == EnterViaJump ? "Tail " : "", toCString(kind).data()));
}

MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForCall);
}

MacroAssemblerCodeRef nativeTailCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForCall, EnterViaJump);
}

MacroAssemblerCodeRef nativeConstructGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForConstruct);
}

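// Arity fixup: the caller passed fewer arguments than the callee expects, so the frame is slid
// down and the missing slots are filled with undefined. Hypothetical example (not from the code
// itself): the callee wants 5 arguments and got 2, so the fixup count arriving in argumentGPR0
// is 3. Any remainder modulo stackAlignmentRegisters() is filled in place just past the existing
// frame contents; the aligned part is handled by copying the frame down that many slots, filling
// the gap with undefined, and then moving callFrameRegister and the stack pointer by 8 bytes per
// slot.
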
MacroAssemblerCodeRef arityFixupGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);

    // We enter with fixup count in argumentGPR0
    // We have the guarantee that a0, a1, a2, t3, t4 and t5 (or t0 for Windows) are all distinct :-)
#if USE(JSVALUE64)
#if OS(WINDOWS)
    const GPRReg extraTemp = JSInterfaceJIT::regT0;
#else
    const GPRReg extraTemp = JSInterfaceJIT::regT5;
#endif
#if CPU(X86_64)
    jit.pop(JSInterfaceJIT::regT4);
#endif
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::argumentGPR2);
    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::argumentGPR2);

    // Check to see if we have extra slots we can use
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
    jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
    JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), extraTemp);
    JSInterfaceJIT::Label fillExtraSlots(jit.label());
    jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight));
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
    jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
    jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
    JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
    noExtraSlot.link(&jit);

    jit.neg64(JSInterfaceJIT::argumentGPR0);

    // Move current frame down argumentGPR0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load64(JSInterfaceJIT::regT3, extraTemp);
    jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);

    // Fill in argumentGPR0 missing arg slots with undefined
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), extraTemp);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);

    // Adjust call frame register and stack pointer to account for missing args
    jit.move(JSInterfaceJIT::argumentGPR0, extraTemp);
    jit.lshift64(JSInterfaceJIT::TrustedImm32(3), extraTemp);
    jit.addPtr(extraTemp, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(extraTemp, JSInterfaceJIT::stackPointerRegister);

    done.link(&jit);

#if CPU(X86_64)
    jit.push(JSInterfaceJIT::regT4);
#endif
    jit.ret();
#else
#if CPU(X86)
    jit.pop(JSInterfaceJIT::regT4);
#endif
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::argumentGPR2);
    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::argumentGPR2);

    // Check to see if we have extra slots we can use
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
    jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
    JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
    JSInterfaceJIT::Label fillExtraSlots(jit.label());
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, PayloadOffset));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, TagOffset));
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
    jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
    jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
    JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
    noExtraSlot.link(&jit);

    jit.neg32(JSInterfaceJIT::argumentGPR0);

    // Move current frame down argumentGPR0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, PayloadOffset), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, TagOffset), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);

    // Fill in argumentGPR0 missing arg slots with undefined
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);

    // Adjust call frame register and stack pointer to account for missing args
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::regT5);
    jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT5);
    jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::stackPointerRegister);

    done.link(&jit);

#if CPU(X86)
    jit.push(JSInterfaceJIT::regT4);
#endif
    jit.ret();
#endif

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("fixup arity"));
}

MacroAssemblerCodeRef unreachableGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);

    jit.breakpoint();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("unreachable thunk"));
}

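// The specialized thunks below fast-path a few Math and String builtins. Every check that can
// fail is appended to the thunk's failure list; on failure the thunk falls back to the generic
// native call stub passed to finalize() (ctiNativeTailCall here), so the slow cases end up in
// the ordinary host function.
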
static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
{
    jit.loadJSStringArgument(*vm, SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0));

    jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));

    // Load the character
    SpecializedThunkJIT::JumpList is16Bit;
    SpecializedThunkJIT::JumpList cont8Bit;
    // Load the string flags
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::flagsOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::dataOffset()), SpecializedThunkJIT::regT0);
    is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
    cont8Bit.link(&jit);
}

static void charToString(SpecializedThunkJIT& jit, VM* vm, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
{
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::TrustedImm32(0x100)));
    jit.move(MacroAssembler::TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), scratch);
    jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
    jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
}

MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charCodeAt");
}

MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charAt");
}

MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "fromCharCode");
}

MacroAssemblerCodeRef clz32ThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntArgJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArgJump);

    SpecializedThunkJIT::Label convertedArgumentReentry(&jit);
    jit.countLeadingZeros32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnInt32(SpecializedThunkJIT::regT1);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArgJump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(convertedArgumentReentry, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArgJump);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "clz32");
}

MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointSqrt())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "sqrt");
}

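// Math.floor/ceil/trunc/round/exp/log thunks need to call the C math functions with the double
// argument and result in the platform's FP register while staying compatible with the JIT's
// register and stack expectations. defineUnaryDoubleOpWrapper() therefore emits a small assembly
// shim per function (or defines a null fallback when no shim is available for the platform),
// and the thunks reach the shim through callDoubleToDoublePreservingReturn().
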
#define UnaryDoubleOpWrapper(function) function##Wrapper
enum MathThunkCallingConvention { };
typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);

#if CPU(X86_64) && COMPILER(GCC_OR_CLANG) && (OS(DARWIN) || OS(LINUX))

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "pushq %rax\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "popq %rcx\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC_OR_CLANG) && OS(LINUX) && defined(__PIC__)
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "pushl %ebx\n" \
        "subl $20, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call __x86.get_pc_thunk.bx\n" \
        "addl $_GLOBAL_OFFSET_TABLE_, %ebx\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $20, %esp\n" \
        "popl %ebx\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC_OR_CLANG) && (OS(DARWIN) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "subl $20, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $20, %esp\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM_THUMB2) && COMPILER(GCC_OR_CLANG) && PLATFORM(IOS)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        ".thumb\n" \
        ".thumb_func " THUMB_FUNC_PARAM(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "push {lr}\n" \
        "vmov r0, r1, d0\n" \
        "blx " GLOBAL_REFERENCE(function) "\n" \
        "vmov d0, r0, r1\n" \
        "pop {lr}\n" \
        "bx lr\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM64)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "b " GLOBAL_REFERENCE(function) "\n" \
        ".previous" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(MSVC) && OS(WINDOWS)

// MSVC does not accept floor, etc, to be called directly from inline assembly, so we need to wrap these functions.
static double (_cdecl *floorFunction)(double) = floor;
static double (_cdecl *ceilFunction)(double) = ceil;
static double (_cdecl *truncFunction)(double) = trunc;
static double (_cdecl *expFunction)(double) = exp;
static double (_cdecl *logFunction)(double) = log;
static double (_cdecl *jsRoundFunction)(double) = jsRound;

#define defineUnaryDoubleOpWrapper(function) \
    extern "C" __declspec(naked) MathThunkCallingConvention function##Thunk(MathThunkCallingConvention) \
    { \
        __asm \
        { \
        __asm sub esp, 20 \
        __asm movsd mmword ptr [esp], xmm0 \
        __asm call function##Function \
        __asm fstp qword ptr [esp] \
        __asm movsd xmm0, mmword ptr [esp] \
        __asm add esp, 20 \
        __asm ret \
        } \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#else

#define defineUnaryDoubleOpWrapper(function) \
    static MathThunk UnaryDoubleOpWrapper(function) = 0

#endif

defineUnaryDoubleOpWrapper(jsRound);
defineUnaryDoubleOpWrapper(exp);
defineUnaryDoubleOpWrapper(log);
defineUnaryDoubleOpWrapper(floor);
defineUnaryDoubleOpWrapper(ceil);
defineUnaryDoubleOpWrapper(trunc);

static const double oneConstant = 1.0;
static const double negativeHalfConstant = -0.5;
static const double halfConstant = 0.5;

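// The thunk generators below reference these constants by address (for example
// loadDouble(TrustedImmPtr(&halfConstant), ...)) rather than encoding the doubles as immediates.
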
MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntJump;
    if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);

    if (jit.supportsFloatingPointRounding()) {
        SpecializedThunkJIT::JumpList doubleResult;
        jit.floorDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT0);
        return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
    }

    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
}

MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    if (jit.supportsFloatingPointRounding())
        jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    else
        jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));

    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "ceil");
}

MacroAssemblerCodeRef truncThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(trunc) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    if (jit.supportsFloatingPointRounding())
        jit.roundTowardZeroDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    else
        jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(trunc));

    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "trunc");
}

MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), SpecializedThunkJIT::fpRegT1);
        jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "round");
}

MacroAssemblerCodeRef expThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(exp))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "exp");
}

MacroAssemblerCodeRef logThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(log))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "log");
}

MacroAssemblerCodeRef absThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointAbs())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    // Branchless integer abs: regT1 = regT0 >> 31 is all ones for a negative input, so
    // (x + mask) ^ mask == |x|. The Signed check below bails out for INT_MIN, whose absolute
    // value does not fit in an int32.
    jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
    jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branchTest32(MacroAssembler::Signed, SpecializedThunkJIT::regT0));
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    // Shame about the double int conversion here.
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "abs");
}

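// Math.pow fast path: for a non-negative integer exponent the loop below is square-and-multiply
// (multiply the accumulator by the base whenever the low exponent bit is set, square the base,
// shift the exponent right); for the one non-integer exponent it recognizes, -0.5 with a base
// greater than 1, it computes 1 / sqrt(x). Everything else falls through to the slow path.
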
MacroAssemblerCodeRef powThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 2);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), SpecializedThunkJIT::fpRegT1);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    MacroAssembler::Jump nonIntExponent;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
    jit.appendFailure(jit.branch32(MacroAssembler::LessThan, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(0)));

    MacroAssembler::Jump exponentIsZero = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0);
    MacroAssembler::Label startLoop(jit.label());

    MacroAssembler::Jump exponentIsEven = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1));
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    exponentIsEven.link(&jit);
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.rshift32(MacroAssembler::TrustedImm32(1), SpecializedThunkJIT::regT0);
    jit.branchTest32(MacroAssembler::NonZero, SpecializedThunkJIT::regT0).linkTo(startLoop, &jit);

    exponentIsZero.link(&jit);

    {
        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    }

    if (jit.supportsFloatingPointSqrt()) {
        nonIntExponent.link(&jit);
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&negativeHalfConstant), SpecializedThunkJIT::fpRegT3);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
        jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.divDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);

        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    } else
        jit.appendFailure(nonIntExponent);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "pow");
}

MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 2);
    MacroAssembler::Jump nonIntArg0Jump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
    SpecializedThunkJIT::Label doneLoadingArg0(&jit);
    MacroAssembler::Jump nonIntArg1Jump;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT1, nonIntArg1Jump);
    SpecializedThunkJIT::Label doneLoadingArg1(&jit);
    jit.mul32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.returnInt32(SpecializedThunkJIT::regT0);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg0Jump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg0Jump);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg1Jump.link(&jit);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg1Jump);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "imul");
}

MacroAssemblerCodeRef randomThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 0);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

#if USE(JSVALUE64)
    jit.emitRandomThunk(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT3, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "random");
#else
    return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
#endif
}

} // namespace JSC

#endif // ENABLE(JIT)