/*
 * Copyright (C) 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
27 #include "ThunkGenerators.h"
29 #include "CodeBlock.h"
30 #include "DFGSpeculativeJIT.h"
31 #include "JITOperations.h"
33 #include "JSArrayIterator.h"
35 #include "MathCommon.h"
36 #include "MaxFrameExtentForSlowPathCall.h"
37 #include "JSCInlines.h"
38 #include "SpecializedThunkJIT.h"
39 #include <wtf/InlineASM.h>
40 #include <wtf/StringPrintStream.h>
41 #include <wtf/text/StringImpl.h>
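
// emitPointerValidation() sanity-checks a pointer we are about to call or jump through:
// a null pointer trips abortWithReason(TGInvalidPointer), and the load8() probe makes an
// obviously bogus pointer crash here rather than at some harder-to-diagnose point later.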
inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
    CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
    jit.abortWithReason(TGInvalidPointer);
    jit.pushToSave(pointerGPR);
    jit.load8(pointerGPR, pointerGPR);
    jit.popToRestore(pointerGPR);

// We will jump here if the JIT code tries to make a call, but the
// linking helper (C++ code) decides to throw an exception instead.
MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm)
    // The call pushed a return address, so we need to pop it back off to re-align the stack,
    // even though we won't use it.
    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);

    jit.copyCalleeSavesToVMCalleeSavesBuffer();

    jit.setupArguments(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    jit.jumpToExceptionHandler();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Throw exception from call slow path thunk"));
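
// slowPathFor() emits the call-linking slow path shared by the thunks below: it builds a
// frame, records the top call frame, calls the given C++ slow path operation, and then
// either tail-calls or jumps to whatever machine code pointer that operation returned.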
static void slowPathFor(
    CCallHelpers& jit, VM* vm, Sprt_JITOperation_ECli slowPathFunction)
    jit.emitFunctionPrologue();
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
#if OS(WINDOWS) && CPU(X86_64)
    // Windows X86_64 needs some space pointed to by arg0 for return types larger than 64 bits.
    // Other argument values are shifted by 1. Use space on the stack for our two return values.
    // Moving the stack down maxFrameExtentForSlowPathCall bytes gives us room for our 3 arguments
    // and space for the 16 byte return area.
    jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.move(GPRInfo::regT2, GPRInfo::argumentGPR2);
    jit.addPtr(CCallHelpers::TrustedImm32(32), CCallHelpers::stackPointerRegister, GPRInfo::argumentGPR0);
    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR, 8), GPRInfo::returnValueGPR2);
    jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR), GPRInfo::returnValueGPR);
    jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);

    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.setupArgumentsWithExecState(GPRInfo::regT2);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);

    // This slow call will return the address of one of the following:
    // 1) Exception throwing thunk.
    // 2) Host call return value returner thingy.
    // 3) The function to call.
    // The second return value GPR will hold a non-zero value for tail calls.
    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.emitFunctionEpilogue();

    RELEASE_ASSERT(reinterpret_cast<void*>(KeepTheFrame) == reinterpret_cast<void*>(0));
    CCallHelpers::Jump doNotTrash = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::returnValueGPR2);

    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
    jit.prepareForTailCallSlow(GPRInfo::returnValueGPR);

    doNotTrash.link(&jit);
    jit.jump(GPRInfo::returnValueGPR);

MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
    // The return address is on the stack or in the link register. We will hence
    // save the return address to the call frame while we make a C++ function call
    // to perform linking and lazy compilation if necessary. We expect the callee
    // to be in regT0/regT1 (payload/tag), the CallFrame to have already
    // been adjusted, and all other registers to be available for use.
    CCallHelpers jit(vm);

    slowPathFor(jit, vm, operationLinkCall);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Link call slow path thunk"));

// For closure optimizations, we only include calls, since if you're using closures for
// object construction then you're going to lose big time anyway.
MacroAssemblerCodeRef linkPolymorphicCallThunkGenerator(VM* vm)
    CCallHelpers jit(vm);

    slowPathFor(jit, vm, operationLinkPolymorphicCall);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Link polymorphic call slow path thunk"));

// FIXME: We should distinguish between a megamorphic virtual call vs. a slow
// path virtual call so that we can enable fast tail calls for megamorphic
// virtual calls by using the shuffler.
// https://bugs.webkit.org/show_bug.cgi?id=148831
MacroAssemblerCodeRef virtualThunkFor(VM* vm, CallLinkInfo& callLinkInfo)
    // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate JIT operation.

    CCallHelpers jit(vm);

    CCallHelpers::JumpList slowCase;

    // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the
    // slow path execution for the profiler.
        CCallHelpers::TrustedImm32(1),
        CCallHelpers::Address(GPRInfo::regT2, CallLinkInfo::offsetOfSlowPathCount()));

    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.
        CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::tagMaskRegister));
        CCallHelpers::NotEqual, GPRInfo::regT1,
        CCallHelpers::TrustedImm32(JSValue::CellTag)));

    slowCase.append(jit.branchIfNotType(GPRInfo::regT0, JSFunctionType));

    // Now we know we have a JSFunction.
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
        CCallHelpers::Address(
            GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor(
                callLinkInfo.specializationKind())),
    slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4));
    // Now we know that we have a CodeBlock, and we're committed to making a fast call.

    // Make a tail call. This will return back to JIT code.
    emitPointerValidation(jit, GPRInfo::regT4);
    if (callLinkInfo.isTailCall()) {
        jit.preserveReturnAddressAfterCall(GPRInfo::regT0);
        jit.prepareForTailCallSlow(GPRInfo::regT4);
        jit.jump(GPRInfo::regT4);

    // Here we don't know anything, so revert to the full slow path.

    slowPathFor(jit, vm, operationVirtualCall);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        ("Virtual %s slow path thunk",
        callLinkInfo.callMode() == CallMode::Regular ? "call" : callLinkInfo.callMode() == CallMode::Tail ? "tail call" : "construct"));

enum ThunkEntryType { EnterViaCall, EnterViaJump };
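
// nativeForGenerator() builds the trampoline that transitions from JIT calling conventions
// into a host (native) function: it sets up the platform's C calling convention, passes the
// ExecState*, calls the NativeFunction stored in the callee's executable at
// executableOffsetToFunction, and then checks for (and dispatches on) a pending exception.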
static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall)
    // FIXME: This should be able to log ShadowChicken prologue packets.
    // https://bugs.webkit.org/show_bug.cgi?id=155689

    int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);

    JSInterfaceJIT jit(vm);

    if (entryType == EnterViaCall)
        jit.emitFunctionPrologue();
    else if (entryType == EnterViaJump) {
        // We're coming from a specialized thunk that has saved the prior tag registers' contents.
        jit.popPair(JSInterfaceJIT::tagTypeNumberRegister, JSInterfaceJIT::tagMaskRegister);
        jit.pop(JSInterfaceJIT::tagMaskRegister);
        jit.pop(JSInterfaceJIT::tagTypeNumberRegister);

    jit.emitPutToCallFrameHeader(0, JSStack::CodeBlock);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

    // Calling convention: f(ecx, edx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); // Align stack after prologue.

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);

    // Calling convention: f(edi, esi, edx, ecx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    // Calling convention: f(ecx, edx, r8, r9, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    // Leave space for the callee parameter home addresses.
    // At this point the stack is aligned to 16 bytes, but if this changes at some point, we need to emit code to align it.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    COMPILE_ASSERT(ARM64Registers::x0 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_0);
    COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1);
    COMPILE_ASSERT(ARM64Registers::x2 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_2);

    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARM64Registers::x1);
    jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2);
    jit.call(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction));
#elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
    // Allocate 16 bytes (8-byte aligned) of stack space for 4 (unused) arguments.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);

    // Calling convention is f(argumentGPR0, argumentGPR1, ...).
    // Host function signature is f(ExecState*).
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::argumentGPR1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

    // Restore stack space
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#error "JIT not supported on this platform."
    UNUSED_PARAM(executableOffsetToFunction);
    abortWithReason(TGNotSupported);

    // Check for an exception
    jit.load64(vm->addressOfException(), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::AbsoluteAddress(vm->addressOfException()),
        JSInterfaceJIT::TrustedImm32(0));

    jit.emitFunctionEpilogue();

    // Handle an exception
    exceptionHandler.link(&jit);

    jit.copyCalleeSavesToVMCalleeSavesBuffer();
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(-12), JSInterfaceJIT::stackPointerRegister);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT0);
    jit.push(JSInterfaceJIT::regT0);

    // Allocate space on stack for the 4 parameter registers.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);

    jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), JSInterfaceJIT::regT3);
    jit.call(JSInterfaceJIT::regT3);
#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    jit.jumpToExceptionHandler();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("native %s%s trampoline", entryType == EnterViaJump ? "Tail " : "", toCString(kind).data()));

MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
    return nativeForGenerator(vm, CodeForCall);

MacroAssemblerCodeRef nativeTailCallGenerator(VM* vm)
    return nativeForGenerator(vm, CodeForCall, EnterViaJump);

MacroAssemblerCodeRef nativeConstructGenerator(VM* vm)
    return nativeForGenerator(vm, CodeForConstruct);
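
// arityFixupGenerator() is entered when a callee was invoked with fewer arguments than it
// declares: it slides the existing frame down, fills the missing argument slots with
// undefined, and keeps the frame padded to a multiple of stackAlignmentRegisters().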
MacroAssemblerCodeRef arityFixupGenerator(VM* vm)
    JSInterfaceJIT jit(vm);

    // We enter with fixup count in argumentGPR0
    // We have the guarantee that a0, a1, a2, t3, t4 and t5 (or t0 for Windows) are all distinct :-)
    const GPRReg extraTemp = JSInterfaceJIT::regT0;
    const GPRReg extraTemp = JSInterfaceJIT::regT5;

    jit.pop(JSInterfaceJIT::regT4);

    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::argumentGPR2);
    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::argumentGPR2);

    // Check to see if we have extra slots we can use
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
    jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
    JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), extraTemp);
    JSInterfaceJIT::Label fillExtraSlots(jit.label());
    jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight));
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
    jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
    jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
    JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
    noExtraSlot.link(&jit);

    jit.neg64(JSInterfaceJIT::argumentGPR0);

    // Move current frame down argumentGPR0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load64(JSInterfaceJIT::regT3, extraTemp);
    jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);

    // Fill in argumentGPR0 missing arg slots with undefined
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), extraTemp);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);

    // Adjust call frame register and stack pointer to account for missing args
    jit.move(JSInterfaceJIT::argumentGPR0, extraTemp);
    jit.lshift64(JSInterfaceJIT::TrustedImm32(3), extraTemp);
    jit.addPtr(extraTemp, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(extraTemp, JSInterfaceJIT::stackPointerRegister);

    jit.push(JSInterfaceJIT::regT4);
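
    // The code below is the JSVALUE32_64 variant of the same arity fixup: each slot is
    // written as a separate payload/tag pair instead of a single 64-bit store.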
    jit.pop(JSInterfaceJIT::regT4);

    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::argumentGPR2);
    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::argumentGPR2);

    // Check to see if we have extra slots we can use
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
    jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
    JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
    JSInterfaceJIT::Label fillExtraSlots(jit.label());
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, PayloadOffset));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, TagOffset));
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
    jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
    jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
    JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
    noExtraSlot.link(&jit);

    jit.neg32(JSInterfaceJIT::argumentGPR0);

    // Move current frame down argumentGPR0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, PayloadOffset), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, TagOffset), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);

    // Fill in argumentGPR0 missing arg slots with undefined
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);

    // Adjust call frame register and stack pointer to account for missing args
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::regT5);
    jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT5);
    jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::stackPointerRegister);

    jit.push(JSInterfaceJIT::regT4);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("fixup arity"));

MacroAssemblerCodeRef unreachableGenerator(VM* vm)
    JSInterfaceJIT jit(vm);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("unreachable thunk"));
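
// stringCharLoad() emits the shared prologue of the charCodeAt/charAt thunks: it loads
// 'this' as a JSString, bails to the slow path if the underlying StringImpl has not been
// resolved yet, bounds-checks the index argument against the string length, and leaves the
// character code in regT0, handling both 8-bit and 16-bit backing stores.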
static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
    jit.loadJSStringArgument(*vm, SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0));

    jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));

    // Load the character
    SpecializedThunkJIT::JumpList is16Bit;
    SpecializedThunkJIT::JumpList cont8Bit;
    // Load the string flags
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::flagsOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::dataOffset()), SpecializedThunkJIT::regT0);
    is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0);
    cont8Bit.append(jit.jump());
    jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
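
// charToString() maps a character code in 'src' to the corresponding single-character
// JSString via the VM's precomputed small strings table; codes >= 0x100 and entries that
// have not been materialized yet fail over to the slow path.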
static void charToString(SpecializedThunkJIT& jit, VM* vm, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::TrustedImm32(0x100)));
    jit.move(MacroAssembler::TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), scratch);
    jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
    jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));

MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charCodeAt");

MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charAt");

MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
    SpecializedThunkJIT jit(vm, 1);

    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "fromCharCode");
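
// The clz32 thunk counts the leading zero bits of its int32 argument; a double argument is
// truncated to int32 where the hardware supports it, otherwise we fail over to the slow path.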
MacroAssemblerCodeRef clz32ThunkGenerator(VM* vm)
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntArgJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArgJump);

    SpecializedThunkJIT::Label convertedArgumentReentry(&jit);
    jit.countLeadingZeros32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnInt32(SpecializedThunkJIT::regT1);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArgJump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(convertedArgumentReentry, &jit);
        jit.appendFailure(jit.jump());
        jit.appendFailure(nonIntArgJump);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "clz32");

MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointSqrt())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "sqrt");
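
// The defineUnaryDoubleOpWrapper() macros below wrap libm-style functions (floor, ceil, exp,
// log, ...) in small assembly trampolines that keep the double argument and result in the
// platform's FP registers. For example, defineUnaryDoubleOpWrapper(floor) defines floorThunk
// and points UnaryDoubleOpWrapper(floor) at it, which the thunk generators then invoke via
// callDoubleToDoublePreservingReturn().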
#define UnaryDoubleOpWrapper(function) function##Wrapper
enum MathThunkCallingConvention { };
typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);

#if CPU(X86_64) && COMPILER(GCC_OR_CLANG) && (OS(DARWIN) || OS(LINUX))

#define defineUnaryDoubleOpWrapper(function) \
    ".globl " SYMBOL_STRING(function##Thunk) "\n" \
    HIDE_SYMBOL(function##Thunk) "\n" \
    SYMBOL_STRING(function##Thunk) ":" "\n" \
    "call " GLOBAL_REFERENCE(function) "\n" \
    MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC_OR_CLANG) && OS(LINUX) && defined(__PIC__)
#define defineUnaryDoubleOpWrapper(function) \
    ".globl " SYMBOL_STRING(function##Thunk) "\n" \
    HIDE_SYMBOL(function##Thunk) "\n" \
    SYMBOL_STRING(function##Thunk) ":" "\n" \
    "movsd %xmm0, (%esp) \n" \
    "call __x86.get_pc_thunk.bx\n" \
    "addl $_GLOBAL_OFFSET_TABLE_, %ebx\n" \
    "call " GLOBAL_REFERENCE(function) "\n" \
    "movsd (%esp), %xmm0 \n" \
    MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC_OR_CLANG) && (OS(DARWIN) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
    ".globl " SYMBOL_STRING(function##Thunk) "\n" \
    HIDE_SYMBOL(function##Thunk) "\n" \
    SYMBOL_STRING(function##Thunk) ":" "\n" \
    "movsd %xmm0, (%esp) \n" \
    "call " GLOBAL_REFERENCE(function) "\n" \
    "movsd (%esp), %xmm0 \n" \
    MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM_THUMB2) && COMPILER(GCC_OR_CLANG) && PLATFORM(IOS)

#define defineUnaryDoubleOpWrapper(function) \
    ".globl " SYMBOL_STRING(function##Thunk) "\n" \
    HIDE_SYMBOL(function##Thunk) "\n" \
    ".thumb_func " THUMB_FUNC_PARAM(function##Thunk) "\n" \
    SYMBOL_STRING(function##Thunk) ":" "\n" \
    "vmov r0, r1, d0\n" \
    "blx " GLOBAL_REFERENCE(function) "\n" \
    "vmov d0, r0, r1\n" \
    MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#define defineUnaryDoubleOpWrapper(function) \
    ".globl " SYMBOL_STRING(function##Thunk) "\n" \
    HIDE_SYMBOL(function##Thunk) "\n" \
    SYMBOL_STRING(function##Thunk) ":" "\n" \
    "b " GLOBAL_REFERENCE(function) "\n" \
    MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(MSVC) && OS(WINDOWS)

// MSVC does not accept floor, etc., to be called directly from inline assembly, so we need to wrap these functions.
static double (_cdecl *floorFunction)(double) = floor;
static double (_cdecl *ceilFunction)(double) = ceil;
static double (_cdecl *truncFunction)(double) = trunc;
static double (_cdecl *expFunction)(double) = exp;
static double (_cdecl *logFunction)(double) = log;
static double (_cdecl *jsRoundFunction)(double) = jsRound;

#define defineUnaryDoubleOpWrapper(function) \
    extern "C" __declspec(naked) MathThunkCallingConvention function##Thunk(MathThunkCallingConvention) \
        __asm movsd mmword ptr [esp], xmm0 \
        __asm call function##Function \
        __asm fstp qword ptr [esp] \
        __asm movsd xmm0, mmword ptr [esp] \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#define defineUnaryDoubleOpWrapper(function) \
    static MathThunk UnaryDoubleOpWrapper(function) = 0

defineUnaryDoubleOpWrapper(jsRound);
defineUnaryDoubleOpWrapper(exp);
defineUnaryDoubleOpWrapper(log);
defineUnaryDoubleOpWrapper(floor);
defineUnaryDoubleOpWrapper(ceil);
defineUnaryDoubleOpWrapper(trunc);

static const double oneConstant = 1.0;
static const double negativeHalfConstant = -0.5;
static const double halfConstant = 0.5;

MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntJump;
    if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);

    if (jit.supportsFloatingPointRounding()) {
        SpecializedThunkJIT::JumpList doubleResult;
        jit.floorDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT0);
        return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");

    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0));
        intResult = jit.jump();

    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");

MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    if (jit.supportsFloatingPointRounding())
        jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));

    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "ceil");

MacroAssemblerCodeRef truncThunkGenerator(VM* vm)
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(trunc) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    if (jit.supportsFloatingPointRounding())
        jit.roundTowardZeroDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(trunc));

    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "trunc");

MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
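        // For the remaining (non-negative) inputs, round-half-up reduces to truncate(x + 0.5).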
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), SpecializedThunkJIT::fpRegT1);
        jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
        intResult = jit.jump();

    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "round");

MacroAssemblerCodeRef expThunkGenerator(VM* vm)
    if (!UnaryDoubleOpWrapper(exp))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "exp");

MacroAssemblerCodeRef logThunkGenerator(VM* vm)
    if (!UnaryDoubleOpWrapper(log))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "log");

MacroAssemblerCodeRef absThunkGenerator(VM* vm)
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointAbs())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
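    // Branch-free integer abs: regT1 = regT0 >> 31 (all ones if negative), so
    // (regT0 + regT1) ^ regT1 negates negative values. INT_MIN survives as a negative
    // result and is sent to the slow path by the Signed check below.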
    jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
    jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branchTest32(MacroAssembler::Signed, SpecializedThunkJIT::regT0));
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    // Shame about the double int conversion here.
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "abs");

MacroAssemblerCodeRef powThunkGenerator(VM* vm)
    SpecializedThunkJIT jit(vm, 2);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), SpecializedThunkJIT::fpRegT1);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    MacroAssembler::Jump nonIntExponent;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
    jit.appendFailure(jit.branch32(MacroAssembler::LessThan, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(0)));
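
    // Compute pow(x, n) for a non-negative integer exponent by square-and-multiply:
    // fpRegT1 accumulates the result, fpRegT0 is repeatedly squared, and the exponent
    // in regT0 is shifted right by one bit per iteration.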
    MacroAssembler::Jump exponentIsZero = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0);
    MacroAssembler::Label startLoop(jit.label());

    MacroAssembler::Jump exponentIsEven = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1));
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    exponentIsEven.link(&jit);
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.rshift32(MacroAssembler::TrustedImm32(1), SpecializedThunkJIT::regT0);
    jit.branchTest32(MacroAssembler::NonZero, SpecializedThunkJIT::regT0).linkTo(startLoop, &jit);

    exponentIsZero.link(&jit);

    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT1);

    if (jit.supportsFloatingPointSqrt()) {
        nonIntExponent.link(&jit);
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&negativeHalfConstant), SpecializedThunkJIT::fpRegT3);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
        jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.divDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);

        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
        jit.appendFailure(nonIntExponent);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "pow");
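
// The imul thunk multiplies its two arguments as int32s; double arguments are truncated to
// int32 where the hardware supports it, otherwise they are sent to the slow path.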
MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
    SpecializedThunkJIT jit(vm, 2);
    MacroAssembler::Jump nonIntArg0Jump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
    SpecializedThunkJIT::Label doneLoadingArg0(&jit);
    MacroAssembler::Jump nonIntArg1Jump;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT1, nonIntArg1Jump);
    SpecializedThunkJIT::Label doneLoadingArg1(&jit);
    jit.mul32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.returnInt32(SpecializedThunkJIT::regT0);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg0Jump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
        jit.appendFailure(jit.jump());
        jit.appendFailure(nonIntArg0Jump);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg1Jump.link(&jit);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
        jit.appendFailure(jit.jump());
        jit.appendFailure(nonIntArg1Jump);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "imul");

MacroAssemblerCodeRef randomThunkGenerator(VM* vm)
    SpecializedThunkJIT jit(vm, 0);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.emitRandomThunk(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT3, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "random");

    return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

#endif // ENABLE(JIT)