/*
 * Copyright (C) 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "ThunkGenerators.h"

#include "CodeBlock.h"
#include "DFGSpeculativeJIT.h"
#include "JITOperations.h"
#include "JSArray.h"
#include "JSArrayIterator.h"
#include "JSStack.h"
#include "MathCommon.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "JSCInlines.h"
#include "SpecializedThunkJIT.h"
#include <wtf/InlineASM.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/StringImpl.h>

#if ENABLE(JIT)

namespace JSC {

inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
{
    if (ASSERT_DISABLED)
        return;
    CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
    jit.abortWithReason(TGInvalidPointer);
    isNonZero.link(&jit);
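    // Deliberately dereference the pointer: loading a byte from it makes a
    // bogus-but-non-zero pointer fault here, at the validation site, rather
    // than at some harder-to-diagnose later use. The register is saved and
    // restored around the probe because load8 clobbers it.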
    jit.pushToSave(pointerGPR);
    jit.load8(pointerGPR, pointerGPR);
    jit.popToRestore(pointerGPR);
}

// We will jump here if the JIT code tries to make a call, but the
// linking helper (C++ code) decides to throw an exception instead.
MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm)
{
    CCallHelpers jit(vm);

    // The call pushed a return address, so we need to pop it back off to re-align the stack,
    // even though we won't use it.
    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);

    jit.setupArguments(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    jit.jumpToExceptionHandler();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Throw exception from call slow path thunk"));
}

static void slowPathFor(
    CCallHelpers& jit, VM* vm, P_JITOperation_ECli slowPathFunction)
{
    jit.emitFunctionPrologue();
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.setupArgumentsWithExecState(GPRInfo::regT2);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);

    // This slow call will return the address of one of the following:
    // 1) Exception throwing thunk.
    // 2) Host call return value returner thingy.
    // 3) The function to call.
    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.emitFunctionEpilogue();
    jit.jump(GPRInfo::returnValueGPR);
}

static MacroAssemblerCodeRef linkForThunkGenerator(
    VM* vm, CodeSpecializationKind kind, RegisterPreservationMode registers)
{
    // The return address is on the stack or in the link register. We will hence
    // save the return address to the call frame while we make a C++ function call
    // to perform linking and lazy compilation if necessary. We expect the callee
    // to be in regT0/regT1 (payload/tag), the CallFrame to have already
    // been adjusted, and all other registers to be available for use.

    CCallHelpers jit(vm);

    slowPathFor(jit, vm, operationLinkFor(kind, registers));

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Link %s%s slow path thunk", kind == CodeForCall ? "call" : "construct", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}

MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForCall, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef linkConstructThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForConstruct, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef linkCallThatPreservesRegsThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForCall, MustPreserveRegisters);
}

MacroAssemblerCodeRef linkConstructThatPreservesRegsThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForConstruct, MustPreserveRegisters);
}

static MacroAssemblerCodeRef linkPolymorphicCallForThunkGenerator(
    VM* vm, RegisterPreservationMode registers)
{
    CCallHelpers jit(vm);

    slowPathFor(jit, vm, operationLinkPolymorphicCallFor(registers));

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Link polymorphic call%s slow path thunk", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}

// For closure optimizations, we only include calls, since if you're using closures for
// object construction then you're going to lose big time anyway.
MacroAssemblerCodeRef linkPolymorphicCallThunkGenerator(VM* vm)
{
    return linkPolymorphicCallForThunkGenerator(vm, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef linkPolymorphicCallThatPreservesRegsThunkGenerator(VM* vm)
{
    return linkPolymorphicCallForThunkGenerator(vm, MustPreserveRegisters);
}

static MacroAssemblerCodeRef virtualForThunkGenerator(
    VM* vm, CodeSpecializationKind kind, RegisterPreservationMode registers)
{
    // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate JIT operation.

    CCallHelpers jit(vm);

    CCallHelpers::JumpList slowCase;

    // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the
    // slow path execution for the profiler.
    jit.add32(
        CCallHelpers::TrustedImm32(1),
        CCallHelpers::Address(GPRInfo::regT2, OBJECT_OFFSETOF(CallLinkInfo, slowPathCount)));

    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.

#if USE(JSVALUE64)
    jit.move(CCallHelpers::TrustedImm64(TagMask), GPRInfo::regT4);

    slowCase.append(
        jit.branchTest64(
            CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::regT4));
#else
    slowCase.append(
        jit.branch32(
            CCallHelpers::NotEqual, GPRInfo::regT1,
            CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
    AssemblyHelpers::emitLoadStructure(jit, GPRInfo::regT0, GPRInfo::regT4, GPRInfo::regT1);
    slowCase.append(
        jit.branchPtr(
            CCallHelpers::NotEqual,
            CCallHelpers::Address(GPRInfo::regT4, Structure::classInfoOffset()),
            CCallHelpers::TrustedImmPtr(JSFunction::info())));

    // Now we know we have a JSFunction.

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
        GPRInfo::regT4);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind, registers)),
        GPRInfo::regT4);
    slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4));

    // Now we know that we have a CodeBlock, and we're committed to making a fast
    // call.

    // Make a tail call. This will return back to JIT code.
    emitPointerValidation(jit, GPRInfo::regT4);
    jit.jump(GPRInfo::regT4);

    slowCase.link(&jit);

    // Here we don't know anything, so revert to the full slow path.

    slowPathFor(jit, vm, operationVirtualFor(kind, registers));

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Virtual %s%s slow path thunk", kind == CodeForCall ? "call" : "construct", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}

MacroAssemblerCodeRef virtualCallThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForCall, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef virtualConstructThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForConstruct, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef virtualCallThatPreservesRegsThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForCall, MustPreserveRegisters);
}

MacroAssemblerCodeRef virtualConstructThatPreservesRegsThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForConstruct, MustPreserveRegisters);
}

enum ThunkEntryType { EnterViaCall, EnterViaJump };

static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall)
{
    int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);

    JSInterfaceJIT jit(vm);

    if (entryType == EnterViaCall)
        jit.emitFunctionPrologue();

    jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86)
    // Calling convention:      f(ecx, edx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); // Align stack after prologue.

    // call the function
    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);

#elif CPU(X86_64)
#if !OS(WINDOWS)
    // Calling convention:      f(edi, esi, edx, ecx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

#else
    // Calling convention:      f(ecx, edx, r8, r9, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    // Leave space for the callee parameter home addresses.
    // At this point the stack is aligned to 16 bytes, but if this changes at some point, we need to emit code to align it.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

#elif CPU(ARM64)
    COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT1, prev_callframe_not_trampled_by_T1);
    COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT3, prev_callframe_not_trampled_by_T3);
    COMPILE_ASSERT(ARM64Registers::x0 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_0);
    COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1);
    COMPILE_ASSERT(ARM64Registers::x2 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_2);

    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARM64Registers::x1);
    jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2);
    jit.call(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction));
#elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
#if CPU(MIPS)
    // Allocate 16 bytes of (unused, 8-byte aligned) stack space for the 4 argument slots the MIPS calling convention requires.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif

    // Calling convention is f(argumentGPR0, argumentGPR1, ...).
    // Host function signature is f(ExecState*).
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::argumentGPR1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

#if CPU(MIPS)
    // Restore stack space
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif
#else
#error "JIT not supported on this platform."
    UNUSED_PARAM(executableOffsetToFunction);
    abortWithReason(TGNotSupported);
#endif

    // Check for an exception
#if USE(JSVALUE64)
    jit.load64(vm->addressOfException(), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
#else
    JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::AbsoluteAddress(reinterpret_cast<char*>(vm->addressOfException()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
        JSInterfaceJIT::TrustedImm32(JSValue::EmptyValueTag));
#endif

    jit.emitFunctionEpilogue();
    // Return.
    jit.ret();

    // Handle an exception
    exceptionHandler.link(&jit);

    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(-12), JSInterfaceJIT::stackPointerRegister);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::regT0);
    jit.push(JSInterfaceJIT::regT0);
#else
#if OS(WINDOWS)
    // Allocate space on stack for the 4 parameter registers.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::argumentGPR0);
#endif
    jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), JSInterfaceJIT::regT3);
    jit.call(JSInterfaceJIT::regT3);
#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#elif OS(WINDOWS)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

    jit.jumpToExceptionHandler();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("native %s%s trampoline", entryType == EnterViaJump ? "Tail " : "", toCString(kind).data()));
}

MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForCall);
}

MacroAssemblerCodeRef nativeTailCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForCall, EnterViaJump);
}

MacroAssemblerCodeRef nativeConstructGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForConstruct);
}

MacroAssemblerCodeRef arityFixupGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);

    // We enter with fixup count, in aligned stack units, in regT0 and the return thunk in
    // regT5 on 32-bit and regT7 on 64-bit.
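    // The plan: slide the whole frame (header plus the arguments that were
    // actually passed) down by the fixup amount, fill the newly exposed
    // argument slots with undefined, then adjust the frame and stack pointers
    // and redirect the return PC through the supplied return thunk.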
#if USE(JSVALUE64)
#  if CPU(X86_64)
    jit.pop(JSInterfaceJIT::regT4);
#  endif
    jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0);
    jit.neg64(JSInterfaceJIT::regT0);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT6);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);

    // Move current frame down regT0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load64(JSInterfaceJIT::regT6, JSInterfaceJIT::regT1);
    jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);

    // Fill in regT0 - 1 missing arg slots with undefined
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT1);
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);

    // Adjust call frame register and stack pointer to account for missing args
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1);
    jit.lshift64(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister);

    // Save the original return PC.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1);
    jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));

    // Install the new return PC.
    jit.storePtr(GPRInfo::regT7, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()));

#  if CPU(X86_64)
    jit.push(JSInterfaceJIT::regT4);
#  endif
    jit.ret();
#else
#  if CPU(X86)
    jit.pop(JSInterfaceJIT::regT4);
#  endif
    jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0);
    jit.neg32(JSInterfaceJIT::regT0);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);

    // Move current frame down regT0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load32(JSInterfaceJIT::regT3, JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, 4), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);

    // Fill in regT0 - 1 missing arg slots with undefined
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);

    // Adjust call frame register and stack pointer to account for missing args
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1);
    jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister);

    // Save the original return PC.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1);
    jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));

    // Install the new return PC.
    jit.storePtr(GPRInfo::regT5, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()));

#  if CPU(X86)
    jit.push(JSInterfaceJIT::regT4);
#  endif
    jit.ret();
#endif

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("fixup arity"));
}

MacroAssemblerCodeRef baselineGetterReturnThunkGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);

#if USE(JSVALUE64)
    jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0);
#else
    jit.setupResults(GPRInfo::regT0, GPRInfo::regT1);
#endif

    unsigned numberOfParameters = 0;
    numberOfParameters++; // The 'this' argument.
    numberOfParameters++; // The true return PC.

    unsigned numberOfRegsForCall =
        JSStack::CallFrameHeaderSize + numberOfParameters;

    unsigned numberOfBytesForCall =
        numberOfRegsForCall * sizeof(Register) - sizeof(CallerFrameAndPC);

    unsigned alignedNumberOfBytesForCall =
        WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);

    // The real return address is stored above the arguments. We passed one argument, which is
    // 'this'. So argument at index 1 is the return address.
    jit.loadPtr(
        AssemblyHelpers::Address(
            AssemblyHelpers::stackPointerRegister,
            (virtualRegisterForArgument(1).offset() - JSStack::CallerFrameAndPCSize) * sizeof(Register)),
        GPRInfo::regT2);

    jit.addPtr(
        AssemblyHelpers::TrustedImm32(alignedNumberOfBytesForCall),
        AssemblyHelpers::stackPointerRegister);

    jit.jump(GPRInfo::regT2);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("baseline getter return thunk"));
}

MacroAssemblerCodeRef baselineSetterReturnThunkGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);

    unsigned numberOfParameters = 0;
    numberOfParameters++; // The 'this' argument.
    numberOfParameters++; // The value to set.
    numberOfParameters++; // The true return PC.

    unsigned numberOfRegsForCall =
        JSStack::CallFrameHeaderSize + numberOfParameters;

    unsigned numberOfBytesForCall =
        numberOfRegsForCall * sizeof(Register) - sizeof(CallerFrameAndPC);

    unsigned alignedNumberOfBytesForCall =
        WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);

    // The real return address is stored above the arguments. We passed two arguments, so
    // the argument at index 2 is the return address.
    jit.loadPtr(
        AssemblyHelpers::Address(
            AssemblyHelpers::stackPointerRegister,
            (virtualRegisterForArgument(2).offset() - JSStack::CallerFrameAndPCSize) * sizeof(Register)),
        GPRInfo::regT2);

    jit.addPtr(
        AssemblyHelpers::TrustedImm32(alignedNumberOfBytesForCall),
        AssemblyHelpers::stackPointerRegister);

    jit.jump(GPRInfo::regT2);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("baseline setter return thunk"));
}

static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
{
    // load string
    jit.loadJSStringArgument(*vm, SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
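    // A null StringImpl pointer means the JSString is an unresolved rope;
    // bail to the slow path rather than flattening it here.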
    jit.appendFailure(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0));

    // load index
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));

    // Load the character
    SpecializedThunkJIT::JumpList is16Bit;
    SpecializedThunkJIT::JumpList cont8Bit;
    // Load the string flags
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::flagsOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::dataOffset()), SpecializedThunkJIT::regT0);
    is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
    cont8Bit.link(&jit);
}

static void charToString(SpecializedThunkJIT& jit, VM* vm, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
{
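    // The VM caches single-character strings only for code units below 0x100;
    // larger values, and cache slots that have not been materialized yet
    // (null entries), fail over to the slow path.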
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::TrustedImm32(0x100)));
    jit.move(MacroAssembler::TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), scratch);
    jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
    jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
}

MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charCodeAt");
}

MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charAt");
}

MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    // load char code
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "fromCharCode");
}

MacroAssemblerCodeRef clz32ThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntArgJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArgJump);

    SpecializedThunkJIT::Label convertedArgumentReentry(&jit);
    jit.countLeadingZeros32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnInt32(SpecializedThunkJIT::regT1);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArgJump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(convertedArgumentReentry, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArgJump);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "clz32");
}

MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointSqrt())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "sqrt");
}


#define UnaryDoubleOpWrapper(function) function##Wrapper
enum MathThunkCallingConvention { };
typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);

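// Each defineUnaryDoubleOpWrapper(function) below emits a tiny assembly
// trampoline, functionThunk, that exposes the C math function under a
// register-based convention: the argument arrives and the result leaves in
// the first FP register (xmm0/d0), regardless of how the platform C ABI
// would normally pass a double. This is what lets
// SpecializedThunkJIT::callDoubleToDoublePreservingReturn invoke these
// routines without setting up a full C call.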
#if CPU(X86_64) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "pushq %rax\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "popq %rcx\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC) && OS(LINUX) && defined(__PIC__)
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "pushl %ebx\n" \
        "subl $20, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call __x86.get_pc_thunk.bx\n" \
        "addl $_GLOBAL_OFFSET_TABLE_, %ebx\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $20, %esp\n" \
        "popl %ebx\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "subl $20, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $20, %esp\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM_THUMB2) && COMPILER(GCC) && PLATFORM(IOS)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        ".thumb\n" \
        ".thumb_func " THUMB_FUNC_PARAM(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "push {lr}\n" \
        "vmov r0, r1, d0\n" \
        "blx " GLOBAL_REFERENCE(function) "\n" \
        "vmov d0, r0, r1\n" \
        "pop {lr}\n" \
        "bx lr\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM64)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "b " GLOBAL_REFERENCE(function) "\n" \
        ".previous" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(MSVC) && OS(WINDOWS)

// MSVC does not accept floor, etc, to be called directly from inline assembly, so we need to wrap these functions.
static double (_cdecl *floorFunction)(double) = floor;
static double (_cdecl *ceilFunction)(double) = ceil;
static double (_cdecl *expFunction)(double) = exp;
static double (_cdecl *logFunction)(double) = log;
static double (_cdecl *jsRoundFunction)(double) = jsRound;

#define defineUnaryDoubleOpWrapper(function) \
    extern "C" __declspec(naked) MathThunkCallingConvention function##Thunk(MathThunkCallingConvention) \
    { \
        __asm \
        { \
        __asm sub esp, 20 \
        __asm movsd mmword ptr [esp], xmm0  \
        __asm call function##Function \
        __asm fstp qword ptr [esp] \
        __asm movsd xmm0, mmword ptr [esp] \
        __asm add esp, 20 \
        __asm ret \
        } \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#else

#define defineUnaryDoubleOpWrapper(function) \
    static MathThunk UnaryDoubleOpWrapper(function) = 0
#endif

defineUnaryDoubleOpWrapper(jsRound);
defineUnaryDoubleOpWrapper(exp);
defineUnaryDoubleOpWrapper(log);
defineUnaryDoubleOpWrapper(floor);
defineUnaryDoubleOpWrapper(ceil);

static const double oneConstant = 1.0;
static const double negativeHalfConstant = -0.5;
static const double zeroConstant = 0.0;
static const double halfConstant = 0.5;

MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntJump;
    if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
#if CPU(ARM64)
    SpecializedThunkJIT::JumpList doubleResult;
    jit.floorDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
#else
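    // Fast path for positive doubles: floor is just truncation toward zero.
    // An input equal to zero returns the double unchanged (preserving -0);
    // negative, NaN, and out-of-int32-range inputs fall through to the
    // floor() call below.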
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&zeroConstant), SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
#endif // CPU(ARM64)
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
}

MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
#if CPU(ARM64)
    jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
#else
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
#endif // CPU(ARM64)
    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "ceil");
}

MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
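    // Fast path for positive doubles: Math.round rounds halfway cases toward
    // +Infinity, so round(x) is computed as truncate(x + 0.5). Zero returns
    // the input double unchanged (preserving -0); negative, NaN, and
    // out-of-int32-range inputs fall through to the jsRound call below.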
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&zeroConstant), SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), SpecializedThunkJIT::fpRegT1);
        jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "round");
}

MacroAssemblerCodeRef expThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(exp))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "exp");
}

MacroAssemblerCodeRef logThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(log))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "log");
}

MacroAssemblerCodeRef absThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointAbs())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
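    // Branchless integer abs: with sign = x >> 31 (all ones for negative x,
    // zero otherwise), (x + sign) ^ sign negates negative values and leaves
    // non-negative ones alone. abs(INT32_MIN) does not fit in an int32, so
    // that one case bails to the slow path.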
    jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
    jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branch32(MacroAssembler::Equal, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1 << 31)));
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    // Shame about the double int conversion here.
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "abs");
}

MacroAssemblerCodeRef powThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 2);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), SpecializedThunkJIT::fpRegT1);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    MacroAssembler::Jump nonIntExponent;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
    jit.appendFailure(jit.branch32(MacroAssembler::LessThan, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(0)));

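    // Exponentiation by squaring: fpRegT1 accumulates the result (starting at
    // 1.0) and fpRegT0 holds successive squarings of the base. Each iteration
    // multiplies the accumulator by the current base if the exponent's low
    // bit is set, squares the base, and shifts the exponent right, looping
    // until the exponent reaches zero.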
    MacroAssembler::Jump exponentIsZero = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0);
    MacroAssembler::Label startLoop(jit.label());

    MacroAssembler::Jump exponentIsEven = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1));
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    exponentIsEven.link(&jit);
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.rshift32(MacroAssembler::TrustedImm32(1), SpecializedThunkJIT::regT0);
    jit.branchTest32(MacroAssembler::NonZero, SpecializedThunkJIT::regT0).linkTo(startLoop, &jit);

    exponentIsZero.link(&jit);

    {
        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    }

    if (jit.supportsFloatingPointSqrt()) {
        nonIntExponent.link(&jit);
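        // The only non-integer exponent handled inline is exactly -0.5, and
        // only for bases greater than 1.0: pow(x, -0.5) is computed as
        // 1 / sqrt(x). Everything else falls back to the slow path.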
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&negativeHalfConstant), SpecializedThunkJIT::fpRegT3);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
        jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.divDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);

        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    } else
        jit.appendFailure(nonIntExponent);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "pow");
}

MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 2);
    MacroAssembler::Jump nonIntArg0Jump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
    SpecializedThunkJIT::Label doneLoadingArg0(&jit);
    MacroAssembler::Jump nonIntArg1Jump;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT1, nonIntArg1Jump);
    SpecializedThunkJIT::Label doneLoadingArg1(&jit);
    jit.mul32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.returnInt32(SpecializedThunkJIT::regT0);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg0Jump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg0Jump);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg1Jump.link(&jit);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg1Jump);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "imul");
}

}

#endif // ENABLE(JIT)