/*
 * Copyright (C) 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "ThunkGenerators.h"

#include "CodeBlock.h"
#include "DFGSpeculativeJIT.h"
#include "JITOperations.h"
#include "JSArray.h"
#include "JSArrayIterator.h"
#include "JSStack.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "JSCInlines.h"
#include "SpecializedThunkJIT.h"
#include <wtf/InlineASM.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/StringImpl.h>

#if ENABLE(JIT)

namespace JSC {

inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
{
    if (ASSERT_DISABLED)
        return;
    CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
    jit.abortWithReason(TGInvalidPointer);
    isNonZero.link(&jit);
    jit.pushToSave(pointerGPR);
    jit.load8(pointerGPR, pointerGPR);
    jit.popToRestore(pointerGPR);
}

// We will jump here if the JIT code tries to make a call, but the
// linking helper (C++ code) decides to throw an exception instead.
MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm)
{
    CCallHelpers jit(vm);

    // The call pushed a return address, so we need to pop it back off to re-align the stack,
    // even though we won't use it.
    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);

    jit.setupArguments(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    jit.jumpToExceptionHandler();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Throw exception from call slow path thunk"));
}

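// slowPathFor emits the common tail of the call-linking thunks below: it builds a frame,
// records the top call frame, calls the given C++ slow-path operation with the CallLinkInfo
// that the caller left in regT2, and then jumps to whatever machine-code pointer that
// operation returns. An illustrative sketch of the resulting control flow (labels are
// descriptive only, not real symbols):
//
//   caller JIT code
//     -> this thunk: prologue, store topCallFrame, C call to slowPathFunction
//     -> slowPathFunction returns a code pointer P
//     -> epilogue, jump to P (exception thunk, host-call return thunk, or the callee itself)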
static void slowPathFor(
    CCallHelpers& jit, VM* vm, P_JITOperation_ECli slowPathFunction)
{
    jit.emitFunctionPrologue();
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.setupArgumentsWithExecState(GPRInfo::regT2);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);

    // This slow call will return the address of one of the following:
    // 1) Exception throwing thunk.
    // 2) Host call return value returner thingy.
    // 3) The function to call.
    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.emitFunctionEpilogue();
    jit.jump(GPRInfo::returnValueGPR);
}

static MacroAssemblerCodeRef linkForThunkGenerator(
    VM* vm, CodeSpecializationKind kind, RegisterPreservationMode registers)
{
    // The return address is on the stack or in the link register. We will hence
    // save the return address to the call frame while we make a C++ function call
    // to perform linking and lazy compilation if necessary. We expect the callee
    // to be in regT0/regT1 (payload/tag), the CallFrame to have already
    // been adjusted, and all other registers to be available for use.

    CCallHelpers jit(vm);

    slowPathFor(jit, vm, operationLinkFor(kind, registers));

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Link %s%s slow path thunk", kind == CodeForCall ? "call" : "construct", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}

MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForCall, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef linkConstructThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForConstruct, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef linkCallThatPreservesRegsThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForCall, MustPreserveRegisters);
}

MacroAssemblerCodeRef linkConstructThatPreservesRegsThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForConstruct, MustPreserveRegisters);
}

static MacroAssemblerCodeRef linkPolymorphicCallForThunkGenerator(
    VM* vm, RegisterPreservationMode registers)
{
    CCallHelpers jit(vm);

    slowPathFor(jit, vm, operationLinkPolymorphicCallFor(registers));

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Link polymorphic call%s slow path thunk", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}

// For closure optimizations, we only include calls, since if you're using closures for
// object construction then you're going to lose big time anyway.
MacroAssemblerCodeRef linkPolymorphicCallThunkGenerator(VM* vm)
{
    return linkPolymorphicCallForThunkGenerator(vm, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef linkPolymorphicCallThatPreservesRegsThunkGenerator(VM* vm)
{
    return linkPolymorphicCallForThunkGenerator(vm, MustPreserveRegisters);
}

static MacroAssemblerCodeRef virtualForThunkGenerator(
    VM* vm, CodeSpecializationKind kind, RegisterPreservationMode registers)
{
    // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate JIT operation.

    CCallHelpers jit(vm);

    CCallHelpers::JumpList slowCase;

    // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the
    // slow path execution for the profiler.
    jit.add32(
        CCallHelpers::TrustedImm32(1),
        CCallHelpers::Address(GPRInfo::regT2, OBJECT_OFFSETOF(CallLinkInfo, slowPathCount)));

    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.

#if USE(JSVALUE64)
    jit.move(CCallHelpers::TrustedImm64(TagMask), GPRInfo::regT4);

    slowCase.append(
        jit.branchTest64(
            CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::regT4));
#else
    slowCase.append(
        jit.branch32(
            CCallHelpers::NotEqual, GPRInfo::regT1,
            CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
    AssemblyHelpers::emitLoadStructure(jit, GPRInfo::regT0, GPRInfo::regT4, GPRInfo::regT1);
    slowCase.append(
        jit.branchPtr(
            CCallHelpers::NotEqual,
            CCallHelpers::Address(GPRInfo::regT4, Structure::classInfoOffset()),
            CCallHelpers::TrustedImmPtr(JSFunction::info())));

    // Now we know we have a JSFunction.

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
        GPRInfo::regT4);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind, registers)),
        GPRInfo::regT4);
    slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4));

    // Now we know that we have a CodeBlock, and we're committed to making a fast
    // call.

    // Make a tail call. This will return back to JIT code.
    emitPointerValidation(jit, GPRInfo::regT4);
    jit.jump(GPRInfo::regT4);

    slowCase.link(&jit);

    // Here we don't know anything, so revert to the full slow path.

    slowPathFor(jit, vm, operationVirtualFor(kind, registers));

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Virtual %s%s slow path thunk", kind == CodeForCall ? "call" : "construct", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}

MacroAssemblerCodeRef virtualCallThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForCall, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef virtualConstructThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForConstruct, RegisterPreservationNotRequired);
}

MacroAssemblerCodeRef virtualCallThatPreservesRegsThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForCall, MustPreserveRegisters);
}

MacroAssemblerCodeRef virtualConstructThatPreservesRegsThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForConstruct, MustPreserveRegisters);
}

enum ThunkEntryType { EnterViaCall, EnterViaJump };

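// nativeForGenerator emits the trampoline that JIT code uses to call a host (C/C++)
// function: it writes a null CodeBlock into the frame header, records the top call
// frame, moves the ExecState into the first argument register of the native calling
// convention for the current CPU, calls the NativeFunction stored in the callee's
// NativeExecutable, and then checks vm->exception before returning.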
static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall)
{
    int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);

    JSInterfaceJIT jit(vm);

    if (entryType == EnterViaCall)
        jit.emitFunctionPrologue();

    jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86)
    // Calling convention:      f(ecx, edx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); // Align stack after prologue.

    // call the function
    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);

#elif CPU(X86_64)
#if !OS(WINDOWS)
    // Calling convention:      f(edi, esi, edx, ecx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

#else
    // Calling convention:      f(ecx, edx, r8, r9, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    // Leave space for the callee parameter home addresses.
    // At this point the stack is aligned to 16 bytes, but if this changes at some point, we need to emit code to align it.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

#elif CPU(ARM64)
    COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT1, prev_callframe_not_trampled_by_T1);
    COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT3, prev_callframe_not_trampled_by_T3);
    COMPILE_ASSERT(ARM64Registers::x0 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_0);
    COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1);
    COMPILE_ASSERT(ARM64Registers::x2 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_2);

    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARM64Registers::x1);
    jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2);
    jit.call(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction));
#elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
#if CPU(MIPS)
    // Allocate stack space for (unused) 16 bytes (8-byte aligned) for 4 arguments.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif

    // Calling convention is f(argumentGPR0, argumentGPR1, ...).
    // Host function signature is f(ExecState*).
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::argumentGPR1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

#if CPU(MIPS)
    // Restore stack space
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif
#else
#error "JIT not supported on this platform."
    UNUSED_PARAM(executableOffsetToFunction);
    abortWithReason(TGNotSupported);
#endif

    // Check for an exception
#if USE(JSVALUE64)
    jit.load64(vm->addressOfException(), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
#else
    JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::AbsoluteAddress(reinterpret_cast<char*>(vm->addressOfException()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
        JSInterfaceJIT::TrustedImm32(JSValue::EmptyValueTag));
#endif

    jit.emitFunctionEpilogue();
    // Return.
    jit.ret();

    // Handle an exception
    exceptionHandler.link(&jit);

    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(-12), JSInterfaceJIT::stackPointerRegister);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::regT0);
    jit.push(JSInterfaceJIT::regT0);
#else
#if OS(WINDOWS)
    // Allocate space on stack for the 4 parameter registers.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::argumentGPR0);
#endif
    jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), JSInterfaceJIT::regT3);
    jit.call(JSInterfaceJIT::regT3);
#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#elif OS(WINDOWS)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

    jit.jumpToExceptionHandler();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("native %s%s trampoline", entryType == EnterViaJump ? "Tail " : "", toCString(kind).data()));
}

MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForCall);
}

MacroAssemblerCodeRef nativeTailCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForCall, EnterViaJump);
}

MacroAssemblerCodeRef nativeConstructGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForConstruct);
}

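// arityFixupGenerator emits the thunk that runs when a JS function is called with fewer
// arguments than it declares. In broad strokes: it slides the existing frame (header plus
// the arguments that were passed) down by the fixup amount held in regT0, fills the newly
// exposed argument slots with undefined, adjusts the call frame and stack pointers, and
// swaps the return PC so that returning goes through the arity-fixup return thunk supplied
// by the caller.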
MacroAssemblerCodeRef arityFixupGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);

    // We enter with fixup count, in aligned stack units, in regT0 and the return thunk in
    // regT5 on 32-bit and regT7 on 64-bit.
#if USE(JSVALUE64)
#  if CPU(X86_64)
    jit.pop(JSInterfaceJIT::regT4);
#  endif
    jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0);
    jit.neg64(JSInterfaceJIT::regT0);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT6);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);

    // Move current frame down regT0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load64(JSInterfaceJIT::regT6, JSInterfaceJIT::regT1);
    jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);

    // Fill in regT0 - 1 missing arg slots with undefined
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT1);
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);

    // Adjust call frame register and stack pointer to account for missing args
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1);
    jit.lshift64(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister);

    // Save the original return PC.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1);
    jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));

    // Install the new return PC.
    jit.storePtr(GPRInfo::regT7, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()));

#  if CPU(X86_64)
    jit.push(JSInterfaceJIT::regT4);
#  endif
    jit.ret();
#else
#  if CPU(X86)
    jit.pop(JSInterfaceJIT::regT4);
#  endif
    jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0);
    jit.neg32(JSInterfaceJIT::regT0);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);

    // Move current frame down regT0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load32(JSInterfaceJIT::regT3, JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, 4), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);

    // Fill in regT0 - 1 missing arg slots with undefined
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);

    // Adjust call frame register and stack pointer to account for missing args
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1);
    jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister);

    // Save the original return PC.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1);
    jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));

    // Install the new return PC.
    jit.storePtr(GPRInfo::regT5, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()));

#  if CPU(X86)
    jit.push(JSInterfaceJIT::regT4);
#  endif
    jit.ret();
#endif

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("fixup arity"));
}

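// The two generators below produce the return thunks used when a baseline inline cache
// calls a JS getter or setter. Each one recomputes how many bytes the caller pushed for
// the call frame (header plus arguments, minus the CallerFrameAndPC that the callee pops),
// rounds that up to stack alignment, pops it, and jumps to the true return address that
// was stashed in the last argument slot.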
MacroAssemblerCodeRef baselineGetterReturnThunkGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);

#if USE(JSVALUE64)
    jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0);
#else
    jit.setupResults(GPRInfo::regT0, GPRInfo::regT1);
#endif

    unsigned numberOfParameters = 0;
    numberOfParameters++; // The 'this' argument.
    numberOfParameters++; // The true return PC.

    unsigned numberOfRegsForCall =
        JSStack::CallFrameHeaderSize + numberOfParameters;

    unsigned numberOfBytesForCall =
        numberOfRegsForCall * sizeof(Register) - sizeof(CallerFrameAndPC);

    unsigned alignedNumberOfBytesForCall =
        WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);

    // The real return address is stored above the arguments. We passed one argument, which is
    // 'this'. So argument at index 1 is the return address.
    jit.loadPtr(
        AssemblyHelpers::Address(
            AssemblyHelpers::stackPointerRegister,
            (virtualRegisterForArgument(1).offset() - JSStack::CallerFrameAndPCSize) * sizeof(Register)),
        GPRInfo::regT2);

    jit.addPtr(
        AssemblyHelpers::TrustedImm32(alignedNumberOfBytesForCall),
        AssemblyHelpers::stackPointerRegister);

    jit.jump(GPRInfo::regT2);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("baseline getter return thunk"));
}

MacroAssemblerCodeRef baselineSetterReturnThunkGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);

    unsigned numberOfParameters = 0;
    numberOfParameters++; // The 'this' argument.
    numberOfParameters++; // The value to set.
    numberOfParameters++; // The true return PC.

    unsigned numberOfRegsForCall =
        JSStack::CallFrameHeaderSize + numberOfParameters;

    unsigned numberOfBytesForCall =
        numberOfRegsForCall * sizeof(Register) - sizeof(CallerFrameAndPC);

    unsigned alignedNumberOfBytesForCall =
        WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);

    // The real return address is stored above the arguments. We passed two arguments, so
    // the argument at index 2 is the return address.
    jit.loadPtr(
        AssemblyHelpers::Address(
            AssemblyHelpers::stackPointerRegister,
            (virtualRegisterForArgument(2).offset() - JSStack::CallerFrameAndPCSize) * sizeof(Register)),
        GPRInfo::regT2);

    jit.addPtr(
        AssemblyHelpers::TrustedImm32(alignedNumberOfBytesForCall),
        AssemblyHelpers::stackPointerRegister);

    jit.jump(GPRInfo::regT2);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("baseline setter return thunk"));
}

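// stringCharLoad loads the character at the integer index given by argument 0 of a
// String.prototype.charAt/charCodeAt call into regT0. It bails out to the generic path
// (via appendFailure) if the string's value pointer is null (an unresolved rope) or if
// the index is out of bounds; the unsigned compare also catches negative indices.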
static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
{
    // load string
    jit.loadJSStringArgument(*vm, SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0));

    // load index
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));

    // Load the character
    SpecializedThunkJIT::JumpList is16Bit;
    SpecializedThunkJIT::JumpList cont8Bit;
    // Load the string flags
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::flagsOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::dataOffset()), SpecializedThunkJIT::regT0);
    is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
    cont8Bit.link(&jit);
}

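// charToString maps a character code in src (which must be below 0x100) to the VM's
// preallocated single-character JSString and leaves it in dst; it fails over to the
// generic path if the code is too large or the small string has not been allocated yet.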
static void charToString(SpecializedThunkJIT& jit, VM* vm, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
{
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::TrustedImm32(0x100)));
    jit.move(MacroAssembler::TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), scratch);
    jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
    jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
}

MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charCodeAt");
}

MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charAt");
}

MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    // load char code
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "fromCharCode");
}

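// Math.clz32 thunk: counts the leading zero bits of the int32 argument. If the argument
// is not already an int32 and the hardware supports double-to-int32 truncation, it is
// truncated first; otherwise the thunk fails over to the generic native call.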
MacroAssemblerCodeRef clz32ThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntArgJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArgJump);

    SpecializedThunkJIT::Label convertedArgumentReentry(&jit);
    jit.countLeadingZeros32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnInt32(SpecializedThunkJIT::regT1);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArgJump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(convertedArgumentReentry, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArgJump);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "clz32");
}

MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointSqrt())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "sqrt");
}


#define UnaryDoubleOpWrapper(function) function##Wrapper
enum MathThunkCallingConvention { };
typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
extern "C" {

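// jsRound implements the round-half-toward-positive-infinity semantics that Math.round
// requires. Worked examples derived from the expression below:
//   jsRound(2.4)  -> ceil = 3, 3 - 2.4 = 0.6 > 0.5, so 3 - 1 = 2
//   jsRound(2.5)  -> ceil = 3, 3 - 2.5 = 0.5, not > 0.5, so 3
//   jsRound(-2.5) -> ceil = -2, -2 - (-2.5) = 0.5, not > 0.5, so -2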
double jsRound(double) REFERENCED_FROM_ASM;
double jsRound(double d)
{
    double integer = ceil(d);
    return integer - (integer - d > 0.5);
}

}

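// The defineUnaryDoubleOpWrapper variants below hand-roll tiny assembly thunks so that
// JIT-generated code can call functions such as floor or jsRound with the argument and
// result kept in the first floating-point register (xmm0/d0), instead of going through
// the platform's full C calling convention. On targets with no suitable definition the
// wrapper is null and the corresponding thunk generator falls back to the generic
// native call.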
#if CPU(X86_64) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "pushq %rax\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "popq %rcx\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC) && OS(LINUX) && defined(__PIC__)
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "pushl %ebx\n" \
        "subl $20, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call __x86.get_pc_thunk.bx\n" \
        "addl $_GLOBAL_OFFSET_TABLE_, %ebx\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $20, %esp\n" \
        "popl %ebx\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "subl $20, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $20, %esp\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM_THUMB2) && COMPILER(GCC) && PLATFORM(IOS)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        ".thumb\n" \
        ".thumb_func " THUMB_FUNC_PARAM(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "push {lr}\n" \
        "vmov r0, r1, d0\n" \
        "blx " GLOBAL_REFERENCE(function) "\n" \
        "vmov d0, r0, r1\n" \
        "pop {lr}\n" \
        "bx lr\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM64)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "b " GLOBAL_REFERENCE(function) "\n" \
        ".previous" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(MSVC) && OS(WINDOWS)

// MSVC does not accept floor, etc, to be called directly from inline assembly, so we need to wrap these functions.
static double (_cdecl *floorFunction)(double) = floor;
static double (_cdecl *ceilFunction)(double) = ceil;
static double (_cdecl *expFunction)(double) = exp;
static double (_cdecl *logFunction)(double) = log;
static double (_cdecl *jsRoundFunction)(double) = jsRound;

#define defineUnaryDoubleOpWrapper(function) \
    extern "C" __declspec(naked) MathThunkCallingConvention function##Thunk(MathThunkCallingConvention) \
    { \
        __asm \
        { \
        __asm sub esp, 20 \
        __asm movsd mmword ptr [esp], xmm0  \
        __asm call function##Function \
        __asm fstp qword ptr [esp] \
        __asm movsd xmm0, mmword ptr [esp] \
        __asm add esp, 20 \
        __asm ret \
        } \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#else

#define defineUnaryDoubleOpWrapper(function) \
    static MathThunk UnaryDoubleOpWrapper(function) = 0
#endif

defineUnaryDoubleOpWrapper(jsRound);
defineUnaryDoubleOpWrapper(exp);
defineUnaryDoubleOpWrapper(log);
defineUnaryDoubleOpWrapper(floor);
defineUnaryDoubleOpWrapper(ceil);

static const double oneConstant = 1.0;
static const double negativeHalfConstant = -0.5;
static const double zeroConstant = 0.0;
static const double halfConstant = 0.5;

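// Math.floor thunk: int32 arguments are already their own floor and are returned as-is.
// For doubles, when truncation is supported, zero takes the double-result path, negative
// or unordered inputs go through the C floor wrapper, and everything else is truncated
// directly; ARM64 instead uses its native floorDouble instruction. The result is returned
// as an int32 when it converts exactly, and as a double otherwise.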
MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntJump;
    if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
#if CPU(ARM64)
    SpecializedThunkJIT::JumpList doubleResult;
    jit.floorDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
#else
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&zeroConstant), SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
#endif // CPU(ARM64)
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
}

MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
#if CPU(ARM64)
    jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
#else
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
#endif // CPU(ARM64)
    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "ceil");
}

MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&zeroConstant), SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), SpecializedThunkJIT::fpRegT1);
        jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "round");
}

MacroAssemblerCodeRef expThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(exp))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "exp");
}

MacroAssemblerCodeRef logThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(log))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "log");
}

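// Math.abs thunk. For an int32 argument it uses the branchless sign-mask trick: with
// mask = x >> 31 (arithmetic shift), |x| == (x + mask) ^ mask. The one value this cannot
// represent, INT_MIN (1 << 31), is sent to the generic path. Doubles simply go through
// absDouble and are returned as doubles.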
MacroAssemblerCodeRef absThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointAbs())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
    jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branch32(MacroAssembler::Equal, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1 << 31)));
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    // Shame about the double int conversion here.
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "abs");
}

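// Math.pow thunk. Non-negative int32 exponents are handled with a square-and-multiply
// loop that accumulates the result in fpRegT1 starting from 1.0. The only non-integer
// exponent handled inline is exactly -0.5 with a base greater than 1.0, computed as
// 1 / sqrt(base); every other case falls back to the generic implementation.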
MacroAssemblerCodeRef powThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 2);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), SpecializedThunkJIT::fpRegT1);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    MacroAssembler::Jump nonIntExponent;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
    jit.appendFailure(jit.branch32(MacroAssembler::LessThan, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(0)));

    MacroAssembler::Jump exponentIsZero = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0);
    MacroAssembler::Label startLoop(jit.label());

    MacroAssembler::Jump exponentIsEven = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1));
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    exponentIsEven.link(&jit);
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.rshift32(MacroAssembler::TrustedImm32(1), SpecializedThunkJIT::regT0);
    jit.branchTest32(MacroAssembler::NonZero, SpecializedThunkJIT::regT0).linkTo(startLoop, &jit);

    exponentIsZero.link(&jit);

    {
        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    }

    if (jit.supportsFloatingPointSqrt()) {
        nonIntExponent.link(&jit);
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&negativeHalfConstant), SpecializedThunkJIT::fpRegT3);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
        jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.divDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);

        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    } else
        jit.appendFailure(nonIntExponent);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "pow");
}

MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 2);
    MacroAssembler::Jump nonIntArg0Jump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
    SpecializedThunkJIT::Label doneLoadingArg0(&jit);
    MacroAssembler::Jump nonIntArg1Jump;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT1, nonIntArg1Jump);
    SpecializedThunkJIT::Label doneLoadingArg1(&jit);
    jit.mul32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.returnInt32(SpecializedThunkJIT::regT0);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg0Jump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg0Jump);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg1Jump.link(&jit);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg1Jump);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "imul");
}

}

#endif // ENABLE(JIT)