1 /*
2  * Copyright (C) 2010-2018 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
14  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
15  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
17  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
18  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
19  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
20  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
21  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
23  * THE POSSIBILITY OF SUCH DAMAGE.
24  */
25
26 #include "config.h"
27 #include "ThunkGenerators.h"
28
29 #include "CodeBlock.h"
30 #include "DFGSpeculativeJIT.h"
31 #include "JITExceptions.h"
32 #include "JITOperations.h"
33 #include "JSArray.h"
34 #include "JSBoundFunction.h"
35 #include "JSCInlines.h"
36 #include "MathCommon.h"
37 #include "MaxFrameExtentForSlowPathCall.h"
38 #include "SpecializedThunkJIT.h"
39 #include <wtf/InlineASM.h>
40 #include <wtf/StringPrintStream.h>
41 #include <wtf/text/StringImpl.h>
42
43 #if ENABLE(JIT)
44
45 namespace JSC {
46
47 template<typename TagType>
48 inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR, TagType tag)
49 {
50     if (ASSERT_DISABLED)
51         return;
52     CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
53     jit.abortWithReason(TGInvalidPointer);
54     isNonZero.link(&jit);
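    // Probe the pointer: save the register, strip the tag, and load a byte through the pointer
    // so that an invalid pointer faults here, where it is easy to diagnose, rather than at the
    // eventual call or jump site.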
55     jit.pushToSave(pointerGPR);
56     jit.untagPtr(pointerGPR, tag);
57     jit.load8(pointerGPR, pointerGPR);
58     jit.popToRestore(pointerGPR);
59 }
60
61 // We will jump here if the JIT code tries to make a call, but the
62 // linking helper (C++ code) decides to throw an exception instead.
63 MacroAssemblerCodeRef<JITThunkPtrTag> throwExceptionFromCallSlowPathGenerator(VM* vm)
64 {
65     CCallHelpers jit;
66     
67     // The call pushed a return address, so we need to pop it back off to re-align the stack,
68     // even though we won't use it.
69     jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
70
71     jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm->topEntryFrame);
72
73     jit.setupArguments<decltype(lookupExceptionHandler)>(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister);
74     jit.move(CCallHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
75     emitPointerValidation(jit, GPRInfo::nonArgGPR0, OperationPtrTag);
76     jit.call(GPRInfo::nonArgGPR0, OperationPtrTag);
77     jit.jumpToExceptionHandler(*vm);
78
79     LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
80     return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "Throw exception from call slow path thunk");
81 }
82
83 static void slowPathFor(CCallHelpers& jit, VM* vm, Sprt_JITOperation_ECli slowPathFunction)
84 {
85     jit.sanitizeStackInline(*vm, GPRInfo::nonArgGPR0);
86     jit.emitFunctionPrologue();
87     jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
88 #if OS(WINDOWS) && CPU(X86_64)
89     // Windows X86_64 needs some space pointed to by arg0 for return types larger than 64 bits.
90     // Other argument values are shifted by 1. Use space on the stack for our two return values.
91     // Moving the stack down maxFrameExtentForSlowPathCall bytes gives us room for our 3 arguments
92     // and space for the 16 byte return area.
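    // In effect the call becomes slowPathFunction(&returnArea, callFrame, callLinkInfo), where
    // callLinkInfo is the value that arrived in regT2; the two halves of the return value are
    // reloaded from the 16-byte return area below.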
93     jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
94     jit.move(GPRInfo::regT2, GPRInfo::argumentGPR2);
95     jit.addPtr(CCallHelpers::TrustedImm32(32), CCallHelpers::stackPointerRegister, GPRInfo::argumentGPR0);
96     jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
97     jit.move(CCallHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(slowPathFunction)), GPRInfo::nonArgGPR0);
98     emitPointerValidation(jit, GPRInfo::nonArgGPR0, OperationPtrTag);
99     jit.call(GPRInfo::nonArgGPR0, OperationPtrTag);
100     jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR, 8), GPRInfo::returnValueGPR2);
101     jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR), GPRInfo::returnValueGPR);
102     jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
103 #else
104     if (maxFrameExtentForSlowPathCall)
105         jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
106     jit.setupArguments<decltype(slowPathFunction)>(GPRInfo::regT2);
107     jit.move(CCallHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(slowPathFunction)), GPRInfo::nonArgGPR0);
108     emitPointerValidation(jit, GPRInfo::nonArgGPR0, OperationPtrTag);
109     jit.call(GPRInfo::nonArgGPR0, OperationPtrTag);
110     if (maxFrameExtentForSlowPathCall)
111         jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
112 #endif
113
114     // This slow call will return the address of one of the following:
115     // 1) Exception throwing thunk.
116     // 2) Host call return value returner thingy.
117     // 3) The function to call.
118     // The second return value GPR will hold a non-zero value for tail calls.
119
120     emitPointerValidation(jit, GPRInfo::returnValueGPR, JSEntryPtrTag);
121     jit.emitFunctionEpilogue();
122     jit.untagReturnAddress();
123
124     RELEASE_ASSERT(reinterpret_cast<void*>(KeepTheFrame) == reinterpret_cast<void*>(0));
125     CCallHelpers::Jump doNotTrash = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::returnValueGPR2);
126
127     jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
128     jit.prepareForTailCallSlow(GPRInfo::returnValueGPR);
129
130     doNotTrash.link(&jit);
131     jit.jump(GPRInfo::returnValueGPR, JSEntryPtrTag);
132 }
133
134 MacroAssemblerCodeRef<JITThunkPtrTag> linkCallThunkGenerator(VM* vm)
135 {
136     // The return address is on the stack or in the link register. We will hence
137     // save the return address to the call frame while we make a C++ function call
138     // to perform linking and lazy compilation if necessary. We expect the callee
139     // to be in regT0/regT1 (payload/tag), the CallFrame to have already
140     // been adjusted, and all other registers to be available for use.
141     CCallHelpers jit;
142
143     slowPathFor(jit, vm, operationLinkCall);
144
145     LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
146     return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "Link call slow path thunk");
147 }
148
149 // For closure optimizations, we only include calls, since if you're using closures for
150 // object construction then you're going to lose big time anyway.
151 MacroAssemblerCodeRef<JITThunkPtrTag> linkPolymorphicCallThunkGenerator(VM* vm)
152 {
153     CCallHelpers jit;
154
155     slowPathFor(jit, vm, operationLinkPolymorphicCall);
156
157     LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
158     return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "Link polymorphic call slow path thunk");
159 }
160
161 // FIXME: We should distinguish between a megamorphic virtual call vs. a slow
162 // path virtual call so that we can enable fast tail calls for megamorphic
163 // virtual calls by using the shuffler.
164 // https://bugs.webkit.org/show_bug.cgi?id=148831
165 MacroAssemblerCodeRef<JITStubRoutinePtrTag> virtualThunkFor(VM* vm, CallLinkInfo& callLinkInfo)
166 {
167     // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
168     // The return address is on the stack, or in the link register. We will hence
169     // jump to the callee, or save the return address to the call frame while we
170     // make a C++ function call to the appropriate JIT operation.
171
172     CCallHelpers jit;
173     
174     CCallHelpers::JumpList slowCase;
175     
176     // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the
177     // slow path execution for the profiler.
178     jit.add32(
179         CCallHelpers::TrustedImm32(1),
180         CCallHelpers::Address(GPRInfo::regT2, CallLinkInfo::offsetOfSlowPathCount()));
181
182     // FIXME: we should have a story for eliminating these checks. In many cases,
183     // the DFG knows that the value is definitely a cell, or definitely a function.
184     
185 #if USE(JSVALUE64)
186     GPRReg tagMaskRegister = GPRInfo::tagMaskRegister;
187     if (callLinkInfo.isTailCall()) {
188         // Tail calls could have clobbered the GPRInfo::tagMaskRegister because they
189     // restore callee-saved registers before getting here. So, let's materialize
190         // the TagMask in a temp register and use the temp instead.
191         tagMaskRegister = GPRInfo::regT4;
192         jit.move(CCallHelpers::TrustedImm64(TagMask), tagMaskRegister);
193     }
194     slowCase.append(
195         jit.branchTest64(CCallHelpers::NonZero, GPRInfo::regT0, tagMaskRegister));
196 #else
197     slowCase.append(
198         jit.branch32(
199             CCallHelpers::NotEqual, GPRInfo::regT1,
200             CCallHelpers::TrustedImm32(JSValue::CellTag)));
201 #endif
202     auto notJSFunction = jit.branchIfNotType(GPRInfo::regT0, JSFunctionType);
203     
204     // Now we know we have a JSFunction.
205
206     jit.loadPtr(
207         CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
208         GPRInfo::regT4);
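    // The executable pointer is stored poisoned; XOR with the JSFunctionPoison key to recover
    // the real pointer before loading from it.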
209     jit.xorPtr(CCallHelpers::TrustedImmPtr(JSFunctionPoison::key()), GPRInfo::regT4);
210     jit.loadPtr(
211         CCallHelpers::Address(
212             GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor(
213                 callLinkInfo.specializationKind())),
214         GPRInfo::regT4);
215     slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4));
216     
217     // Now we know that we have a CodeBlock, and we're committed to making a fast
218     // call.
219 #if USE(JSVALUE64)
220     jit.move(CCallHelpers::TrustedImm64(JITCodePoison::key()), GPRInfo::regT1);
221     jit.xor64(GPRInfo::regT1, GPRInfo::regT4);
222 #endif
223
224     // Make a tail call. This will return back to JIT code.
225     JSInterfaceJIT::Label callCode(jit.label());
226     emitPointerValidation(jit, GPRInfo::regT4, JSEntryPtrTag);
227     if (callLinkInfo.isTailCall()) {
228         jit.preserveReturnAddressAfterCall(GPRInfo::regT0);
229         jit.prepareForTailCallSlow(GPRInfo::regT4);
230     }
231     jit.jump(GPRInfo::regT4, JSEntryPtrTag);
232
233     notJSFunction.link(&jit);
234     slowCase.append(jit.branchIfNotType(GPRInfo::regT0, InternalFunctionType));
235     void* executableAddress = vm->getCTIInternalFunctionTrampolineFor(callLinkInfo.specializationKind()).executableAddress();
236     jit.move(CCallHelpers::TrustedImmPtr(executableAddress), GPRInfo::regT4);
237     jit.jump().linkTo(callCode, &jit);
238
239     slowCase.link(&jit);
240     
241     // Here we don't know anything, so revert to the full slow path.
242     slowPathFor(jit, vm, operationVirtualCall);
243
244     LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
245     return FINALIZE_CODE(
246         patchBuffer, JITStubRoutinePtrTag,
247         "Virtual %s slow path thunk",
248         callLinkInfo.callMode() == CallMode::Regular ? "call" : callLinkInfo.callMode() == CallMode::Tail ? "tail call" : "construct");
249 }
250
251 enum ThunkEntryType { EnterViaCall, EnterViaJumpWithSavedTags, EnterViaJumpWithoutSavedTags };
252 enum class ThunkFunctionType { JSFunction, InternalFunction };
253
254 static MacroAssemblerCodeRef<JITThunkPtrTag> nativeForGenerator(VM* vm, ThunkFunctionType thunkFunctionType, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall)
255 {
256     // FIXME: This should be able to log ShadowChicken prologue packets.
257     // https://bugs.webkit.org/show_bug.cgi?id=155689
258     
259     int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);
260     
261     JSInterfaceJIT jit(vm);
262
263     switch (entryType) {
264     case EnterViaCall:
265         jit.emitFunctionPrologue();
266         break;
267     case EnterViaJumpWithSavedTags:
268 #if USE(JSVALUE64)
269         // We're coming from a specialized thunk that has saved the prior tag registers' contents.
270         // Restore them now.
271 #if CPU(ARM64)
272         jit.popPair(JSInterfaceJIT::tagTypeNumberRegister, JSInterfaceJIT::tagMaskRegister);
273 #else
274         jit.pop(JSInterfaceJIT::tagMaskRegister);
275         jit.pop(JSInterfaceJIT::tagTypeNumberRegister);
276 #endif
277 #endif
278         break;
279     case EnterViaJumpWithoutSavedTags:
280         jit.move(JSInterfaceJIT::framePointerRegister, JSInterfaceJIT::stackPointerRegister);
281         break;
282     }
283
284     jit.emitPutToCallFrameHeader(0, CallFrameSlot::codeBlock);
285     jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
286
287 #if CPU(X86)
288     // Calling convention:      f(ecx, edx, ...);
289     // Host function signature: f(ExecState*);
290     jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);
291
292     jit.subPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); // Align stack after prologue.
293
294     // call the function
295     jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, JSInterfaceJIT::regT1);
296     if (thunkFunctionType == ThunkFunctionType::JSFunction) {
297         jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
298         jit.xorPtr(JSInterfaceJIT::TrustedImmPtr(JSFunctionPoison::key()), JSInterfaceJIT::regT1);
299         jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction), JSEntryPtrTag);
300     } else
301         jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, InternalFunction::offsetOfNativeFunctionFor(kind)), JSEntryPtrTag);
302
303     jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);
304
305 #elif CPU(X86_64)
306 #if !OS(WINDOWS)
307     // Calling convention:      f(edi, esi, edx, ecx, ...);
308     // Host function signature: f(ExecState*);
309     jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);
310
311     jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, X86Registers::esi);
312     if (thunkFunctionType == ThunkFunctionType::JSFunction) {
313         jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
314         jit.xorPtr(JSInterfaceJIT::TrustedImmPtr(JSFunctionPoison::key()), X86Registers::r9);
315         jit.loadPtr(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction), X86Registers::r9);
316     } else
317         jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, InternalFunction::offsetOfNativeFunctionFor(kind)), X86Registers::r9);
318     jit.move(JSInterfaceJIT::TrustedImm64(NativeCodePoison::key()), X86Registers::esi);
319     jit.xor64(X86Registers::esi, X86Registers::r9);
320     jit.call(X86Registers::r9, JSEntryPtrTag);
321
322 #else
323     // Calling convention:      f(ecx, edx, r8, r9, ...);
324     // Host function signature: f(ExecState*);
325     jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);
326
327     // Leave space for the callee parameter home addresses.
328     // At this point the stack is aligned to 16 bytes, but if this changes at some point, we need to emit code to align it.
329     jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
330
331     jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, X86Registers::edx);
332     if (thunkFunctionType == ThunkFunctionType::JSFunction) {
333         jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
334         jit.xorPtr(JSInterfaceJIT::TrustedImmPtr(JSFunctionPoison::key()), X86Registers::r9);
335         jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction), JSEntryPtrTag);
336     } else
337         jit.call(JSInterfaceJIT::Address(X86Registers::edx, InternalFunction::offsetOfNativeFunctionFor(kind)), JSEntryPtrTag);
338
339     jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
340 #endif
341
342 #elif CPU(ARM64)
343     COMPILE_ASSERT(ARM64Registers::x0 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_0);
344     COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1);
345     COMPILE_ASSERT(ARM64Registers::x2 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_2);
346
347     // Host function signature: f(ExecState*);
348     jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0);
349
350     jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, ARM64Registers::x1);
351     if (thunkFunctionType == ThunkFunctionType::JSFunction) {
352         jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2);
353         jit.xorPtr(JSInterfaceJIT::TrustedImmPtr(JSFunctionPoison::key()), ARM64Registers::x2);
354         jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction), ARM64Registers::x2);
355     } else
356         jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, InternalFunction::offsetOfNativeFunctionFor(kind)), ARM64Registers::x2);
357     jit.move(JSInterfaceJIT::TrustedImm64(NativeCodePoison::key()), ARM64Registers::x1);
358     jit.xor64(ARM64Registers::x1, ARM64Registers::x2);
359     jit.call(ARM64Registers::x2, JSEntryPtrTag);
360
361 #elif CPU(ARM) || CPU(MIPS)
362 #if CPU(MIPS)
363     // Allocate 16 bytes (8-byte aligned) of unused stack space for the 4 argument slots.
364     jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
365 #endif
366
367     // Calling convention is f(argumentGPR0, argumentGPR1, ...).
368     // Host function signature is f(ExecState*).
369     jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);
370
371     jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, JSInterfaceJIT::argumentGPR1);
372     if (thunkFunctionType == ThunkFunctionType::JSFunction) {
373         jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
374         jit.xorPtr(JSInterfaceJIT::TrustedImmPtr(JSFunctionPoison::key()), JSInterfaceJIT::regT2);
375         jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction), JSEntryPtrTag);
376     } else
377         jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, InternalFunction::offsetOfNativeFunctionFor(kind)), JSEntryPtrTag);
378
379 #if CPU(MIPS)
380     // Restore stack space
381     jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
382 #endif
383 #else
384 #error "JIT not supported on this platform."
385     UNUSED_PARAM(executableOffsetToFunction);
386     abortWithReason(TGNotSupported);
387 #endif
388
389     // Check for an exception
390 #if USE(JSVALUE64)
391     jit.load64(vm->addressOfException(), JSInterfaceJIT::regT2);
392     JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
393 #else
394     JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
395         JSInterfaceJIT::NotEqual,
396         JSInterfaceJIT::AbsoluteAddress(vm->addressOfException()),
397         JSInterfaceJIT::TrustedImm32(0));
398 #endif
399
400     jit.emitFunctionEpilogue();
401     // Return.
402     jit.ret();
403
404     // Handle an exception
405     exceptionHandler.link(&jit);
406
407     jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm->topEntryFrame);
408     jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
409
410 #if CPU(X86) && USE(JSVALUE32_64)
411     jit.subPtr(JSInterfaceJIT::TrustedImm32(4), JSInterfaceJIT::stackPointerRegister);
412     jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT0);
413     jit.push(JSInterfaceJIT::regT0);
414 #else
415 #if OS(WINDOWS)
416     // Allocate space on stack for the 4 parameter registers.
417     jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
418 #endif
419     jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);
420 #endif
421     jit.move(JSInterfaceJIT::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(operationVMHandleException)), JSInterfaceJIT::regT3);
422     jit.call(JSInterfaceJIT::regT3, OperationPtrTag);
423 #if CPU(X86) && USE(JSVALUE32_64)
424     jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);
425 #elif OS(WINDOWS)
426     jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
427 #endif
428
429     jit.jumpToExceptionHandler(*vm);
430
431     LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
432     return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "%s %s%s trampoline", thunkFunctionType == ThunkFunctionType::JSFunction ? "native" : "internal", entryType == EnterViaJumpWithSavedTags ? "Tail With Saved Tags " : entryType == EnterViaJumpWithoutSavedTags ? "Tail Without Saved Tags " : "", toCString(kind).data());
433 }
434
435 MacroAssemblerCodeRef<JITThunkPtrTag> nativeCallGenerator(VM* vm)
436 {
437     return nativeForGenerator(vm, ThunkFunctionType::JSFunction, CodeForCall);
438 }
439
440 MacroAssemblerCodeRef<JITThunkPtrTag> nativeTailCallGenerator(VM* vm)
441 {
442     return nativeForGenerator(vm, ThunkFunctionType::JSFunction, CodeForCall, EnterViaJumpWithSavedTags);
443 }
444
445 MacroAssemblerCodeRef<JITThunkPtrTag> nativeTailCallWithoutSavedTagsGenerator(VM* vm)
446 {
447     return nativeForGenerator(vm, ThunkFunctionType::JSFunction, CodeForCall, EnterViaJumpWithoutSavedTags);
448 }
449
450 MacroAssemblerCodeRef<JITThunkPtrTag> nativeConstructGenerator(VM* vm)
451 {
452     return nativeForGenerator(vm, ThunkFunctionType::JSFunction, CodeForConstruct);
453 }
454
455 MacroAssemblerCodeRef<JITThunkPtrTag> internalFunctionCallGenerator(VM* vm)
456 {
457     return nativeForGenerator(vm, ThunkFunctionType::InternalFunction, CodeForCall);
458 }
459
460 MacroAssemblerCodeRef<JITThunkPtrTag> internalFunctionConstructGenerator(VM* vm)
461 {
462     return nativeForGenerator(vm, ThunkFunctionType::InternalFunction, CodeForConstruct);
463 }
464
465 MacroAssemblerCodeRef<JITThunkPtrTag> arityFixupGenerator(VM* vm)
466 {
467     JSInterfaceJIT jit(vm);
468
469     // We enter with the fixup count in argumentGPR0.
470     // We have the guarantee that a0, a1, a2, t3, t4 and t5 (or t0 for Windows) are all distinct :-)
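    // Outline of what follows: (1) fill any free slots within the current stack-alignment chunk
    // with undefined, (2) move the existing frame down by the remaining (alignment-rounded)
    // number of slots, and (3) fill the newly exposed argument slots with undefined.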
471 #if USE(JSVALUE64)
472 #if OS(WINDOWS)
473     const GPRReg extraTemp = JSInterfaceJIT::regT0;
474 #else
475     const GPRReg extraTemp = JSInterfaceJIT::regT5;
476 #endif
477 #  if CPU(X86_64)
478     jit.pop(JSInterfaceJIT::regT4);
479 #  endif
480     jit.tagReturnAddress();
481 #if CPU(ARM64) && USE(POINTER_PROFILING)
482     jit.loadPtr(JSInterfaceJIT::Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT3);
483     jit.addPtr(JSInterfaceJIT::TrustedImm32(sizeof(CallerFrameAndPC)), GPRInfo::callFrameRegister, extraTemp);
484     jit.untagPtr(GPRInfo::regT3, extraTemp);
485     PtrTag tempReturnPCTag = static_cast<PtrTag>(random());
486     jit.move(JSInterfaceJIT::TrustedImmPtr(tempReturnPCTag), extraTemp);
487     jit.tagPtr(GPRInfo::regT3, extraTemp);
488     jit.storePtr(GPRInfo::regT3, JSInterfaceJIT::Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()));
489 #endif
490     jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
491     jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrameSlot::argumentCount * sizeof(Register)), JSInterfaceJIT::argumentGPR2);
492     jit.add32(JSInterfaceJIT::TrustedImm32(CallFrame::headerSizeInRegisters), JSInterfaceJIT::argumentGPR2);
493
494     // Check to see if we have extra slots we can use
495     jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
496     jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
497     JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
498     jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), extraTemp);
499     JSInterfaceJIT::Label fillExtraSlots(jit.label());
500     jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight));
501     jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
502     jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
503     jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
504     JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
505     noExtraSlot.link(&jit);
506
507     jit.neg64(JSInterfaceJIT::argumentGPR0);
508
509     // Adjust the call frame register and stack pointer to account for the missing args.
510     // We need to change the stack pointer before performing the copy/fill loops, because
511     // the stack space below the stack pointer is considered unused by the OS, and the OS
512     // may corrupt this space when constructing a signal stack.
513     jit.move(JSInterfaceJIT::argumentGPR0, extraTemp);
514     jit.lshift64(JSInterfaceJIT::TrustedImm32(3), extraTemp);
515     jit.addPtr(extraTemp, JSInterfaceJIT::callFrameRegister);
516     jit.untagReturnAddress();
517     jit.addPtr(extraTemp, JSInterfaceJIT::stackPointerRegister);
518     jit.tagReturnAddress();
519
520     // Move current frame down argumentGPR0 number of slots
521     JSInterfaceJIT::Label copyLoop(jit.label());
522     jit.load64(JSInterfaceJIT::regT3, extraTemp);
523     jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
524     jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
525     jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);
526
527     // Fill in argumentGPR0 missing arg slots with undefined
528     jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
529     jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), extraTemp);
530     JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
531     jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
532     jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
533     jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);
534     
535     done.link(&jit);
536
537 #if CPU(ARM64) && USE(POINTER_PROFILING)
538     jit.loadPtr(JSInterfaceJIT::Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT3);
539     jit.move(JSInterfaceJIT::TrustedImmPtr(tempReturnPCTag), extraTemp);
540     jit.untagPtr(GPRInfo::regT3, extraTemp);
541     jit.addPtr(JSInterfaceJIT::TrustedImm32(sizeof(CallerFrameAndPC)), GPRInfo::callFrameRegister, extraTemp);
542     jit.tagPtr(GPRInfo::regT3, extraTemp);
543     jit.storePtr(GPRInfo::regT3, JSInterfaceJIT::Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()));
544 #endif
545
546 #  if CPU(X86_64)
547     jit.push(JSInterfaceJIT::regT4);
548 #  endif
549     jit.ret();
550 #else // USE(JSVALUE64) section above, USE(JSVALUE32_64) section below.
551 #  if CPU(X86)
552     jit.pop(JSInterfaceJIT::regT4);
553 #  endif
554     jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
555     jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrameSlot::argumentCount * sizeof(Register)), JSInterfaceJIT::argumentGPR2);
556     jit.add32(JSInterfaceJIT::TrustedImm32(CallFrame::headerSizeInRegisters), JSInterfaceJIT::argumentGPR2);
557
558     // Check to see if we have extra slots we can use
559     jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
560     jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
561     JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
562     JSInterfaceJIT::Label fillExtraSlots(jit.label());
563     jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
564     jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, PayloadOffset));
565     jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
566     jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, TagOffset));
567     jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
568     jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
569     jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
570     JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
571     noExtraSlot.link(&jit);
572
573     jit.neg32(JSInterfaceJIT::argumentGPR0);
574
575     // Move current frame down argumentGPR0 number of slots
576     JSInterfaceJIT::Label copyLoop(jit.label());
577     jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, PayloadOffset), JSInterfaceJIT::regT5);
578     jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
579     jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, TagOffset), JSInterfaceJIT::regT5);
580     jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));
581     jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
582     jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);
583
584     // Fill in argumentGPR0 missing arg slots with undefined
585     jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
586     JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
587     jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
588     jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
589     jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
590     jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));
591
592     jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
593     jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);
594
595     // Adjust call frame register and stack pointer to account for missing args
596     jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::regT5);
597     jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT5);
598     jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::callFrameRegister);
599     jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::stackPointerRegister);
600
601     done.link(&jit);
602
603 #  if CPU(X86)
604     jit.push(JSInterfaceJIT::regT4);
605 #  endif
606     jit.ret();
607 #endif // End of USE(JSVALUE32_64) section.
608
609     LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
610     return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "fixup arity");
611 }
612
613 MacroAssemblerCodeRef<JITThunkPtrTag> unreachableGenerator(VM* vm)
614 {
615     JSInterfaceJIT jit(vm);
616
617     jit.breakpoint();
618
619     LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
620     return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "unreachable thunk");
621 }
622
623 static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
624 {
625     // load string
626     jit.loadJSStringArgument(*vm, SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);
627
628     // Load string length to regT2, and start the process of loading the data pointer into regT0
629     jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
630     jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
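    // A null value pointer means the string is an unresolved rope, so fail over to the generic
    // code path rather than trying to read its characters here.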
631     jit.appendFailure(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0));
632
633     // load index
634     jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index
635
636     // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
637     jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));
638
639     // Load the character
640     SpecializedThunkJIT::JumpList is16Bit;
641     SpecializedThunkJIT::JumpList cont8Bit;
642     // Load the string flags
643     jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::flagsOffset()), SpecializedThunkJIT::regT2);
644     jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::dataOffset()), SpecializedThunkJIT::regT0);
645     is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
646     jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0);
647     cont8Bit.append(jit.jump());
648     is16Bit.link(&jit);
649     jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
650     cont8Bit.link(&jit);
651 }
652
653 static void charToString(SpecializedThunkJIT& jit, VM* vm, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
654 {
655     jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::TrustedImm32(0x100)));
656     jit.move(MacroAssembler::TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), scratch);
657     jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
658     jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
659 }
660
661 MacroAssemblerCodeRef<JITThunkPtrTag> charCodeAtThunkGenerator(VM* vm)
662 {
663     SpecializedThunkJIT jit(vm, 1);
664     stringCharLoad(jit, vm);
665     jit.returnInt32(SpecializedThunkJIT::regT0);
666     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charCodeAt");
667 }
668
669 MacroAssemblerCodeRef<JITThunkPtrTag> charAtThunkGenerator(VM* vm)
670 {
671     SpecializedThunkJIT jit(vm, 1);
672     stringCharLoad(jit, vm);
673     charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
674     jit.returnJSCell(SpecializedThunkJIT::regT0);
675     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charAt");
676 }
677
678 MacroAssemblerCodeRef<JITThunkPtrTag> fromCharCodeThunkGenerator(VM* vm)
679 {
680     SpecializedThunkJIT jit(vm, 1);
681     // load char code
682     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
683     charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
684     jit.returnJSCell(SpecializedThunkJIT::regT0);
685     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "fromCharCode");
686 }
687
688 MacroAssemblerCodeRef<JITThunkPtrTag> clz32ThunkGenerator(VM* vm)
689 {
690     SpecializedThunkJIT jit(vm, 1);
691     MacroAssembler::Jump nonIntArgJump;
692     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArgJump);
693
694     SpecializedThunkJIT::Label convertedArgumentReentry(&jit);
695     jit.countLeadingZeros32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
696     jit.returnInt32(SpecializedThunkJIT::regT1);
697
698     if (jit.supportsFloatingPointTruncate()) {
699         nonIntArgJump.link(&jit);
700         jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
701         jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(convertedArgumentReentry, &jit);
702         jit.appendFailure(jit.jump());
703     } else
704         jit.appendFailure(nonIntArgJump);
705
706     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "clz32");
707 }
708
709 MacroAssemblerCodeRef<JITThunkPtrTag> sqrtThunkGenerator(VM* vm)
710 {
711     SpecializedThunkJIT jit(vm, 1);
712     if (!jit.supportsFloatingPointSqrt())
713         return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
714
715     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
716     jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
717     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
718     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "sqrt");
719 }
720
721
722 #define UnaryDoubleOpWrapper(function) function##Wrapper
723 enum MathThunkCallingConvention { };
724 typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
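// Each defineUnaryDoubleOpWrapper below adapts a C math function to a bare calling convention in
// which the argument and result both live in the first FP register (xmm0 / d0), so the JIT can
// call the wrapper without setting up a full C function call.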
725
726 #if CPU(X86_64) && COMPILER(GCC_OR_CLANG) && (OS(DARWIN) || OS(LINUX))
727
728 #define defineUnaryDoubleOpWrapper(function) \
729     asm( \
730         ".text\n" \
731         ".globl " SYMBOL_STRING(function##Thunk) "\n" \
732         HIDE_SYMBOL(function##Thunk) "\n" \
733         SYMBOL_STRING(function##Thunk) ":" "\n" \
734         "pushq %rax\n" \
735         "call " GLOBAL_REFERENCE(function) "\n" \
736         "popq %rcx\n" \
737         "ret\n" \
738     );\
739     extern "C" { \
740         MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
741     } \
742     static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
743
744 #elif CPU(X86) && COMPILER(GCC_OR_CLANG) && OS(LINUX) && defined(__PIC__)
745 #define defineUnaryDoubleOpWrapper(function) \
746     asm( \
747         ".text\n" \
748         ".globl " SYMBOL_STRING(function##Thunk) "\n" \
749         HIDE_SYMBOL(function##Thunk) "\n" \
750         SYMBOL_STRING(function##Thunk) ":" "\n" \
751         "pushl %ebx\n" \
752         "subl $20, %esp\n" \
753         "movsd %xmm0, (%esp) \n" \
754         "call __x86.get_pc_thunk.bx\n" \
755         "addl $_GLOBAL_OFFSET_TABLE_, %ebx\n" \
756         "call " GLOBAL_REFERENCE(function) "\n" \
757         "fstpl (%esp) \n" \
758         "movsd (%esp), %xmm0 \n" \
759         "addl $20, %esp\n" \
760         "popl %ebx\n" \
761         "ret\n" \
762     );\
763     extern "C" { \
764         MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
765     } \
766     static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
767
768 #elif CPU(X86) && COMPILER(GCC_OR_CLANG) && (OS(DARWIN) || OS(LINUX))
769 #define defineUnaryDoubleOpWrapper(function) \
770     asm( \
771         ".text\n" \
772         ".globl " SYMBOL_STRING(function##Thunk) "\n" \
773         HIDE_SYMBOL(function##Thunk) "\n" \
774         SYMBOL_STRING(function##Thunk) ":" "\n" \
775         "subl $20, %esp\n" \
776         "movsd %xmm0, (%esp) \n" \
777         "call " GLOBAL_REFERENCE(function) "\n" \
778         "fstpl (%esp) \n" \
779         "movsd (%esp), %xmm0 \n" \
780         "addl $20, %esp\n" \
781         "ret\n" \
782     );\
783     extern "C" { \
784         MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
785     } \
786     static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
787
788 #elif CPU(ARM_THUMB2) && COMPILER(GCC_OR_CLANG) && PLATFORM(IOS)
789
790 #define defineUnaryDoubleOpWrapper(function) \
791     asm( \
792         ".text\n" \
793         ".align 2\n" \
794         ".globl " SYMBOL_STRING(function##Thunk) "\n" \
795         HIDE_SYMBOL(function##Thunk) "\n" \
796         ".thumb\n" \
797         ".thumb_func " THUMB_FUNC_PARAM(function##Thunk) "\n" \
798         SYMBOL_STRING(function##Thunk) ":" "\n" \
799         "push {lr}\n" \
800         "vmov r0, r1, d0\n" \
801         "blx " GLOBAL_REFERENCE(function) "\n" \
802         "vmov d0, r0, r1\n" \
803         "pop {lr}\n" \
804         "bx lr\n" \
805     ); \
806     extern "C" { \
807         MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
808     } \
809     static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
810
811 #elif CPU(ARM64)
812
813 #define defineUnaryDoubleOpWrapper(function) \
814     asm( \
815         ".text\n" \
816         ".align 2\n" \
817         ".globl " SYMBOL_STRING(function##Thunk) "\n" \
818         HIDE_SYMBOL(function##Thunk) "\n" \
819         SYMBOL_STRING(function##Thunk) ":" "\n" \
820         "b " GLOBAL_REFERENCE(function) "\n" \
821         ".previous" \
822     ); \
823     extern "C" { \
824         MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
825     } \
826     static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
827
828 #elif CPU(X86) && COMPILER(MSVC) && OS(WINDOWS)
829
830 // MSVC does not allow floor, etc., to be called directly from inline assembly, so we need to wrap these functions.
831 static double (_cdecl *floorFunction)(double) = floor;
832 static double (_cdecl *ceilFunction)(double) = ceil;
833 static double (_cdecl *truncFunction)(double) = trunc;
834 static double (_cdecl *expFunction)(double) = exp;
835 static double (_cdecl *logFunction)(double) = log;
836 static double (_cdecl *jsRoundFunction)(double) = jsRound;
837
838 #define defineUnaryDoubleOpWrapper(function) \
839     extern "C" __declspec(naked) MathThunkCallingConvention function##Thunk(MathThunkCallingConvention) \
840     { \
841         __asm \
842         { \
843         __asm sub esp, 20 \
844         __asm movsd mmword ptr [esp], xmm0  \
845         __asm call function##Function \
846         __asm fstp qword ptr [esp] \
847         __asm movsd xmm0, mmword ptr [esp] \
848         __asm add esp, 20 \
849         __asm ret \
850         } \
851     } \
852     static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
853
854 #else
855
856 #define defineUnaryDoubleOpWrapper(function) \
857     static MathThunk UnaryDoubleOpWrapper(function) = 0
858 #endif
859
860 defineUnaryDoubleOpWrapper(jsRound);
861 defineUnaryDoubleOpWrapper(exp);
862 defineUnaryDoubleOpWrapper(log);
863 defineUnaryDoubleOpWrapper(floor);
864 defineUnaryDoubleOpWrapper(ceil);
865 defineUnaryDoubleOpWrapper(trunc);
866
867 static const double halfConstant = 0.5;
868     
869 MacroAssemblerCodeRef<JITThunkPtrTag> floorThunkGenerator(VM* vm)
870 {
871     SpecializedThunkJIT jit(vm, 1);
872     MacroAssembler::Jump nonIntJump;
873     if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
874         return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
875     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
876     jit.returnInt32(SpecializedThunkJIT::regT0);
877     nonIntJump.link(&jit);
878     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
879
880     if (jit.supportsFloatingPointRounding()) {
881         SpecializedThunkJIT::JumpList doubleResult;
882         jit.floorDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
883         jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
884         jit.returnInt32(SpecializedThunkJIT::regT0);
885         doubleResult.link(&jit);
886         jit.returnDouble(SpecializedThunkJIT::fpRegT0);
887         return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
888     }
889
890     SpecializedThunkJIT::Jump intResult;
891     SpecializedThunkJIT::JumpList doubleResult;
892     if (jit.supportsFloatingPointTruncate()) {
893         jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
894         doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
895         SpecializedThunkJIT::JumpList slowPath;
896         // Handle the negative doubles in the slow path for now.
897         slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
898         slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0));
899         intResult = jit.jump();
900         slowPath.link(&jit);
901     }
902     jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor));
903     jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
904     if (jit.supportsFloatingPointTruncate())
905         intResult.link(&jit);
906     jit.returnInt32(SpecializedThunkJIT::regT0);
907     doubleResult.link(&jit);
908     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
909     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
910 }
911
912 MacroAssemblerCodeRef<JITThunkPtrTag> ceilThunkGenerator(VM* vm)
913 {
914     SpecializedThunkJIT jit(vm, 1);
915     if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
916         return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
917     MacroAssembler::Jump nonIntJump;
918     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
919     jit.returnInt32(SpecializedThunkJIT::regT0);
920     nonIntJump.link(&jit);
921     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
922     if (jit.supportsFloatingPointRounding())
923         jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
924     else
925         jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
926
927     SpecializedThunkJIT::JumpList doubleResult;
928     jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
929     jit.returnInt32(SpecializedThunkJIT::regT0);
930     doubleResult.link(&jit);
931     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
932     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "ceil");
933 }
934
935 MacroAssemblerCodeRef<JITThunkPtrTag> truncThunkGenerator(VM* vm)
936 {
937     SpecializedThunkJIT jit(vm, 1);
938     if (!UnaryDoubleOpWrapper(trunc) || !jit.supportsFloatingPoint())
939         return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
940     MacroAssembler::Jump nonIntJump;
941     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
942     jit.returnInt32(SpecializedThunkJIT::regT0);
943     nonIntJump.link(&jit);
944     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
945     if (jit.supportsFloatingPointRounding())
946         jit.roundTowardZeroDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
947     else
948         jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(trunc));
949
950     SpecializedThunkJIT::JumpList doubleResult;
951     jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
952     jit.returnInt32(SpecializedThunkJIT::regT0);
953     doubleResult.link(&jit);
954     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
955     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "trunc");
956 }
957
958 MacroAssemblerCodeRef<JITThunkPtrTag> roundThunkGenerator(VM* vm)
959 {
960     SpecializedThunkJIT jit(vm, 1);
961     if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
962         return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
963     MacroAssembler::Jump nonIntJump;
964     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
965     jit.returnInt32(SpecializedThunkJIT::regT0);
966     nonIntJump.link(&jit);
967     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
968     SpecializedThunkJIT::Jump intResult;
969     SpecializedThunkJIT::JumpList doubleResult;
970     if (jit.supportsFloatingPointTruncate()) {
971         jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
972         doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
973         SpecializedThunkJIT::JumpList slowPath;
974         // Handle the negative doubles in the slow path for now.
975         slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
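        // Fast path for positive values: add 0.5 and truncate toward zero.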
976         jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), SpecializedThunkJIT::fpRegT1);
977         jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
978         slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
979         intResult = jit.jump();
980         slowPath.link(&jit);
981     }
982     jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound));
983     jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
984     if (jit.supportsFloatingPointTruncate())
985         intResult.link(&jit);
986     jit.returnInt32(SpecializedThunkJIT::regT0);
987     doubleResult.link(&jit);
988     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
989     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "round");
990 }
991
992 MacroAssemblerCodeRef<JITThunkPtrTag> expThunkGenerator(VM* vm)
993 {
994     if (!UnaryDoubleOpWrapper(exp))
995         return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
996     SpecializedThunkJIT jit(vm, 1);
997     if (!jit.supportsFloatingPoint())
998         return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
999     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
1000     jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
1001     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
1002     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "exp");
1003 }
1004
1005 MacroAssemblerCodeRef<JITThunkPtrTag> logThunkGenerator(VM* vm)
1006 {
1007     if (!UnaryDoubleOpWrapper(log))
1008         return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
1009     SpecializedThunkJIT jit(vm, 1);
1010     if (!jit.supportsFloatingPoint())
1011         return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
1012     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
1013     jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
1014     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
1015     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "log");
1016 }
1017
1018 MacroAssemblerCodeRef<JITThunkPtrTag> absThunkGenerator(VM* vm)
1019 {
1020     SpecializedThunkJIT jit(vm, 1);
1021     if (!jit.supportsFloatingPointAbs())
1022         return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
1023
1024 #if USE(JSVALUE64)
1025     unsigned virtualRegisterIndex = CallFrame::argumentOffset(0);
1026     jit.load64(AssemblyHelpers::addressFor(virtualRegisterIndex), GPRInfo::regT0);
1027     MacroAssembler::Jump notInteger = jit.branch64(MacroAssembler::Below, GPRInfo::regT0, GPRInfo::tagTypeNumberRegister);
1028
1029     // Abs Int32.
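    // Branchless abs: regT1 = regT0 >> 31 (all ones if negative, zero otherwise), then
    // regT0 = (regT0 + regT1) ^ regT1.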
1030     jit.rshift32(GPRInfo::regT0, MacroAssembler::TrustedImm32(31), GPRInfo::regT1);
1031     jit.add32(GPRInfo::regT1, GPRInfo::regT0);
1032     jit.xor32(GPRInfo::regT1, GPRInfo::regT0);
1033
1034     // IntMin cannot be inverted.
1035     MacroAssembler::Jump integerIsIntMin = jit.branchTest32(MacroAssembler::Signed, GPRInfo::regT0);
1036
1037     // Box and finish.
1038     jit.or64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
1039     MacroAssembler::Jump doneWithIntegers = jit.jump();
1040
1041     // Handle Doubles.
1042     notInteger.link(&jit);
1043     jit.appendFailure(jit.branchTest64(MacroAssembler::Zero, GPRInfo::regT0, GPRInfo::tagTypeNumberRegister));
1044     jit.unboxDoubleWithoutAssertions(GPRInfo::regT0, GPRInfo::regT0, FPRInfo::fpRegT0);
1045     MacroAssembler::Label absFPR0Label = jit.label();
1046     jit.absDouble(FPRInfo::fpRegT0, FPRInfo::fpRegT1);
1047     jit.boxDouble(FPRInfo::fpRegT1, GPRInfo::regT0);
1048
1049     // Tail.
1050     doneWithIntegers.link(&jit);
1051     jit.returnJSValue(GPRInfo::regT0);
1052
1053     // We know the value of regT0 is IntMin. We could load that value from memory but
1054     // it is simpler to just convert it.
1055     integerIsIntMin.link(&jit);
1056     jit.convertInt32ToDouble(GPRInfo::regT0, FPRInfo::fpRegT0);
1057     jit.jump().linkTo(absFPR0Label, &jit);
1058 #else
1059     MacroAssembler::Jump nonIntJump;
1060     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
1061     jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
1062     jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
1063     jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
1064     jit.appendFailure(jit.branchTest32(MacroAssembler::Signed, SpecializedThunkJIT::regT0));
1065     jit.returnInt32(SpecializedThunkJIT::regT0);
1066     nonIntJump.link(&jit);
1067     // Shame about the double int conversion here.
1068     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
1069     jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
1070     jit.returnDouble(SpecializedThunkJIT::fpRegT1);
1071 #endif
1072     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "abs");
1073 }
1074
1075 MacroAssemblerCodeRef<JITThunkPtrTag> imulThunkGenerator(VM* vm)
1076 {
1077     SpecializedThunkJIT jit(vm, 2);
1078     MacroAssembler::Jump nonIntArg0Jump;
1079     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
1080     SpecializedThunkJIT::Label doneLoadingArg0(&jit);
1081     MacroAssembler::Jump nonIntArg1Jump;
1082     jit.loadInt32Argument(1, SpecializedThunkJIT::regT1, nonIntArg1Jump);
1083     SpecializedThunkJIT::Label doneLoadingArg1(&jit);
1084     jit.mul32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
1085     jit.returnInt32(SpecializedThunkJIT::regT0);
1086
1087     if (jit.supportsFloatingPointTruncate()) {
1088         nonIntArg0Jump.link(&jit);
1089         jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
1090         jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
1091         jit.appendFailure(jit.jump());
1092     } else
1093         jit.appendFailure(nonIntArg0Jump);
1094
1095     if (jit.supportsFloatingPointTruncate()) {
1096         nonIntArg1Jump.link(&jit);
1097         jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
1098         jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
1099         jit.appendFailure(jit.jump());
1100     } else
1101         jit.appendFailure(nonIntArg1Jump);
1102
1103     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "imul");
1104 }
1105
1106 MacroAssemblerCodeRef<JITThunkPtrTag> randomThunkGenerator(VM* vm)
1107 {
1108     SpecializedThunkJIT jit(vm, 0);
1109     if (!jit.supportsFloatingPoint())
1110         return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
1111
1112 #if USE(JSVALUE64)
1113     jit.emitRandomThunk(*vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT3, SpecializedThunkJIT::fpRegT0);
1114     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
1115
1116     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "random");
1117 #else
1118     return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
1119 #endif
1120 }
1121
1122 MacroAssemblerCodeRef<JITThunkPtrTag> boundThisNoArgsFunctionCallGenerator(VM* vm)
1123 {
1124     CCallHelpers jit;
1125     
1126     jit.emitFunctionPrologue();
1127     
1128     // Set up our call frame.
1129     jit.storePtr(CCallHelpers::TrustedImmPtr(nullptr), CCallHelpers::addressFor(CallFrameSlot::codeBlock));
1130     jit.store32(CCallHelpers::TrustedImm32(0), CCallHelpers::tagFor(CallFrameSlot::argumentCount));
1131
1132     unsigned extraStackNeeded = 0;
1133     if (unsigned stackMisalignment = sizeof(CallerFrameAndPC) % stackAlignmentBytes())
1134         extraStackNeeded = stackAlignmentBytes() - stackMisalignment;
1135     
1136     // We need to forward all of the arguments that we were passed. We aren't allowed to do a tail
1137     // call here as far as I can tell. At least not so long as the generic path doesn't do a tail
1138     // call, since that would be way too weird.
1139     
1140     // The formula for the number of stack bytes needed given some number of parameters (including
1141     // this) is:
1142     //
1143     //     stackAlign((numParams + CallFrameHeaderSize) * sizeof(Register) - sizeof(CallerFrameAndPC))
1144     //
1145     // Probably we want to write this as:
1146     //
1147     //     stackAlign((numParams + (CallFrameHeaderSize - CallerFrameAndPCSize)) * sizeof(Register))
1148     //
1149     // That's really all there is to this. We have all the registers we need to do it.
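    //
    // For example, assuming the usual 64-bit layout (CallFrameHeaderSize == 5 registers,
    // CallerFrameAndPCSize == 2 registers, 8-byte registers, 16-byte stack alignment),
    // numParams == 4 gives stackAlign((4 + 3) * 8) == stackAlign(56) == 64 bytes.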
1150     
1151     jit.load32(CCallHelpers::payloadFor(CallFrameSlot::argumentCount), GPRInfo::regT1);
1152     jit.add32(CCallHelpers::TrustedImm32(CallFrame::headerSizeInRegisters - CallerFrameAndPC::sizeInRegisters), GPRInfo::regT1, GPRInfo::regT2);
1153     jit.lshift32(CCallHelpers::TrustedImm32(3), GPRInfo::regT2);
1154     jit.add32(CCallHelpers::TrustedImm32(stackAlignmentBytes() - 1), GPRInfo::regT2);
1155     jit.and32(CCallHelpers::TrustedImm32(-stackAlignmentBytes()), GPRInfo::regT2);
1156     
1157     if (extraStackNeeded)
1158         jit.add32(CCallHelpers::TrustedImm32(extraStackNeeded), GPRInfo::regT2);
1159     
1160     // At this point regT1 has the actual argument count and regT2 has the amount of stack we will
1161     // need.
1162     
1163     jit.subPtr(GPRInfo::regT2, CCallHelpers::stackPointerRegister);
1164
1165     // Do basic callee frame setup, including 'this'.
1166     
1167     jit.loadCell(CCallHelpers::addressFor(CallFrameSlot::callee), GPRInfo::regT3);
1168
1169     jit.store32(GPRInfo::regT1, CCallHelpers::calleeFramePayloadSlot(CallFrameSlot::argumentCount));
1170     
1171     JSValueRegs valueRegs = JSValueRegs::withTwoAvailableRegs(GPRInfo::regT0, GPRInfo::regT2);
1172     jit.loadValue(CCallHelpers::Address(GPRInfo::regT3, JSBoundFunction::offsetOfBoundThis()), valueRegs);
1173     jit.storeValue(valueRegs, CCallHelpers::calleeArgumentSlot(0));
1174
1175     jit.loadPtr(CCallHelpers::Address(GPRInfo::regT3, JSBoundFunction::offsetOfTargetFunction()), GPRInfo::regT3);
1176     jit.storeCell(GPRInfo::regT3, CCallHelpers::calleeFrameSlot(CallFrameSlot::callee));
1177     
1178     // OK, now we can start copying. This is a simple matter of copying parameters from the caller's
1179     // frame to the callee's frame. Note that we know that regT1 (the argument count) must be at
1180     // least 1.
1181     jit.sub32(CCallHelpers::TrustedImm32(1), GPRInfo::regT1);
1182     CCallHelpers::Jump done = jit.branchTest32(CCallHelpers::Zero, GPRInfo::regT1);
1183     
1184     CCallHelpers::Label loop = jit.label();
1185     jit.sub32(CCallHelpers::TrustedImm32(1), GPRInfo::regT1);
1186     jit.loadValue(CCallHelpers::addressFor(virtualRegisterForArgument(1)).indexedBy(GPRInfo::regT1, CCallHelpers::TimesEight), valueRegs);
1187     jit.storeValue(valueRegs, CCallHelpers::calleeArgumentSlot(1).indexedBy(GPRInfo::regT1, CCallHelpers::TimesEight));
1188     jit.branchTest32(CCallHelpers::NonZero, GPRInfo::regT1).linkTo(loop, &jit);
1189     
1190     done.link(&jit);
1191     
1192     jit.loadPtr(
1193         CCallHelpers::Address(GPRInfo::regT3, JSFunction::offsetOfExecutable()),
1194         GPRInfo::regT0);
1195     jit.xorPtr(CCallHelpers::TrustedImmPtr(JSFunctionPoison::key()), GPRInfo::regT0);
1196     jit.loadPtr(
1197         CCallHelpers::Address(
1198             GPRInfo::regT0, ExecutableBase::offsetOfJITCodeWithArityCheckFor(CodeForCall)),
1199         GPRInfo::regT0);
1200     CCallHelpers::Jump noCode = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT0);
1201     
1202 #if USE(JSVALUE64)
1203     jit.move(CCallHelpers::TrustedImm64(JITCodePoison::key()), GPRInfo::regT1);
1204     jit.xor64(GPRInfo::regT1, GPRInfo::regT0);
1205 #endif
1206     emitPointerValidation(jit, GPRInfo::regT0, JSEntryPtrTag);
1207     jit.call(GPRInfo::regT0, JSEntryPtrTag);
1208
1209     jit.emitFunctionEpilogue();
1210     jit.ret();
1211     
1212     LinkBuffer linkBuffer(jit, GLOBAL_THUNK_ID);
1213     linkBuffer.link(noCode, CodeLocationLabel<JITThunkPtrTag>(vm->jitStubs->ctiNativeTailCallWithoutSavedTags(vm)));
1214     return FINALIZE_CODE(
1215         linkBuffer, JITThunkPtrTag, "Specialized thunk for bound function calls with no arguments");
1216 }
1217
1218 } // namespace JSC
1219
1220 #endif // ENABLE(JIT)