/*
 * Copyright (C) 2010-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "ThunkGenerators.h"

#include "CodeBlock.h"
#include "DFGSpeculativeJIT.h"
#include "JITExceptions.h"
#include "JITOperations.h"
#include "JSArray.h"
#include "JSBoundFunction.h"
#include "JSCInlines.h"
#include "MathCommon.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "SpecializedThunkJIT.h"
#include <wtf/InlineASM.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/StringImpl.h>

#if ENABLE(JIT)

namespace JSC {

inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
{
    if (ASSERT_DISABLED)
        return;
    CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
    jit.abortWithReason(TGInvalidPointer);
    isNonZero.link(&jit);
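    // Probe the pointer by loading a byte from it: if the pointer is bogus, this
    // crashes here at the validation site rather than at the eventual call.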
    jit.pushToSave(pointerGPR);
    jit.load8(pointerGPR, pointerGPR);
    jit.popToRestore(pointerGPR);
}

// We will jump here if the JIT code tries to make a call, but the
// linking helper (C++ code) decides to throw an exception instead.
MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm)
{
    CCallHelpers jit;

    // The call pushed a return address, so we need to pop it back off to re-align the stack,
    // even though we won't use it.
    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);

    jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm->topEntryFrame);

    jit.setupArguments<decltype(lookupExceptionHandler)>(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    jit.jumpToExceptionHandler(*vm);

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, "Throw exception from call slow path thunk");
}

static void slowPathFor(
    CCallHelpers& jit, VM* vm, Sprt_JITOperation_ECli slowPathFunction)
{
    jit.sanitizeStackInline(*vm, GPRInfo::nonArgGPR0);
    jit.emitFunctionPrologue();
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
#if OS(WINDOWS) && CPU(X86_64)
    // Windows X86_64 needs some space pointed to by arg0 for return types larger than 64 bits.
    // Other argument values are shifted by one. Use space on the stack for our two return values.
    // Moving the stack down maxFrameExtentForSlowPathCall bytes gives us room for our 3 arguments
    // and space for the 16 byte return area.
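    // Editorial sketch of the effective signature under this convention (an
    // assumption for illustration, not an actual declaration in the tree):
    //
    //     SlowPathReturnType* f(SlowPathReturnType* resultSpace, ExecState*, CallLinkInfo*);
    //
    // where resultSpace points at the 16-byte area reserved below and the callee
    // returns that same pointer, which is why both words are reloaded through
    // returnValueGPR after the call.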
    jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.move(GPRInfo::regT2, GPRInfo::argumentGPR2);
    jit.addPtr(CCallHelpers::TrustedImm32(32), CCallHelpers::stackPointerRegister, GPRInfo::argumentGPR0);
    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR, 8), GPRInfo::returnValueGPR2);
    jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR), GPRInfo::returnValueGPR);
    jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
#else
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.setupArguments<decltype(slowPathFunction)>(GPRInfo::regT2);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
#endif

    // This slow call will return the address of one of the following:
    // 1) Exception throwing thunk.
    // 2) Host call return value returner thingy.
    // 3) The function to call.
    // The second return value GPR will hold a non-zero value for tail calls.

    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.emitFunctionEpilogue();

    RELEASE_ASSERT(reinterpret_cast<void*>(KeepTheFrame) == reinterpret_cast<void*>(0));
    CCallHelpers::Jump doNotTrash = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::returnValueGPR2);

    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
    jit.prepareForTailCallSlow(GPRInfo::returnValueGPR);

    doNotTrash.link(&jit);
    jit.jump(GPRInfo::returnValueGPR);
}

MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
{
    // The return address is on the stack or in the link register. We will hence
    // save the return address to the call frame while we make a C++ function call
    // to perform linking and lazy compilation if necessary. We expect the callee
    // to be in regT0/regT1 (payload/tag), the CallFrame to have already
    // been adjusted, and all other registers to be available for use.
    CCallHelpers jit;

    slowPathFor(jit, vm, operationLinkCall);

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, "Link call slow path thunk");
}

// For closure optimizations, we only include calls, since if you're using closures for
// object construction then you're going to lose big time anyway.
MacroAssemblerCodeRef linkPolymorphicCallThunkGenerator(VM* vm)
{
    CCallHelpers jit;

    slowPathFor(jit, vm, operationLinkPolymorphicCall);

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, "Link polymorphic call slow path thunk");
}

// FIXME: We should distinguish between a megamorphic virtual call vs. a slow
// path virtual call so that we can enable fast tail calls for megamorphic
// virtual calls by using the shuffler.
// https://bugs.webkit.org/show_bug.cgi?id=148831
MacroAssemblerCodeRef virtualThunkFor(VM* vm, CallLinkInfo& callLinkInfo)
{
    // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate JIT operation.

    CCallHelpers jit;

    CCallHelpers::JumpList slowCase;

    // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the
    // slow path execution for the profiler.
    jit.add32(
        CCallHelpers::TrustedImm32(1),
        CCallHelpers::Address(GPRInfo::regT2, CallLinkInfo::offsetOfSlowPathCount()));

    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.

#if USE(JSVALUE64)
    GPRReg tagMaskRegister = GPRInfo::tagMaskRegister;
    if (callLinkInfo.isTailCall()) {
        // Tail calls could have clobbered the GPRInfo::tagMaskRegister because they
        // restore callee-saved registers before getting here. So, let's materialize
        // the TagMask in a temp register and use the temp instead.
        tagMaskRegister = GPRInfo::regT4;
        jit.move(CCallHelpers::TrustedImm64(TagMask), tagMaskRegister);
    }
    slowCase.append(
        jit.branchTest64(CCallHelpers::NonZero, GPRInfo::regT0, tagMaskRegister));
#else
    slowCase.append(
        jit.branch32(
            CCallHelpers::NotEqual, GPRInfo::regT1,
            CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
    auto notJSFunction = jit.branchIfNotType(GPRInfo::regT0, JSFunctionType);

    // Now we know we have a JSFunction.

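    // The executable pointer is stored poisoned; XORing it with
    // JSFunctionPoison::key() below recovers the real pointer (a pointer-hardening
    // measure).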
    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
        GPRInfo::regT4);
    jit.xorPtr(CCallHelpers::TrustedImmPtr(JSFunctionPoison::key()), GPRInfo::regT4);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor(
                callLinkInfo.specializationKind())),
        GPRInfo::regT4);
    slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4));

    // Now we know that we have a CodeBlock, and we're committed to making a fast
    // call.
#if USE(JSVALUE64)
    jit.move(CCallHelpers::TrustedImm64(JITCodePoison::key()), GPRInfo::regT1);
    jit.xor64(GPRInfo::regT1, GPRInfo::regT4);
#endif

    // Make a tail call. This will return back to JIT code.
    JSInterfaceJIT::Label callCode(jit.label());
    emitPointerValidation(jit, GPRInfo::regT4);
    if (callLinkInfo.isTailCall()) {
        jit.preserveReturnAddressAfterCall(GPRInfo::regT0);
        jit.prepareForTailCallSlow(GPRInfo::regT4);
    }
    jit.jump(GPRInfo::regT4);

    notJSFunction.link(&jit);
    slowCase.append(jit.branchIfNotType(GPRInfo::regT0, InternalFunctionType));
    jit.move(CCallHelpers::TrustedImmPtr(vm->getCTIInternalFunctionTrampolineFor(callLinkInfo.specializationKind()).executableAddress()), GPRInfo::regT4);
    jit.jump().linkTo(callCode, &jit);

    slowCase.link(&jit);

    // Here we don't know anything, so revert to the full slow path.

    slowPathFor(jit, vm, operationVirtualCall);

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        "Virtual %s slow path thunk",
        callLinkInfo.callMode() == CallMode::Regular ? "call" : callLinkInfo.callMode() == CallMode::Tail ? "tail call" : "construct");
}

enum ThunkEntryType { EnterViaCall, EnterViaJumpWithSavedTags, EnterViaJumpWithoutSavedTags };
enum class ThunkFunctionType { JSFunction, InternalFunction };

static MacroAssemblerCodeRef nativeForGenerator(VM* vm, ThunkFunctionType thunkFunctionType, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall)
{
    // FIXME: This should be able to log ShadowChicken prologue packets.
    // https://bugs.webkit.org/show_bug.cgi?id=155689

    int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);

    JSInterfaceJIT jit(vm);

    switch (entryType) {
    case EnterViaCall:
        jit.emitFunctionPrologue();
        break;
    case EnterViaJumpWithSavedTags:
#if USE(JSVALUE64)
        // We're coming from a specialized thunk that has saved the prior tag registers' contents.
        // Restore them now.
#if CPU(ARM64)
        jit.popPair(JSInterfaceJIT::tagTypeNumberRegister, JSInterfaceJIT::tagMaskRegister);
#else
        jit.pop(JSInterfaceJIT::tagMaskRegister);
        jit.pop(JSInterfaceJIT::tagTypeNumberRegister);
#endif
#endif
        break;
    case EnterViaJumpWithoutSavedTags:
        jit.move(JSInterfaceJIT::framePointerRegister, JSInterfaceJIT::stackPointerRegister);
        break;
    }

    jit.emitPutToCallFrameHeader(0, CallFrameSlot::codeBlock);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86)
    // Calling convention:      f(ecx, edx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); // Align stack after prologue.

    // call the function
    jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, JSInterfaceJIT::regT1);
    if (thunkFunctionType == ThunkFunctionType::JSFunction) {
        jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
        jit.xorPtr(JSInterfaceJIT::TrustedImmPtr(JSFunctionPoison::key()), JSInterfaceJIT::regT1);
        jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));
    } else
        jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, InternalFunction::offsetOfNativeFunctionFor(kind)));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);

#elif CPU(X86_64)
#if !OS(WINDOWS)
    // Calling convention:      f(edi, esi, edx, ecx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);

    jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, X86Registers::esi);
    if (thunkFunctionType == ThunkFunctionType::JSFunction) {
        jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
        jit.xorPtr(JSInterfaceJIT::TrustedImmPtr(JSFunctionPoison::key()), X86Registers::r9);
        jit.loadPtr(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction), X86Registers::r9);
    } else
        jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, InternalFunction::offsetOfNativeFunctionFor(kind)), X86Registers::r9);
    jit.move(JSInterfaceJIT::TrustedImm64(NativeCodePoison::key()), X86Registers::esi);
    jit.xor64(X86Registers::esi, X86Registers::r9);
    jit.call(X86Registers::r9);

#else
    // Calling convention:      f(ecx, edx, r8, r9, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    // Leave space for the callee parameter home addresses.
    // At this point the stack is aligned to 16 bytes, but if this changes at some point, we need to emit code to align it.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, X86Registers::edx);
    if (thunkFunctionType == ThunkFunctionType::JSFunction) {
        jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
        jit.xorPtr(JSInterfaceJIT::TrustedImmPtr(JSFunctionPoison::key()), X86Registers::r9);
        jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));
    } else
        jit.call(JSInterfaceJIT::Address(X86Registers::edx, InternalFunction::offsetOfNativeFunctionFor(kind)));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

#elif CPU(ARM64)
    COMPILE_ASSERT(ARM64Registers::x0 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_0);
    COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1);
    COMPILE_ASSERT(ARM64Registers::x2 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_2);

    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0);

    jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, ARM64Registers::x1);
    if (thunkFunctionType == ThunkFunctionType::JSFunction) {
        jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2);
        jit.xorPtr(JSInterfaceJIT::TrustedImmPtr(JSFunctionPoison::key()), ARM64Registers::x2);
        jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction), ARM64Registers::x2);
    } else
        jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, InternalFunction::offsetOfNativeFunctionFor(kind)), ARM64Registers::x2);
    jit.move(JSInterfaceJIT::TrustedImm64(NativeCodePoison::key()), ARM64Registers::x1);
    jit.xor64(ARM64Registers::x1, ARM64Registers::x2);
    jit.call(ARM64Registers::x2);

#elif CPU(ARM) || CPU(MIPS)
#if CPU(MIPS)
    // Allocate 16 bytes of (unused, 8-byte aligned) stack space for the 4 argument slots.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif

    // Calling convention is f(argumentGPR0, argumentGPR1, ...).
    // Host function signature is f(ExecState*).
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);

    jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, JSInterfaceJIT::argumentGPR1);
    if (thunkFunctionType == ThunkFunctionType::JSFunction) {
        jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
        jit.xorPtr(JSInterfaceJIT::TrustedImmPtr(JSFunctionPoison::key()), JSInterfaceJIT::regT2);
        jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));
    } else
        jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, InternalFunction::offsetOfNativeFunctionFor(kind)));

#if CPU(MIPS)
    // Restore stack space
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif
#else
#error "JIT not supported on this platform."
    UNUSED_PARAM(executableOffsetToFunction);
    abortWithReason(TGNotSupported);
#endif

    // Check for an exception
#if USE(JSVALUE64)
    jit.load64(vm->addressOfException(), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
#else
    JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::AbsoluteAddress(vm->addressOfException()),
        JSInterfaceJIT::TrustedImm32(0));
#endif

    jit.emitFunctionEpilogue();
    // Return.
    jit.ret();

    // Handle an exception
    exceptionHandler.link(&jit);

    jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm->topEntryFrame);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86) && USE(JSVALUE32_64)
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4), JSInterfaceJIT::stackPointerRegister);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT0);
    jit.push(JSInterfaceJIT::regT0);
#else
#if OS(WINDOWS)
    // Allocate space on stack for the 4 parameter registers.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);
#endif
    jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), JSInterfaceJIT::regT3);
    jit.call(JSInterfaceJIT::regT3);
#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);
#elif OS(WINDOWS)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

    jit.jumpToExceptionHandler(*vm);

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, "%s %s%s trampoline", thunkFunctionType == ThunkFunctionType::JSFunction ? "native" : "internal", entryType == EnterViaJumpWithSavedTags ? "Tail With Saved Tags " : entryType == EnterViaJumpWithoutSavedTags ? "Tail Without Saved Tags " : "", toCString(kind).data());
}

MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, ThunkFunctionType::JSFunction, CodeForCall);
}

MacroAssemblerCodeRef nativeTailCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, ThunkFunctionType::JSFunction, CodeForCall, EnterViaJumpWithSavedTags);
}

MacroAssemblerCodeRef nativeTailCallWithoutSavedTagsGenerator(VM* vm)
{
    return nativeForGenerator(vm, ThunkFunctionType::JSFunction, CodeForCall, EnterViaJumpWithoutSavedTags);
}

MacroAssemblerCodeRef nativeConstructGenerator(VM* vm)
{
    return nativeForGenerator(vm, ThunkFunctionType::JSFunction, CodeForConstruct);
}

MacroAssemblerCodeRef internalFunctionCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, ThunkFunctionType::InternalFunction, CodeForCall);
}

MacroAssemblerCodeRef internalFunctionConstructGenerator(VM* vm)
{
    return nativeForGenerator(vm, ThunkFunctionType::InternalFunction, CodeForConstruct);
}

MacroAssemblerCodeRef arityFixupGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);

    // We enter with fixup count in argumentGPR0
    // We have the guarantee that a0, a1, a2, t3, t4 and t5 (or t0 for Windows) are all distinct :-)
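    //
    // Illustrative example (editorial, assuming stackAlignmentRegisters() == 2):
    // a fixup count of 3 has one unaligned "extra" slot (3 & 1), which gets filled
    // with undefined in place below; the remaining two slots (3 & -2) require
    // shifting the whole frame down.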
#if USE(JSVALUE64)
#if OS(WINDOWS)
    const GPRReg extraTemp = JSInterfaceJIT::regT0;
#else
    const GPRReg extraTemp = JSInterfaceJIT::regT5;
#endif
#  if CPU(X86_64)
    jit.pop(JSInterfaceJIT::regT4);
#  endif
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrameSlot::argumentCount * sizeof(Register)), JSInterfaceJIT::argumentGPR2);
    jit.add32(JSInterfaceJIT::TrustedImm32(CallFrame::headerSizeInRegisters), JSInterfaceJIT::argumentGPR2);

    // Check to see if we have extra slots we can use
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
    jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
    JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), extraTemp);
    JSInterfaceJIT::Label fillExtraSlots(jit.label());
    jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight));
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
    jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
    jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
    JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
    noExtraSlot.link(&jit);

    jit.neg64(JSInterfaceJIT::argumentGPR0);

    // Adjust call frame register and stack pointer to account for missing args.
    // We need to change the stack pointer first before performing copy/fill loops.
    // The stack space below the stack pointer is considered unused by the OS. Therefore,
    // the OS may corrupt this space when constructing a signal stack.
    jit.move(JSInterfaceJIT::argumentGPR0, extraTemp);
    jit.lshift64(JSInterfaceJIT::TrustedImm32(3), extraTemp);
    jit.addPtr(extraTemp, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(extraTemp, JSInterfaceJIT::stackPointerRegister);

    // Move current frame down argumentGPR0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load64(JSInterfaceJIT::regT3, extraTemp);
    jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);

    // Fill in argumentGPR0 missing arg slots with undefined
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), extraTemp);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);

    done.link(&jit);

#  if CPU(X86_64)
    jit.push(JSInterfaceJIT::regT4);
#  endif
    jit.ret();
#else
#  if CPU(X86)
    jit.pop(JSInterfaceJIT::regT4);
#  endif
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrameSlot::argumentCount * sizeof(Register)), JSInterfaceJIT::argumentGPR2);
    jit.add32(JSInterfaceJIT::TrustedImm32(CallFrame::headerSizeInRegisters), JSInterfaceJIT::argumentGPR2);

    // Check to see if we have extra slots we can use
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
    jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
    JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
    JSInterfaceJIT::Label fillExtraSlots(jit.label());
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, PayloadOffset));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, TagOffset));
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
    jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
    jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
    JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
    noExtraSlot.link(&jit);

    jit.neg32(JSInterfaceJIT::argumentGPR0);

    // Move current frame down argumentGPR0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, PayloadOffset), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, TagOffset), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);

    // Fill in argumentGPR0 missing arg slots with undefined
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);

    // Adjust call frame register and stack pointer to account for missing args
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::regT5);
    jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT5);
    jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::stackPointerRegister);

    done.link(&jit);

#  if CPU(X86)
    jit.push(JSInterfaceJIT::regT4);
#  endif
    jit.ret();
#endif

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, "fixup arity");
}

MacroAssemblerCodeRef unreachableGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);

    jit.breakpoint();

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, "unreachable thunk");
}

static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
{
    // load string
    jit.loadJSStringArgument(*vm, SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0));

    // load index
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));

    // Load the character
    SpecializedThunkJIT::JumpList is16Bit;
    SpecializedThunkJIT::JumpList cont8Bit;
    // Load the string flags
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::flagsOffset()), SpecializedThunkJIT::regT2);
    jit.and32(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::maskOffset()), SpecializedThunkJIT::regT1);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::dataOffset()), SpecializedThunkJIT::regT0);
    is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
    cont8Bit.link(&jit);
}
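// Roughly equivalent C++ for the character load above (an illustrative sketch;
// the thunk additionally bails to its slow path for ropes, i.e. when the value
// pointer loaded above is null):
//
//     UChar c = impl->is8Bit() ? impl->characters8()[index] : impl->characters16()[index];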

static void charToString(SpecializedThunkJIT& jit, VM* vm, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
{
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::TrustedImm32(0x100)));
    jit.move(MacroAssembler::TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), scratch);
    jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
    jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
}

MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charCodeAt");
}

MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charAt");
}

MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    // load char code
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "fromCharCode");
}

MacroAssemblerCodeRef clz32ThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntArgJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArgJump);

    SpecializedThunkJIT::Label convertedArgumentReentry(&jit);
    jit.countLeadingZeros32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnInt32(SpecializedThunkJIT::regT1);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArgJump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(convertedArgumentReentry, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArgJump);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "clz32");
}

MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointSqrt())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "sqrt");
}


#define UnaryDoubleOpWrapper(function) function##Wrapper
enum MathThunkCallingConvention { };
typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
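// Editorial note: MathThunkCallingConvention is an empty enum that exists only to
// give these raw-assembly trampolines a distinct C++ function type; the real
// argument and result travel in the platform's floating-point registers (xmm0 on
// x86-64, d0 on ARM), as the assembly below shows.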

#if CPU(X86_64) && COMPILER(GCC_OR_CLANG) && (OS(DARWIN) || OS(LINUX))

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "pushq %rax\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "popq %rcx\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC_OR_CLANG) && OS(LINUX) && defined(__PIC__)
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "pushl %ebx\n" \
        "subl $20, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call __x86.get_pc_thunk.bx\n" \
        "addl $_GLOBAL_OFFSET_TABLE_, %ebx\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $20, %esp\n" \
        "popl %ebx\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC_OR_CLANG) && (OS(DARWIN) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "subl $20, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $20, %esp\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM_THUMB2) && COMPILER(GCC_OR_CLANG) && PLATFORM(IOS)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        ".thumb\n" \
        ".thumb_func " THUMB_FUNC_PARAM(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "push {lr}\n" \
        "vmov r0, r1, d0\n" \
        "blx " GLOBAL_REFERENCE(function) "\n" \
        "vmov d0, r0, r1\n" \
        "pop {lr}\n" \
        "bx lr\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM64)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "b " GLOBAL_REFERENCE(function) "\n" \
        ".previous" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(MSVC) && OS(WINDOWS)

// MSVC does not allow floor, etc., to be called directly from inline assembly, so we need to wrap these functions.
static double (_cdecl *floorFunction)(double) = floor;
static double (_cdecl *ceilFunction)(double) = ceil;
static double (_cdecl *truncFunction)(double) = trunc;
static double (_cdecl *expFunction)(double) = exp;
static double (_cdecl *logFunction)(double) = log;
static double (_cdecl *jsRoundFunction)(double) = jsRound;

#define defineUnaryDoubleOpWrapper(function) \
    extern "C" __declspec(naked) MathThunkCallingConvention function##Thunk(MathThunkCallingConvention) \
    { \
        __asm \
        { \
        __asm sub esp, 20 \
        __asm movsd mmword ptr [esp], xmm0  \
        __asm call function##Function \
        __asm fstp qword ptr [esp] \
        __asm movsd xmm0, mmword ptr [esp] \
        __asm add esp, 20 \
        __asm ret \
        } \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#else

#define defineUnaryDoubleOpWrapper(function) \
    static MathThunk UnaryDoubleOpWrapper(function) = 0
#endif

defineUnaryDoubleOpWrapper(jsRound);
defineUnaryDoubleOpWrapper(exp);
defineUnaryDoubleOpWrapper(log);
defineUnaryDoubleOpWrapper(floor);
defineUnaryDoubleOpWrapper(ceil);
defineUnaryDoubleOpWrapper(trunc);

static const double halfConstant = 0.5;

MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntJump;
    if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);

    if (jit.supportsFloatingPointRounding()) {
        SpecializedThunkJIT::JumpList doubleResult;
        jit.floorDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT0);
        return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
    }

    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
}

MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    if (jit.supportsFloatingPointRounding())
        jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    else
        jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));

    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "ceil");
}

MacroAssemblerCodeRef truncThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(trunc) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    if (jit.supportsFloatingPointRounding())
        jit.roundTowardZeroDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    else
        jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(trunc));

    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "trunc");
}

MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
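        // For non-negative x, jsRound(x) == trunc(x + 0.5), so add 0.5 and truncate.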
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), SpecializedThunkJIT::fpRegT1);
        jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "round");
}

MacroAssemblerCodeRef expThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(exp))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "exp");
}

MacroAssemblerCodeRef logThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(log))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "log");
}

MacroAssemblerCodeRef absThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointAbs())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

#if USE(JSVALUE64)
    unsigned virtualRegisterIndex = CallFrame::argumentOffset(0);
    jit.load64(AssemblyHelpers::addressFor(virtualRegisterIndex), GPRInfo::regT0);
    MacroAssembler::Jump notInteger = jit.branch64(MacroAssembler::Below, GPRInfo::regT0, GPRInfo::tagTypeNumberRegister);

    // Abs Int32.
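    // Branchless abs: with s = x >> 31 (0 for non-negative x, -1 for negative x),
    // (x + s) ^ s == |x| for every int32 except INT_MIN, which is handled below.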
    jit.rshift32(GPRInfo::regT0, MacroAssembler::TrustedImm32(31), GPRInfo::regT1);
    jit.add32(GPRInfo::regT1, GPRInfo::regT0);
    jit.xor32(GPRInfo::regT1, GPRInfo::regT0);

    // IntMin cannot be inverted.
    MacroAssembler::Jump integerIsIntMin = jit.branchTest32(MacroAssembler::Signed, GPRInfo::regT0);

    // Box and finish.
    jit.or64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
    MacroAssembler::Jump doneWithIntegers = jit.jump();

    // Handle Doubles.
    notInteger.link(&jit);
    jit.appendFailure(jit.branchTest64(MacroAssembler::Zero, GPRInfo::regT0, GPRInfo::tagTypeNumberRegister));
    jit.unboxDoubleWithoutAssertions(GPRInfo::regT0, GPRInfo::regT0, FPRInfo::fpRegT0);
    MacroAssembler::Label absFPR0Label = jit.label();
    jit.absDouble(FPRInfo::fpRegT0, FPRInfo::fpRegT1);
    jit.boxDouble(FPRInfo::fpRegT1, GPRInfo::regT0);

    // Tail.
    doneWithIntegers.link(&jit);
    jit.returnJSValue(GPRInfo::regT0);

    // We know the value of regT0 is IntMin. We could load that value from memory but
    // it is simpler to just convert it.
    integerIsIntMin.link(&jit);
    jit.convertInt32ToDouble(GPRInfo::regT0, FPRInfo::fpRegT0);
    jit.jump().linkTo(absFPR0Label, &jit);
#else
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
    jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branchTest32(MacroAssembler::Signed, SpecializedThunkJIT::regT0));
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    // Shame about the double int conversion here.
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    jit.returnDouble(SpecializedThunkJIT::fpRegT1);
#endif
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "abs");
}

MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 2);
    MacroAssembler::Jump nonIntArg0Jump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
    SpecializedThunkJIT::Label doneLoadingArg0(&jit);
    MacroAssembler::Jump nonIntArg1Jump;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT1, nonIntArg1Jump);
    SpecializedThunkJIT::Label doneLoadingArg1(&jit);
    jit.mul32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.returnInt32(SpecializedThunkJIT::regT0);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg0Jump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg0Jump);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg1Jump.link(&jit);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg1Jump);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "imul");
}

MacroAssemblerCodeRef randomThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 0);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

#if USE(JSVALUE64)
    jit.emitRandomThunk(*vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT3, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "random");
#else
    return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
#endif
}

MacroAssemblerCodeRef boundThisNoArgsFunctionCallGenerator(VM* vm)
{
    CCallHelpers jit;

    jit.emitFunctionPrologue();

    // Set up our call frame.
    jit.storePtr(CCallHelpers::TrustedImmPtr(nullptr), CCallHelpers::addressFor(CallFrameSlot::codeBlock));
    jit.store32(CCallHelpers::TrustedImm32(0), CCallHelpers::tagFor(CallFrameSlot::argumentCount));

    unsigned extraStackNeeded = 0;
    if (unsigned stackMisalignment = sizeof(CallerFrameAndPC) % stackAlignmentBytes())
        extraStackNeeded = stackAlignmentBytes() - stackMisalignment;

    // We need to forward all of the arguments that we were passed. We aren't allowed to do a tail
    // call here as far as I can tell. At least not so long as the generic path doesn't do a tail
    // call, since that would be way too weird.

    // The formula for the number of stack bytes needed given some number of parameters (including
    // this) is:
    //
    //     stackAlign((numParams + CallFrameHeaderSize) * sizeof(Register) - sizeof(CallerFrameAndPC))
    //
    // Probably we want to write this as:
    //
    //     stackAlign((numParams + (CallFrameHeaderSize - CallerFrameAndPCSize)) * sizeof(Register))
    //
    // That's really all there is to this. We have all the registers we need to do it.
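    //
    // Worked example (editorial, assuming 64-bit values: sizeof(Register) == 8,
    // CallFrameHeaderSize == 5, CallerFrameAndPCSize == 2, 16-byte stack alignment):
    // numParams == 3 gives stackAlign((3 + 3) * 8) == stackAlign(48) == 48 bytes.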

    jit.load32(CCallHelpers::payloadFor(CallFrameSlot::argumentCount), GPRInfo::regT1);
    jit.add32(CCallHelpers::TrustedImm32(CallFrame::headerSizeInRegisters - CallerFrameAndPC::sizeInRegisters), GPRInfo::regT1, GPRInfo::regT2);
    jit.lshift32(CCallHelpers::TrustedImm32(3), GPRInfo::regT2);
    jit.add32(CCallHelpers::TrustedImm32(stackAlignmentBytes() - 1), GPRInfo::regT2);
    jit.and32(CCallHelpers::TrustedImm32(-stackAlignmentBytes()), GPRInfo::regT2);

    if (extraStackNeeded)
        jit.add32(CCallHelpers::TrustedImm32(extraStackNeeded), GPRInfo::regT2);

    // At this point regT1 has the actual argument count and regT2 has the amount of stack we will
    // need.

    jit.subPtr(GPRInfo::regT2, CCallHelpers::stackPointerRegister);

    // Do basic callee frame setup, including 'this'.

    jit.loadCell(CCallHelpers::addressFor(CallFrameSlot::callee), GPRInfo::regT3);

    jit.store32(GPRInfo::regT1, CCallHelpers::calleeFramePayloadSlot(CallFrameSlot::argumentCount));

    JSValueRegs valueRegs = JSValueRegs::withTwoAvailableRegs(GPRInfo::regT0, GPRInfo::regT2);
    jit.loadValue(CCallHelpers::Address(GPRInfo::regT3, JSBoundFunction::offsetOfBoundThis()), valueRegs);
    jit.storeValue(valueRegs, CCallHelpers::calleeArgumentSlot(0));

    jit.loadPtr(CCallHelpers::Address(GPRInfo::regT3, JSBoundFunction::offsetOfTargetFunction()), GPRInfo::regT3);
    jit.storeCell(GPRInfo::regT3, CCallHelpers::calleeFrameSlot(CallFrameSlot::callee));

    // OK, now we can start copying. This is a simple matter of copying parameters from the caller's
    // frame to the callee's frame. Note that we know that regT1 (the argument count) must be at
    // least 1.
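    // (The count includes |this|, and callee argument slot 0 was already filled
    // with the bound |this| above, so the loop copies caller arguments 1..count-1.)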
    jit.sub32(CCallHelpers::TrustedImm32(1), GPRInfo::regT1);
    CCallHelpers::Jump done = jit.branchTest32(CCallHelpers::Zero, GPRInfo::regT1);

    CCallHelpers::Label loop = jit.label();
    jit.sub32(CCallHelpers::TrustedImm32(1), GPRInfo::regT1);
    jit.loadValue(CCallHelpers::addressFor(virtualRegisterForArgument(1)).indexedBy(GPRInfo::regT1, CCallHelpers::TimesEight), valueRegs);
    jit.storeValue(valueRegs, CCallHelpers::calleeArgumentSlot(1).indexedBy(GPRInfo::regT1, CCallHelpers::TimesEight));
    jit.branchTest32(CCallHelpers::NonZero, GPRInfo::regT1).linkTo(loop, &jit);

    done.link(&jit);

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT3, JSFunction::offsetOfExecutable()),
        GPRInfo::regT0);
    jit.xorPtr(CCallHelpers::TrustedImmPtr(JSFunctionPoison::key()), GPRInfo::regT0);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT0, ExecutableBase::offsetOfJITCodeWithArityCheckFor(CodeForCall)),
        GPRInfo::regT0);
    CCallHelpers::Jump noCode = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT0);

#if USE(JSVALUE64)
    jit.move(CCallHelpers::TrustedImm64(JITCodePoison::key()), GPRInfo::regT1);
    jit.xor64(GPRInfo::regT1, GPRInfo::regT0);
#endif
    emitPointerValidation(jit, GPRInfo::regT0);
    jit.call(GPRInfo::regT0);

    jit.emitFunctionEpilogue();
    jit.ret();

    LinkBuffer linkBuffer(jit, GLOBAL_THUNK_ID);
    linkBuffer.link(noCode, CodeLocationLabel(vm->jitStubs->ctiNativeTailCallWithoutSavedTags(vm)));
    return FINALIZE_CODE(
        linkBuffer, "Specialized thunk for bound function calls with no arguments");
}

} // namespace JSC

#endif // ENABLE(JIT)