Enhance the MacroAssembler and LinkBuffer to support pointer profiling.
Source/JavaScriptCore/jit/ThunkGenerators.cpp
1 /*
2  * Copyright (C) 2010-2018 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
14  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
15  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
17  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
18  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
19  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
20  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
21  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
23  * THE POSSIBILITY OF SUCH DAMAGE.
24  */
25
26 #include "config.h"
27 #include "ThunkGenerators.h"
28
29 #include "CodeBlock.h"
30 #include "DFGSpeculativeJIT.h"
31 #include "JITExceptions.h"
32 #include "JITOperations.h"
33 #include "JSArray.h"
34 #include "JSBoundFunction.h"
35 #include "JSCInlines.h"
36 #include "MathCommon.h"
37 #include "MaxFrameExtentForSlowPathCall.h"
38 #include "SpecializedThunkJIT.h"
39 #include <wtf/InlineASM.h>
40 #include <wtf/StringPrintStream.h>
41 #include <wtf/text/StringImpl.h>
42
43 #if ENABLE(JIT)
44
45 namespace JSC {
46
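// emitPointerValidation is a debug-only sanity check applied to pointers we are about to
// call or jump to. Roughly, in C-like pseudocode (for illustration only):
//
//     if (!pointer)
//         abortWithReason(TGInvalidPointer);
//     (void)*(volatile char*)pointer;   // probe load: crash early on a bogus pointer
//
// When assertions are disabled it emits nothing.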
47 inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
48 {
49     if (ASSERT_DISABLED)
50         return;
51     CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
52     jit.abortWithReason(TGInvalidPointer);
53     isNonZero.link(&jit);
54     jit.pushToSave(pointerGPR);
55     jit.load8(pointerGPR, pointerGPR);
56     jit.popToRestore(pointerGPR);
57 }
58
59 // We will jump here if the JIT code tries to make a call, but the
60 // linking helper (C++ code) decides to throw an exception instead.
61 MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm)
62 {
63     CCallHelpers jit;
64     
65     // The call pushed a return address, so we need to pop it back off to re-align the stack,
66     // even though we won't use it.
67     jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
68
69     jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm->topEntryFrame);
70
71     jit.setupArguments<decltype(lookupExceptionHandler)>(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister);
72     jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
73     emitPointerValidation(jit, GPRInfo::nonArgGPR0);
74     jit.call(GPRInfo::nonArgGPR0, NoPtrTag);
75     jit.jumpToExceptionHandler(*vm);
76
77     LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
78     return FINALIZE_CODE(patchBuffer, NoPtrTag, "Throw exception from call slow path thunk");
79 }
80
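// slowPathFor emits the shared slow-path call sequence used by the call-linking thunks
// below: build a frame, call the given JIT operation with the call frame and the
// CallLinkInfo (which callers leave in regT2), then jump to whatever code pointer the
// operation returns, turning the jump into a tail call when the second return value is
// non-zero (see the KeepTheFrame check below). Typical use, as in this file:
//
//     slowPathFor(jit, vm, operationLinkCall);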
81 static void slowPathFor(
82     CCallHelpers& jit, VM* vm, Sprt_JITOperation_ECli slowPathFunction)
83 {
84     jit.sanitizeStackInline(*vm, GPRInfo::nonArgGPR0);
85     jit.emitFunctionPrologue();
86     jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
87 #if OS(WINDOWS) && CPU(X86_64)
88     // Windows X86_64 needs some space pointed to by arg0 for return types larger than 64 bits.
89     // Other argument values are shifted by one position. Use space on the stack for our two return values.
90     // Moving the stack down maxFrameExtentForSlowPathCall bytes gives us room for our 3 arguments
91     // and space for the 16-byte return area.
92     jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
93     jit.move(GPRInfo::regT2, GPRInfo::argumentGPR2);
94     jit.addPtr(CCallHelpers::TrustedImm32(32), CCallHelpers::stackPointerRegister, GPRInfo::argumentGPR0);
95     jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
96     jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
97     emitPointerValidation(jit, GPRInfo::nonArgGPR0);
98     jit.call(GPRInfo::nonArgGPR0, NoPtrTag);
99     jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR, 8), GPRInfo::returnValueGPR2);
100     jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR), GPRInfo::returnValueGPR);
101     jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
102 #else
103     if (maxFrameExtentForSlowPathCall)
104         jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
105     jit.setupArguments<decltype(slowPathFunction)>(GPRInfo::regT2);
106     jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
107     emitPointerValidation(jit, GPRInfo::nonArgGPR0);
108     jit.call(GPRInfo::nonArgGPR0, NoPtrTag);
109     if (maxFrameExtentForSlowPathCall)
110         jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
111 #endif
112
113     // This slow call will return the address of one of the following:
114     // 1) Exception throwing thunk.
115     // 2) Host call return value returner thingy.
116     // 3) The function to call.
117     // The second return value GPR will hold a non-zero value for tail calls.
118
119     emitPointerValidation(jit, GPRInfo::returnValueGPR);
120     jit.emitFunctionEpilogue();
121
122     RELEASE_ASSERT(reinterpret_cast<void*>(KeepTheFrame) == reinterpret_cast<void*>(0));
123     CCallHelpers::Jump doNotTrash = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::returnValueGPR2);
124
125     jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
126     jit.prepareForTailCallSlow(GPRInfo::returnValueGPR);
127
128     doNotTrash.link(&jit);
129     jit.jump(GPRInfo::returnValueGPR, NoPtrTag);
130 }
131
132 MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
133 {
134     // The return address is on the stack or in the link register. We will hence
135     // save the return address to the call frame while we make a C++ function call
136     // to perform linking and lazy compilation if necessary. We expect the callee
137     // to be in regT0/regT1 (payload/tag), the CallFrame to have already
138     // been adjusted, and all other registers to be available for use.
139     CCallHelpers jit;
140     
141     slowPathFor(jit, vm, operationLinkCall);
142     
143     LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
144     return FINALIZE_CODE(patchBuffer, NoPtrTag, "Link call slow path thunk");
145 }
146
147 // For closure optimizations, we only include calls, since if you're using closures for
148 // object construction then you're going to lose big time anyway.
149 MacroAssemblerCodeRef linkPolymorphicCallThunkGenerator(VM* vm)
150 {
151     CCallHelpers jit;
152     
153     slowPathFor(jit, vm, operationLinkPolymorphicCall);
154     
155     LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
156     return FINALIZE_CODE(patchBuffer, NoPtrTag, "Link polymorphic call slow path thunk");
157 }
158
159 // FIXME: We should distinguish between a megamorphic virtual call vs. a slow
160 // path virtual call so that we can enable fast tail calls for megamorphic
161 // virtual calls by using the shuffler.
162 // https://bugs.webkit.org/show_bug.cgi?id=148831
163 MacroAssemblerCodeRef virtualThunkFor(VM* vm, CallLinkInfo& callLinkInfo)
164 {
165     // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
166     // The return address is on the stack, or in the link register. We will hence
167     // jump to the callee, or save the return address to the call frame while we
168     // make a C++ function call to the appropriate JIT operation.
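    //
    // In outline: count the slow-path hit for the profiler, check that the callee is a
    // cell and a JSFunction, load its executable and the JIT code pointer for this
    // specialization, and tail-call that code. An InternalFunction callee is routed to
    // the matching internal function trampoline instead, and anything else (or a missing
    // code pointer) falls back to slowPathFor() with operationVirtualCall.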
169
170     CCallHelpers jit;
171     
172     CCallHelpers::JumpList slowCase;
173     
174     // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the
175     // slow path execution for the profiler.
176     jit.add32(
177         CCallHelpers::TrustedImm32(1),
178         CCallHelpers::Address(GPRInfo::regT2, CallLinkInfo::offsetOfSlowPathCount()));
179
180     // FIXME: we should have a story for eliminating these checks. In many cases,
181     // the DFG knows that the value is definitely a cell, or definitely a function.
182     
183 #if USE(JSVALUE64)
184     GPRReg tagMaskRegister = GPRInfo::tagMaskRegister;
185     if (callLinkInfo.isTailCall()) {
186         // Tail calls could have clobbered the GPRInfo::tagMaskRegister because they
187         // restore callee-saved registers before getting here. So, let's materialize
188         // the TagMask in a temp register and use the temp instead.
189         tagMaskRegister = GPRInfo::regT4;
190         jit.move(CCallHelpers::TrustedImm64(TagMask), tagMaskRegister);
191     }
192     slowCase.append(
193         jit.branchTest64(CCallHelpers::NonZero, GPRInfo::regT0, tagMaskRegister));
194 #else
195     slowCase.append(
196         jit.branch32(
197             CCallHelpers::NotEqual, GPRInfo::regT1,
198             CCallHelpers::TrustedImm32(JSValue::CellTag)));
199 #endif
200     auto notJSFunction = jit.branchIfNotType(GPRInfo::regT0, JSFunctionType);
201     
202     // Now we know we have a JSFunction.
203     
204     jit.loadPtr(
205         CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
206         GPRInfo::regT4);
207     jit.xorPtr(CCallHelpers::TrustedImmPtr(JSFunctionPoison::key()), GPRInfo::regT4);
208     jit.loadPtr(
209         CCallHelpers::Address(
210             GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor(
211                 callLinkInfo.specializationKind())),
212         GPRInfo::regT4);
213     slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4));
214     
215     // Now we know that we have a CodeBlock, and we're committed to making a fast
216     // call.
217 #if USE(JSVALUE64)
218     jit.move(CCallHelpers::TrustedImm64(JITCodePoison::key()), GPRInfo::regT1);
219     jit.xor64(GPRInfo::regT1, GPRInfo::regT4);
220 #endif
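    // Note on the xorPtr/xor64 above: stored executable and code pointers are kept
    // poisoned, i.e. XORed with a per-type key (JSFunctionPoison for the executable
    // pointer, JITCodePoison for the JIT code pointer), so they must be XORed with the
    // same key again before use. The keys themselves are defined elsewhere.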
221
222     // Make a tail call. This will return back to JIT code.
223     JSInterfaceJIT::Label callCode(jit.label());
224     emitPointerValidation(jit, GPRInfo::regT4);
225     if (callLinkInfo.isTailCall()) {
226         jit.preserveReturnAddressAfterCall(GPRInfo::regT0);
227         jit.prepareForTailCallSlow(GPRInfo::regT4);
228     }
229     jit.jump(GPRInfo::regT4, NoPtrTag);
230
231     notJSFunction.link(&jit);
232     slowCase.append(jit.branchIfNotType(GPRInfo::regT0, InternalFunctionType));
233     jit.move(CCallHelpers::TrustedImmPtr(vm->getCTIInternalFunctionTrampolineFor(callLinkInfo.specializationKind()).executableAddress()), GPRInfo::regT4);
234     jit.jump().linkTo(callCode, &jit);
235
236     slowCase.link(&jit);
237     
238     // Here we don't know anything, so revert to the full slow path.
239     slowPathFor(jit, vm, operationVirtualCall);
240
241     LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
242     return FINALIZE_CODE(
243         patchBuffer, NoPtrTag,
244         "Virtual %s slow path thunk",
245         callLinkInfo.callMode() == CallMode::Regular ? "call" : callLinkInfo.callMode() == CallMode::Tail ? "tail call" : "construct");
246 }
247
248 enum ThunkEntryType { EnterViaCall, EnterViaJumpWithSavedTags, EnterViaJumpWithoutSavedTags };
249 enum class ThunkFunctionType { JSFunction, InternalFunction };
250
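// nativeForGenerator builds the trampoline that transfers control from JS into a host
// (native) function. ThunkFunctionType selects whether the callee is a JSFunction (whose
// NativeExecutable supplies the host function) or an InternalFunction. ThunkEntryType
// selects how we get here: EnterViaCall emits a fresh prologue, while the EnterViaJump
// variants are jumped to from specialized thunks and either restore the tag registers
// those thunks saved or simply reset the stack pointer from the frame pointer.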
251 static MacroAssemblerCodeRef nativeForGenerator(VM* vm, ThunkFunctionType thunkFunctionType, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall)
252 {
253     // FIXME: This should be able to log ShadowChicken prologue packets.
254     // https://bugs.webkit.org/show_bug.cgi?id=155689
255     
256     int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);
257     
258     JSInterfaceJIT jit(vm);
259
260     switch (entryType) {
261     case EnterViaCall:
262         jit.emitFunctionPrologue();
263         break;
264     case EnterViaJumpWithSavedTags:
265 #if USE(JSVALUE64)
266         // We're coming from a specialized thunk that has saved the prior tag registers' contents.
267         // Restore them now.
268 #if CPU(ARM64)
269         jit.popPair(JSInterfaceJIT::tagTypeNumberRegister, JSInterfaceJIT::tagMaskRegister);
270 #else
271         jit.pop(JSInterfaceJIT::tagMaskRegister);
272         jit.pop(JSInterfaceJIT::tagTypeNumberRegister);
273 #endif
274 #endif
275         break;
276     case EnterViaJumpWithoutSavedTags:
277         jit.move(JSInterfaceJIT::framePointerRegister, JSInterfaceJIT::stackPointerRegister);
278         break;
279     }
280
281     jit.emitPutToCallFrameHeader(0, CallFrameSlot::codeBlock);
282     jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
283
284 #if CPU(X86)
285     // Calling convention:      f(ecx, edx, ...);
286     // Host function signature: f(ExecState*);
287     jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);
288
289     jit.subPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); // Align stack after prologue.
290
291     // call the function
292     jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, JSInterfaceJIT::regT1);
293     if (thunkFunctionType == ThunkFunctionType::JSFunction) {
294         jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
295         jit.xorPtr(JSInterfaceJIT::TrustedImmPtr(JSFunctionPoison::key()), JSInterfaceJIT::regT1);
296         jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction), NoPtrTag);
297     } else
298         jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, InternalFunction::offsetOfNativeFunctionFor(kind)), NoPtrTag);
299
300     jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);
301
302 #elif CPU(X86_64)
303 #if !OS(WINDOWS)
304     // Calling convention:      f(edi, esi, edx, ecx, ...);
305     // Host function signature: f(ExecState*);
306     jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);
307
308     jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, X86Registers::esi);
309     if (thunkFunctionType == ThunkFunctionType::JSFunction) {
310         jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
311         jit.xorPtr(JSInterfaceJIT::TrustedImmPtr(JSFunctionPoison::key()), X86Registers::r9);
312         jit.loadPtr(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction), X86Registers::r9);
313     } else
314         jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, InternalFunction::offsetOfNativeFunctionFor(kind)), X86Registers::r9);
315     jit.move(JSInterfaceJIT::TrustedImm64(NativeCodePoison::key()), X86Registers::esi);
316     jit.xor64(X86Registers::esi, X86Registers::r9);
317     jit.call(X86Registers::r9, NoPtrTag);
318
319 #else
320     // Calling convention:      f(ecx, edx, r8, r9, ...);
321     // Host function signature: f(ExecState*);
322     jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);
323
324     // Leave space for the callee parameter home addresses.
325     // At this point the stack is aligned to 16 bytes, but if this changes at some point, we need to emit code to align it.
326     jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
327
328     jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, X86Registers::edx);
329     if (thunkFunctionType == ThunkFunctionType::JSFunction) {
330         jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
331         jit.xorPtr(JSInterfaceJIT::TrustedImmPtr(JSFunctionPoison::key()), X86Registers::r9);
332         jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction), NoPtrTag);
333     } else
334         jit.call(JSInterfaceJIT::Address(X86Registers::edx, InternalFunction::offsetOfNativeFunctionFor(kind)), NoPtrTag);
335
336     jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
337 #endif
338
339 #elif CPU(ARM64)
340     COMPILE_ASSERT(ARM64Registers::x0 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_0);
341     COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1);
342     COMPILE_ASSERT(ARM64Registers::x2 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_2);
343
344     // Host function signature: f(ExecState*);
345     jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0);
346
347     jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, ARM64Registers::x1);
348     if (thunkFunctionType == ThunkFunctionType::JSFunction) {
349         jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2);
350         jit.xorPtr(JSInterfaceJIT::TrustedImmPtr(JSFunctionPoison::key()), ARM64Registers::x2);
351         jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction), ARM64Registers::x2);
352     } else
353         jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, InternalFunction::offsetOfNativeFunctionFor(kind)), ARM64Registers::x2);
354     jit.move(JSInterfaceJIT::TrustedImm64(NativeCodePoison::key()), ARM64Registers::x1);
355     jit.xor64(ARM64Registers::x1, ARM64Registers::x2);
356     jit.call(ARM64Registers::x2, NoPtrTag);
357
358 #elif CPU(ARM) || CPU(MIPS)
359 #if CPU(MIPS)
360     // Allocate 16 bytes of (unused, 8-byte aligned) stack space for the 4 argument slots.
361     jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
362 #endif
363
364     // Calling convention is f(argumentGPR0, argumentGPR1, ...).
365     // Host function signature is f(ExecState*).
366     jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);
367
368     jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, JSInterfaceJIT::argumentGPR1);
369     if (thunkFunctionType == ThunkFunctionType::JSFunction) {
370         jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
371         jit.xorPtr(JSInterfaceJIT::TrustedImmPtr(JSFunctionPoison::key()), JSInterfaceJIT::regT2);
372         jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction), NoPtrTag);
373     } else
374         jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, InternalFunction::offsetOfNativeFunctionFor(kind)), NoPtrTag);
375
376 #if CPU(MIPS)
377     // Restore stack space
378     jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
379 #endif
380 #else
381 #error "JIT not supported on this platform."
382     UNUSED_PARAM(executableOffsetToFunction);
383     abortWithReason(TGNotSupported);
384 #endif
385
386     // Check for an exception
387 #if USE(JSVALUE64)
388     jit.load64(vm->addressOfException(), JSInterfaceJIT::regT2);
389     JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
390 #else
391     JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
392         JSInterfaceJIT::NotEqual,
393         JSInterfaceJIT::AbsoluteAddress(vm->addressOfException()),
394         JSInterfaceJIT::TrustedImm32(0));
395 #endif
396
397     jit.emitFunctionEpilogue();
398     // Return.
399     jit.ret();
400
401     // Handle an exception
402     exceptionHandler.link(&jit);
403
404     jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm->topEntryFrame);
405     jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
406
407 #if CPU(X86) && USE(JSVALUE32_64)
408     jit.subPtr(JSInterfaceJIT::TrustedImm32(4), JSInterfaceJIT::stackPointerRegister);
409     jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT0);
410     jit.push(JSInterfaceJIT::regT0);
411 #else
412 #if OS(WINDOWS)
413     // Allocate space on stack for the 4 parameter registers.
414     jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
415 #endif
416     jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);
417 #endif
418     jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(operationVMHandleException, NoPtrTag).value()), JSInterfaceJIT::regT3);
419     jit.call(JSInterfaceJIT::regT3, NoPtrTag);
420 #if CPU(X86) && USE(JSVALUE32_64)
421     jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);
422 #elif OS(WINDOWS)
423     jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
424 #endif
425
426     jit.jumpToExceptionHandler(*vm);
427
428     LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
429     return FINALIZE_CODE(patchBuffer, NoPtrTag, "%s %s%s trampoline", thunkFunctionType == ThunkFunctionType::JSFunction ? "native" : "internal", entryType == EnterViaJumpWithSavedTags ? "Tail With Saved Tags " : entryType == EnterViaJumpWithoutSavedTags ? "Tail Without Saved Tags " : "", toCString(kind).data());
430 }
431
432 MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
433 {
434     return nativeForGenerator(vm, ThunkFunctionType::JSFunction, CodeForCall);
435 }
436
437 MacroAssemblerCodeRef nativeTailCallGenerator(VM* vm)
438 {
439     return nativeForGenerator(vm, ThunkFunctionType::JSFunction, CodeForCall, EnterViaJumpWithSavedTags);
440 }
441
442 MacroAssemblerCodeRef nativeTailCallWithoutSavedTagsGenerator(VM* vm)
443 {
444     return nativeForGenerator(vm, ThunkFunctionType::JSFunction, CodeForCall, EnterViaJumpWithoutSavedTags);
445 }
446
447 MacroAssemblerCodeRef nativeConstructGenerator(VM* vm)
448 {
449     return nativeForGenerator(vm, ThunkFunctionType::JSFunction, CodeForConstruct);
450 }
451
452 MacroAssemblerCodeRef internalFunctionCallGenerator(VM* vm)
453 {
454     return nativeForGenerator(vm, ThunkFunctionType::InternalFunction, CodeForCall);
455 }
456
457 MacroAssemblerCodeRef internalFunctionConstructGenerator(VM* vm)
458 {
459     return nativeForGenerator(vm, ThunkFunctionType::InternalFunction, CodeForConstruct);
460 }
461
462 MacroAssemblerCodeRef arityFixupGenerator(VM* vm)
463 {
464     JSInterfaceJIT jit(vm);
465
466     // We enter with fixup count in argumentGPR0
467     // We have the guarantee that a0, a1, a2, t3, t4 and t5 (or t0 for Windows) are all distinct :-)
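    //
    // The overall scheme (both variants below): the part of the fixup count that is not
    // a multiple of the stack alignment is satisfied by writing undefined into the slack
    // slots just past the existing arguments (available because frames are stack-aligned);
    // the remaining, aligned part is handled by sliding the whole frame down that many
    // slots and filling the newly exposed argument slots with undefined. Either way the
    // callee sees undefined for every argument it expected but was not passed.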
468 #if USE(JSVALUE64)
469 #if OS(WINDOWS)
470     const GPRReg extraTemp = JSInterfaceJIT::regT0;
471 #else
472     const GPRReg extraTemp = JSInterfaceJIT::regT5;
473 #endif
474 #  if CPU(X86_64)
475     jit.pop(JSInterfaceJIT::regT4);
476 #  endif
477     jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
478     jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrameSlot::argumentCount * sizeof(Register)), JSInterfaceJIT::argumentGPR2);
479     jit.add32(JSInterfaceJIT::TrustedImm32(CallFrame::headerSizeInRegisters), JSInterfaceJIT::argumentGPR2);
480
481     // Check to see if we have extra slots we can use
482     jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
483     jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
484     JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
485     jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), extraTemp);
486     JSInterfaceJIT::Label fillExtraSlots(jit.label());
487     jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight));
488     jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
489     jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
490     jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
491     JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
492     noExtraSlot.link(&jit);
493
494     jit.neg64(JSInterfaceJIT::argumentGPR0);
495
496     // Adjust call frame register and stack pointer to account for missing args.
497     // We need to change the stack pointer first before performing copy/fill loops.
498     // The stack space below the stack pointer is considered unused by the OS. Therefore,
499     // the OS may corrupt this space when constructing a signal stack.
500     jit.move(JSInterfaceJIT::argumentGPR0, extraTemp);
501     jit.lshift64(JSInterfaceJIT::TrustedImm32(3), extraTemp);
502     jit.addPtr(extraTemp, JSInterfaceJIT::callFrameRegister);
503     jit.addPtr(extraTemp, JSInterfaceJIT::stackPointerRegister);
504
505     // Move current frame down argumentGPR0 number of slots
506     JSInterfaceJIT::Label copyLoop(jit.label());
507     jit.load64(JSInterfaceJIT::regT3, extraTemp);
508     jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
509     jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
510     jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);
511
512     // Fill in argumentGPR0 missing arg slots with undefined
513     jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
514     jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), extraTemp);
515     JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
516     jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
517     jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
518     jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);
519     
520     done.link(&jit);
521
522 #  if CPU(X86_64)
523     jit.push(JSInterfaceJIT::regT4);
524 #  endif
525     jit.ret();
526 #else
527 #  if CPU(X86)
528     jit.pop(JSInterfaceJIT::regT4);
529 #  endif
530     jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
531     jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrameSlot::argumentCount * sizeof(Register)), JSInterfaceJIT::argumentGPR2);
532     jit.add32(JSInterfaceJIT::TrustedImm32(CallFrame::headerSizeInRegisters), JSInterfaceJIT::argumentGPR2);
533
534     // Check to see if we have extra slots we can use
535     jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
536     jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
537     JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
538     JSInterfaceJIT::Label fillExtraSlots(jit.label());
539     jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
540     jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, PayloadOffset));
541     jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
542     jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, TagOffset));
543     jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
544     jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
545     jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
546     JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
547     noExtraSlot.link(&jit);
548
549     jit.neg32(JSInterfaceJIT::argumentGPR0);
550
551     // Move current frame down argumentGPR0 number of slots
552     JSInterfaceJIT::Label copyLoop(jit.label());
553     jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, PayloadOffset), JSInterfaceJIT::regT5);
554     jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
555     jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, TagOffset), JSInterfaceJIT::regT5);
556     jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));
557     jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
558     jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);
559
560     // Fill in argumentGPR0 missing arg slots with undefined
561     jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
562     JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
563     jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
564     jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
565     jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
566     jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));
567
568     jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
569     jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);
570
571     // Adjust call frame register and stack pointer to account for missing args
572     jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::regT5);
573     jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT5);
574     jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::callFrameRegister);
575     jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::stackPointerRegister);
576
577     done.link(&jit);
578
579 #  if CPU(X86)
580     jit.push(JSInterfaceJIT::regT4);
581 #  endif
582     jit.ret();
583 #endif
584
585     LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
586     return FINALIZE_CODE(patchBuffer, NoPtrTag, "fixup arity");
587 }
588
589 MacroAssemblerCodeRef unreachableGenerator(VM* vm)
590 {
591     JSInterfaceJIT jit(vm);
592
593     jit.breakpoint();
594
595     LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
596     return FINALIZE_CODE(patchBuffer, NoPtrTag, "unreachable thunk");
597 }
598
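// stringCharLoad loads the character at the given index of the JSString 'this' argument
// into regT0, failing over to the slow path if the receiver is not a string, if the
// string's value pointer is null (e.g. an unresolved rope), or if the index is out of
// bounds. Both 8-bit and 16-bit string representations are handled.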
599 static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
600 {
601     // load string
602     jit.loadJSStringArgument(*vm, SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);
603
604     // Load string length to regT2, and start the process of loading the data pointer into regT0
605     jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
606     jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
607     jit.appendFailure(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0));
608
609     // load index
610     jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index
611
612     // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
613     jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));
614
615     // Load the character
616     SpecializedThunkJIT::JumpList is16Bit;
617     SpecializedThunkJIT::JumpList cont8Bit;
618     // Load the string flags
619     jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::flagsOffset()), SpecializedThunkJIT::regT2);
620     jit.and32(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::maskOffset()), SpecializedThunkJIT::regT1);
621     jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::dataOffset()), SpecializedThunkJIT::regT0);
622     is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
623     jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0);
624     cont8Bit.append(jit.jump());
625     is16Bit.link(&jit);
626     jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
627     cont8Bit.link(&jit);
628 }
629
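// charToString turns the character code in 'src' into the corresponding single-character
// JSString from the VM's small strings table. Codes of 0x100 and above, or table entries
// that are still null, fail over to the slow path.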
630 static void charToString(SpecializedThunkJIT& jit, VM* vm, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
631 {
632     jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::TrustedImm32(0x100)));
633     jit.move(MacroAssembler::TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), scratch);
634     jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
635     jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
636 }
637
638 MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
639 {
640     SpecializedThunkJIT jit(vm, 1);
641     stringCharLoad(jit, vm);
642     jit.returnInt32(SpecializedThunkJIT::regT0);
643     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charCodeAt");
644 }
645
646 MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
647 {
648     SpecializedThunkJIT jit(vm, 1);
649     stringCharLoad(jit, vm);
650     charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
651     jit.returnJSCell(SpecializedThunkJIT::regT0);
652     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charAt");
653 }
654
655 MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
656 {
657     SpecializedThunkJIT jit(vm, 1);
658     // load char code
659     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
660     charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
661     jit.returnJSCell(SpecializedThunkJIT::regT0);
662     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "fromCharCode");
663 }
664
665 MacroAssemblerCodeRef clz32ThunkGenerator(VM* vm)
666 {
667     SpecializedThunkJIT jit(vm, 1);
668     MacroAssembler::Jump nonIntArgJump;
669     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArgJump);
670
671     SpecializedThunkJIT::Label convertedArgumentReentry(&jit);
672     jit.countLeadingZeros32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
673     jit.returnInt32(SpecializedThunkJIT::regT1);
674
675     if (jit.supportsFloatingPointTruncate()) {
676         nonIntArgJump.link(&jit);
677         jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
678         jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(convertedArgumentReentry, &jit);
679         jit.appendFailure(jit.jump());
680     } else
681         jit.appendFailure(nonIntArgJump);
682
683     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "clz32");
684 }
685
686 MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
687 {
688     SpecializedThunkJIT jit(vm, 1);
689     if (!jit.supportsFloatingPointSqrt())
690         return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
691
692     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
693     jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
694     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
695     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "sqrt");
696 }
697
698
699 #define UnaryDoubleOpWrapper(function) function##Wrapper
700 enum MathThunkCallingConvention { };
701 typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
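// A MathThunk is a tiny assembly wrapper around a C math function (floor, ceil, exp, ...)
// that the specialized thunks call via callDoubleToDoublePreservingReturn: the double
// argument and result stay in the platform's FP register, and the wrapper only does the
// minimum needed to satisfy the C calling convention (e.g. keeping the stack aligned, or
// spilling through memory on x86). On platforms with no wrapper defined below,
// UnaryDoubleOpWrapper(function) is null and the generators fall back to the native call
// stub instead.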
702
703 #if CPU(X86_64) && COMPILER(GCC_OR_CLANG) && (OS(DARWIN) || OS(LINUX))
704
705 #define defineUnaryDoubleOpWrapper(function) \
706     asm( \
707         ".text\n" \
708         ".globl " SYMBOL_STRING(function##Thunk) "\n" \
709         HIDE_SYMBOL(function##Thunk) "\n" \
710         SYMBOL_STRING(function##Thunk) ":" "\n" \
711         "pushq %rax\n" \
712         "call " GLOBAL_REFERENCE(function) "\n" \
713         "popq %rcx\n" \
714         "ret\n" \
715     );\
716     extern "C" { \
717         MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
718     } \
719     static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
720
721 #elif CPU(X86) && COMPILER(GCC_OR_CLANG) && OS(LINUX) && defined(__PIC__)
722 #define defineUnaryDoubleOpWrapper(function) \
723     asm( \
724         ".text\n" \
725         ".globl " SYMBOL_STRING(function##Thunk) "\n" \
726         HIDE_SYMBOL(function##Thunk) "\n" \
727         SYMBOL_STRING(function##Thunk) ":" "\n" \
728         "pushl %ebx\n" \
729         "subl $20, %esp\n" \
730         "movsd %xmm0, (%esp) \n" \
731         "call __x86.get_pc_thunk.bx\n" \
732         "addl $_GLOBAL_OFFSET_TABLE_, %ebx\n" \
733         "call " GLOBAL_REFERENCE(function) "\n" \
734         "fstpl (%esp) \n" \
735         "movsd (%esp), %xmm0 \n" \
736         "addl $20, %esp\n" \
737         "popl %ebx\n" \
738         "ret\n" \
739     );\
740     extern "C" { \
741         MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
742     } \
743     static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
744
745 #elif CPU(X86) && COMPILER(GCC_OR_CLANG) && (OS(DARWIN) || OS(LINUX))
746 #define defineUnaryDoubleOpWrapper(function) \
747     asm( \
748         ".text\n" \
749         ".globl " SYMBOL_STRING(function##Thunk) "\n" \
750         HIDE_SYMBOL(function##Thunk) "\n" \
751         SYMBOL_STRING(function##Thunk) ":" "\n" \
752         "subl $20, %esp\n" \
753         "movsd %xmm0, (%esp) \n" \
754         "call " GLOBAL_REFERENCE(function) "\n" \
755         "fstpl (%esp) \n" \
756         "movsd (%esp), %xmm0 \n" \
757         "addl $20, %esp\n" \
758         "ret\n" \
759     );\
760     extern "C" { \
761         MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
762     } \
763     static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
764
765 #elif CPU(ARM_THUMB2) && COMPILER(GCC_OR_CLANG) && PLATFORM(IOS)
766
767 #define defineUnaryDoubleOpWrapper(function) \
768     asm( \
769         ".text\n" \
770         ".align 2\n" \
771         ".globl " SYMBOL_STRING(function##Thunk) "\n" \
772         HIDE_SYMBOL(function##Thunk) "\n" \
773         ".thumb\n" \
774         ".thumb_func " THUMB_FUNC_PARAM(function##Thunk) "\n" \
775         SYMBOL_STRING(function##Thunk) ":" "\n" \
776         "push {lr}\n" \
777         "vmov r0, r1, d0\n" \
778         "blx " GLOBAL_REFERENCE(function) "\n" \
779         "vmov d0, r0, r1\n" \
780         "pop {lr}\n" \
781         "bx lr\n" \
782     ); \
783     extern "C" { \
784         MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
785     } \
786     static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
787
788 #elif CPU(ARM64)
789
790 #define defineUnaryDoubleOpWrapper(function) \
791     asm( \
792         ".text\n" \
793         ".align 2\n" \
794         ".globl " SYMBOL_STRING(function##Thunk) "\n" \
795         HIDE_SYMBOL(function##Thunk) "\n" \
796         SYMBOL_STRING(function##Thunk) ":" "\n" \
797         "b " GLOBAL_REFERENCE(function) "\n" \
798         ".previous" \
799     ); \
800     extern "C" { \
801         MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
802     } \
803     static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
804
805 #elif CPU(X86) && COMPILER(MSVC) && OS(WINDOWS)
806
807 // MSVC does not allow floor, etc., to be called directly from inline assembly, so we need to wrap these functions.
808 static double (_cdecl *floorFunction)(double) = floor;
809 static double (_cdecl *ceilFunction)(double) = ceil;
810 static double (_cdecl *truncFunction)(double) = trunc;
811 static double (_cdecl *expFunction)(double) = exp;
812 static double (_cdecl *logFunction)(double) = log;
813 static double (_cdecl *jsRoundFunction)(double) = jsRound;
814
815 #define defineUnaryDoubleOpWrapper(function) \
816     extern "C" __declspec(naked) MathThunkCallingConvention function##Thunk(MathThunkCallingConvention) \
817     { \
818         __asm \
819         { \
820         __asm sub esp, 20 \
821         __asm movsd mmword ptr [esp], xmm0  \
822         __asm call function##Function \
823         __asm fstp qword ptr [esp] \
824         __asm movsd xmm0, mmword ptr [esp] \
825         __asm add esp, 20 \
826         __asm ret \
827         } \
828     } \
829     static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
830
831 #else
832
833 #define defineUnaryDoubleOpWrapper(function) \
834     static MathThunk UnaryDoubleOpWrapper(function) = 0
835 #endif
836
837 defineUnaryDoubleOpWrapper(jsRound);
838 defineUnaryDoubleOpWrapper(exp);
839 defineUnaryDoubleOpWrapper(log);
840 defineUnaryDoubleOpWrapper(floor);
841 defineUnaryDoubleOpWrapper(ceil);
842 defineUnaryDoubleOpWrapper(trunc);
843
844 static const double halfConstant = 0.5;
845     
846 MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
847 {
848     SpecializedThunkJIT jit(vm, 1);
849     MacroAssembler::Jump nonIntJump;
850     if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
851         return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
852     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
853     jit.returnInt32(SpecializedThunkJIT::regT0);
854     nonIntJump.link(&jit);
855     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
856
857     if (jit.supportsFloatingPointRounding()) {
858         SpecializedThunkJIT::JumpList doubleResult;
859         jit.floorDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
860         jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
861         jit.returnInt32(SpecializedThunkJIT::regT0);
862         doubleResult.link(&jit);
863         jit.returnDouble(SpecializedThunkJIT::fpRegT0);
864         return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
865     }
866
867     SpecializedThunkJIT::Jump intResult;
868     SpecializedThunkJIT::JumpList doubleResult;
869     if (jit.supportsFloatingPointTruncate()) {
870         jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
871         doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
872         SpecializedThunkJIT::JumpList slowPath;
873         // Handle the negative doubles in the slow path for now.
874         slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
875         slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0));
876         intResult = jit.jump();
877         slowPath.link(&jit);
878     }
879     jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor));
880     jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
881     if (jit.supportsFloatingPointTruncate())
882         intResult.link(&jit);
883     jit.returnInt32(SpecializedThunkJIT::regT0);
884     doubleResult.link(&jit);
885     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
886     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
887 }
888
889 MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
890 {
891     SpecializedThunkJIT jit(vm, 1);
892     if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
893         return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
894     MacroAssembler::Jump nonIntJump;
895     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
896     jit.returnInt32(SpecializedThunkJIT::regT0);
897     nonIntJump.link(&jit);
898     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
899     if (jit.supportsFloatingPointRounding())
900         jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
901     else
902         jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
903
904     SpecializedThunkJIT::JumpList doubleResult;
905     jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
906     jit.returnInt32(SpecializedThunkJIT::regT0);
907     doubleResult.link(&jit);
908     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
909     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "ceil");
910 }
911
912 MacroAssemblerCodeRef truncThunkGenerator(VM* vm)
913 {
914     SpecializedThunkJIT jit(vm, 1);
915     if (!UnaryDoubleOpWrapper(trunc) || !jit.supportsFloatingPoint())
916         return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
917     MacroAssembler::Jump nonIntJump;
918     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
919     jit.returnInt32(SpecializedThunkJIT::regT0);
920     nonIntJump.link(&jit);
921     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
922     if (jit.supportsFloatingPointRounding())
923         jit.roundTowardZeroDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
924     else
925         jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(trunc));
926
927     SpecializedThunkJIT::JumpList doubleResult;
928     jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
929     jit.returnInt32(SpecializedThunkJIT::regT0);
930     doubleResult.link(&jit);
931     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
932     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "trunc");
933 }
934
935 MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
936 {
937     SpecializedThunkJIT jit(vm, 1);
938     if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
939         return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
940     MacroAssembler::Jump nonIntJump;
941     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
942     jit.returnInt32(SpecializedThunkJIT::regT0);
943     nonIntJump.link(&jit);
944     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
945     SpecializedThunkJIT::Jump intResult;
946     SpecializedThunkJIT::JumpList doubleResult;
947     if (jit.supportsFloatingPointTruncate()) {
948         jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
949         doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
950         SpecializedThunkJIT::JumpList slowPath;
951         // Handle the negative doubles in the slow path for now.
952         slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
953         jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), SpecializedThunkJIT::fpRegT1);
954         jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
955         slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
956         intResult = jit.jump();
957         slowPath.link(&jit);
958     }
959     jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound));
960     jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
961     if (jit.supportsFloatingPointTruncate())
962         intResult.link(&jit);
963     jit.returnInt32(SpecializedThunkJIT::regT0);
964     doubleResult.link(&jit);
965     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
966     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "round");
967 }
968
969 MacroAssemblerCodeRef expThunkGenerator(VM* vm)
970 {
971     if (!UnaryDoubleOpWrapper(exp))
972         return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
973     SpecializedThunkJIT jit(vm, 1);
974     if (!jit.supportsFloatingPoint())
975         return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
976     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
977     jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
978     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
979     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "exp");
980 }
981
982 MacroAssemblerCodeRef logThunkGenerator(VM* vm)
983 {
984     if (!UnaryDoubleOpWrapper(log))
985         return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
986     SpecializedThunkJIT jit(vm, 1);
987     if (!jit.supportsFloatingPoint())
988         return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
989     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
990     jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
991     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
992     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "log");
993 }
994
995 MacroAssemblerCodeRef absThunkGenerator(VM* vm)
996 {
997     SpecializedThunkJIT jit(vm, 1);
998     if (!jit.supportsFloatingPointAbs())
999         return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
1000
1001 #if USE(JSVALUE64)
1002     unsigned virtualRegisterIndex = CallFrame::argumentOffset(0);
1003     jit.load64(AssemblyHelpers::addressFor(virtualRegisterIndex), GPRInfo::regT0);
1004     MacroAssembler::Jump notInteger = jit.branch64(MacroAssembler::Below, GPRInfo::regT0, GPRInfo::tagTypeNumberRegister);
1005
1006     // Abs Int32.
1007     jit.rshift32(GPRInfo::regT0, MacroAssembler::TrustedImm32(31), GPRInfo::regT1);
1008     jit.add32(GPRInfo::regT1, GPRInfo::regT0);
1009     jit.xor32(GPRInfo::regT1, GPRInfo::regT0);
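    // This is the branchless abs idiom: mask = x >> 31 (all ones for negative x, zero
    // otherwise), then abs(x) = (x + mask) ^ mask. For example, x = -5: mask = -1 and
    // (-5 + -1) ^ -1 = 5. The one value it cannot fix up is INT32_MIN, checked next.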
1010
1011     // IntMin cannot be inverted.
1012     MacroAssembler::Jump integerIsIntMin = jit.branchTest32(MacroAssembler::Signed, GPRInfo::regT0);
1013
1014     // Box and finish.
1015     jit.or64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
1016     MacroAssembler::Jump doneWithIntegers = jit.jump();
1017
1018     // Handle Doubles.
1019     notInteger.link(&jit);
1020     jit.appendFailure(jit.branchTest64(MacroAssembler::Zero, GPRInfo::regT0, GPRInfo::tagTypeNumberRegister));
1021     jit.unboxDoubleWithoutAssertions(GPRInfo::regT0, GPRInfo::regT0, FPRInfo::fpRegT0);
1022     MacroAssembler::Label absFPR0Label = jit.label();
1023     jit.absDouble(FPRInfo::fpRegT0, FPRInfo::fpRegT1);
1024     jit.boxDouble(FPRInfo::fpRegT1, GPRInfo::regT0);
1025
1026     // Tail.
1027     doneWithIntegers.link(&jit);
1028     jit.returnJSValue(GPRInfo::regT0);
1029
1030     // We know the value of regT0 is IntMin. We could load that value from memory but
1031     // it is simpler to just convert it.
1032     integerIsIntMin.link(&jit);
1033     jit.convertInt32ToDouble(GPRInfo::regT0, FPRInfo::fpRegT0);
1034     jit.jump().linkTo(absFPR0Label, &jit);
1035 #else
1036     MacroAssembler::Jump nonIntJump;
1037     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
1038     jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
1039     jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
1040     jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
1041     jit.appendFailure(jit.branchTest32(MacroAssembler::Signed, SpecializedThunkJIT::regT0));
1042     jit.returnInt32(SpecializedThunkJIT::regT0);
1043     nonIntJump.link(&jit);
1044     // Shame about the double int conversion here.
1045     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
1046     jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
1047     jit.returnDouble(SpecializedThunkJIT::fpRegT1);
1048 #endif
1049     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "abs");
1050 }
1051
1052 MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
1053 {
1054     SpecializedThunkJIT jit(vm, 2);
1055     MacroAssembler::Jump nonIntArg0Jump;
1056     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
1057     SpecializedThunkJIT::Label doneLoadingArg0(&jit);
1058     MacroAssembler::Jump nonIntArg1Jump;
1059     jit.loadInt32Argument(1, SpecializedThunkJIT::regT1, nonIntArg1Jump);
1060     SpecializedThunkJIT::Label doneLoadingArg1(&jit);
1061     jit.mul32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
1062     jit.returnInt32(SpecializedThunkJIT::regT0);
1063
1064     if (jit.supportsFloatingPointTruncate()) {
1065         nonIntArg0Jump.link(&jit);
1066         jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
1067         jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
1068         jit.appendFailure(jit.jump());
1069     } else
1070         jit.appendFailure(nonIntArg0Jump);
1071
1072     if (jit.supportsFloatingPointTruncate()) {
1073         nonIntArg1Jump.link(&jit);
1074         jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
1075         jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
1076         jit.appendFailure(jit.jump());
1077     } else
1078         jit.appendFailure(nonIntArg1Jump);
1079
1080     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "imul");
1081 }
1082
1083 MacroAssemblerCodeRef randomThunkGenerator(VM* vm)
1084 {
1085     SpecializedThunkJIT jit(vm, 0);
1086     if (!jit.supportsFloatingPoint())
1087         return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
1088
1089 #if USE(JSVALUE64)
1090     jit.emitRandomThunk(*vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT3, SpecializedThunkJIT::fpRegT0);
1091     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
1092
1093     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "random");
1094 #else
1095     return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
1096 #endif
1097 }
1098
1099 MacroAssemblerCodeRef boundThisNoArgsFunctionCallGenerator(VM* vm)
1100 {
1101     CCallHelpers jit;
1102     
1103     jit.emitFunctionPrologue();
1104     
1105     // Set up our call frame.
1106     jit.storePtr(CCallHelpers::TrustedImmPtr(nullptr), CCallHelpers::addressFor(CallFrameSlot::codeBlock));
1107     jit.store32(CCallHelpers::TrustedImm32(0), CCallHelpers::tagFor(CallFrameSlot::argumentCount));
1108
1109     unsigned extraStackNeeded = 0;
1110     if (unsigned stackMisalignment = sizeof(CallerFrameAndPC) % stackAlignmentBytes())
1111         extraStackNeeded = stackAlignmentBytes() - stackMisalignment;
1112     
1113     // We need to forward all of the arguments that we were passed. We aren't allowed to do a tail
1114     // call here as far as I can tell. At least not so long as the generic path doesn't do a tail
1115     // call, since that would be way too weird.
1116     
1117     // The formula for the number of stack bytes needed given some number of parameters (including
1118     // this) is:
1119     //
1120     //     stackAlign((numParams + CallFrameHeaderSize) * sizeof(Register) - sizeof(CallerFrameAndPC))
1121     //
1122     // Probably we want to write this as:
1123     //
1124     //     stackAlign((numParams + (CallFrameHeaderSize - CallerFrameAndPCSize)) * sizeof(Register))
1125     //
1126     // That's really all there is to this. We have all the registers we need to do it.
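    //
    // For illustration only (the real constants live elsewhere): on a 64-bit target with
    // sizeof(Register) == 8, a 5-register call frame header and a 2-register
    // CallerFrameAndPC, a call with numParams == 1 needs stackAlign((1 + 3) * 8) == 32
    // bytes at 16-byte stack alignment.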
1127     
1128     jit.load32(CCallHelpers::payloadFor(CallFrameSlot::argumentCount), GPRInfo::regT1);
1129     jit.add32(CCallHelpers::TrustedImm32(CallFrame::headerSizeInRegisters - CallerFrameAndPC::sizeInRegisters), GPRInfo::regT1, GPRInfo::regT2);
1130     jit.lshift32(CCallHelpers::TrustedImm32(3), GPRInfo::regT2);
1131     jit.add32(CCallHelpers::TrustedImm32(stackAlignmentBytes() - 1), GPRInfo::regT2);
1132     jit.and32(CCallHelpers::TrustedImm32(-stackAlignmentBytes()), GPRInfo::regT2);
1133     
1134     if (extraStackNeeded)
1135         jit.add32(CCallHelpers::TrustedImm32(extraStackNeeded), GPRInfo::regT2);
1136     
1137     // At this point regT1 has the actual argument count and regT2 has the amount of stack we will
1138     // need.
1139     
1140     jit.subPtr(GPRInfo::regT2, CCallHelpers::stackPointerRegister);
1141
1142     // Do basic callee frame setup, including 'this'.
1143     
1144     jit.loadCell(CCallHelpers::addressFor(CallFrameSlot::callee), GPRInfo::regT3);
1145
1146     jit.store32(GPRInfo::regT1, CCallHelpers::calleeFramePayloadSlot(CallFrameSlot::argumentCount));
1147     
1148     JSValueRegs valueRegs = JSValueRegs::withTwoAvailableRegs(GPRInfo::regT0, GPRInfo::regT2);
1149     jit.loadValue(CCallHelpers::Address(GPRInfo::regT3, JSBoundFunction::offsetOfBoundThis()), valueRegs);
1150     jit.storeValue(valueRegs, CCallHelpers::calleeArgumentSlot(0));
1151
1152     jit.loadPtr(CCallHelpers::Address(GPRInfo::regT3, JSBoundFunction::offsetOfTargetFunction()), GPRInfo::regT3);
1153     jit.storeCell(GPRInfo::regT3, CCallHelpers::calleeFrameSlot(CallFrameSlot::callee));
1154     
1155     // OK, now we can start copying. This is a simple matter of copying parameters from the caller's
1156     // frame to the callee's frame. Note that we know that regT1 (the argument count) must be at
1157     // least 1.
1158     jit.sub32(CCallHelpers::TrustedImm32(1), GPRInfo::regT1);
1159     CCallHelpers::Jump done = jit.branchTest32(CCallHelpers::Zero, GPRInfo::regT1);
1160     
1161     CCallHelpers::Label loop = jit.label();
1162     jit.sub32(CCallHelpers::TrustedImm32(1), GPRInfo::regT1);
1163     jit.loadValue(CCallHelpers::addressFor(virtualRegisterForArgument(1)).indexedBy(GPRInfo::regT1, CCallHelpers::TimesEight), valueRegs);
1164     jit.storeValue(valueRegs, CCallHelpers::calleeArgumentSlot(1).indexedBy(GPRInfo::regT1, CCallHelpers::TimesEight));
1165     jit.branchTest32(CCallHelpers::NonZero, GPRInfo::regT1).linkTo(loop, &jit);
1166     
1167     done.link(&jit);
1168     
1169     jit.loadPtr(
1170         CCallHelpers::Address(GPRInfo::regT3, JSFunction::offsetOfExecutable()),
1171         GPRInfo::regT0);
1172     jit.xorPtr(CCallHelpers::TrustedImmPtr(JSFunctionPoison::key()), GPRInfo::regT0);
1173     jit.loadPtr(
1174         CCallHelpers::Address(
1175             GPRInfo::regT0, ExecutableBase::offsetOfJITCodeWithArityCheckFor(CodeForCall)),
1176         GPRInfo::regT0);
1177     CCallHelpers::Jump noCode = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT0);
1178     
1179 #if USE(JSVALUE64)
1180     jit.move(CCallHelpers::TrustedImm64(JITCodePoison::key()), GPRInfo::regT1);
1181     jit.xor64(GPRInfo::regT1, GPRInfo::regT0);
1182 #endif
1183     emitPointerValidation(jit, GPRInfo::regT0);
1184     jit.call(GPRInfo::regT0, NoPtrTag);
1185     
1186     jit.emitFunctionEpilogue();
1187     jit.ret();
1188     
1189     LinkBuffer linkBuffer(jit, GLOBAL_THUNK_ID);
1190     linkBuffer.link(noCode, CodeLocationLabel(vm->jitStubs->ctiNativeTailCallWithoutSavedTags(vm)));
1191     return FINALIZE_CODE(
1192         linkBuffer, NoPtrTag, "Specialized thunk for bound function calls with no arguments");
1193 }
1194
1195 } // namespace JSC
1196
1197 #endif // ENABLE(JIT)