WebAssembly: WasmB3IRGenerator should throw exceptions instead of crash
Source/JavaScriptCore/jit/ThunkGenerators.cpp
/*
 * Copyright (C) 2010, 2012-2014, 2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "ThunkGenerators.h"

#include "CodeBlock.h"
#include "DFGSpeculativeJIT.h"
#include "JITExceptions.h"
#include "JITOperations.h"
#include "JSArray.h"
#include "JSBoundFunction.h"
#include "MathCommon.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "JSCInlines.h"
#include "JSWebAssemblyInstance.h"
#include "JSWebAssemblyRuntimeError.h"
#include "SpecializedThunkJIT.h"
#include "WasmExceptionType.h"
#include <wtf/InlineASM.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/StringImpl.h>

#if ENABLE(JIT)

namespace JSC {

inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
{
    if (ASSERT_DISABLED)
        return;
    CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
    jit.abortWithReason(TGInvalidPointer);
    isNonZero.link(&jit);
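    // Probe the pointer: load a byte through it so that a bogus but non-null
    // pointer faults here, close to whatever produced it.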
    jit.pushToSave(pointerGPR);
    jit.load8(pointerGPR, pointerGPR);
    jit.popToRestore(pointerGPR);
}

// We will jump here if the JIT code tries to make a call, but the
// linking helper (C++ code) decides to throw an exception instead.
MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm)
{
    CCallHelpers jit(vm);
    
    // The call pushed a return address, so we need to pop it back off to re-align the stack,
    // even though we won't use it.
    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);

    jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();

    jit.setupArguments(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    jit.jumpToExceptionHandler();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Throw exception from call slow path thunk"));
}

static void slowPathFor(
    CCallHelpers& jit, VM* vm, Sprt_JITOperation_ECli slowPathFunction)
{
    jit.emitFunctionPrologue();
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
#if OS(WINDOWS) && CPU(X86_64)
    // Windows X86_64 needs some space pointed to by arg0 for return types larger than 64 bits.
    // Other argument values are shifted by one. Use space on the stack for our two return values.
    // Moving the stack down maxFrameExtentForSlowPathCall bytes gives us room for our 3 arguments
    // and space for the 16 byte return area.
    jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.move(GPRInfo::regT2, GPRInfo::argumentGPR2);
    jit.addPtr(CCallHelpers::TrustedImm32(32), CCallHelpers::stackPointerRegister, GPRInfo::argumentGPR0);
    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR, 8), GPRInfo::returnValueGPR2);
    jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR), GPRInfo::returnValueGPR);
    jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
#else
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.setupArgumentsWithExecState(GPRInfo::regT2);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
#endif

    // This slow call will return the address of one of the following:
    // 1) The exception-throwing thunk.
    // 2) A thunk that returns the host call's return value.
    // 3) The function to call.
    // The second return value GPR will hold a non-zero value for tail calls.

    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.emitFunctionEpilogue();

    RELEASE_ASSERT(reinterpret_cast<void*>(KeepTheFrame) == reinterpret_cast<void*>(0));
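    // A zero second return value means KeepTheFrame: leave the caller's frame
    // alone and jump. A non-zero value means a tail call, so restore the return
    // address and shuffle the frame before jumping.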
    CCallHelpers::Jump doNotTrash = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::returnValueGPR2);

    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
    jit.prepareForTailCallSlow(GPRInfo::returnValueGPR);

    doNotTrash.link(&jit);
    jit.jump(GPRInfo::returnValueGPR);
}

MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
{
    // The return address is on the stack or in the link register. We will hence
    // save the return address to the call frame while we make a C++ function call
    // to perform linking and lazy compilation if necessary. We expect the callee
    // to be in regT0/regT1 (payload/tag), the CallFrame to have already
    // been adjusted, and all other registers to be available for use.
    CCallHelpers jit(vm);
    
    slowPathFor(jit, vm, operationLinkCall);
    
    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Link call slow path thunk"));
}

// For closure optimizations, we only include calls, since if you're using closures for
// object construction then you're going to lose big time anyway.
MacroAssemblerCodeRef linkPolymorphicCallThunkGenerator(VM* vm)
{
    CCallHelpers jit(vm);
    
    slowPathFor(jit, vm, operationLinkPolymorphicCall);
    
    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Link polymorphic call slow path thunk"));
}

// FIXME: We should distinguish between a megamorphic virtual call vs. a slow
// path virtual call so that we can enable fast tail calls for megamorphic
// virtual calls by using the shuffler.
// https://bugs.webkit.org/show_bug.cgi?id=148831
MacroAssemblerCodeRef virtualThunkFor(VM* vm, CallLinkInfo& callLinkInfo)
{
    // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate JIT operation.

    CCallHelpers jit(vm);
    
    CCallHelpers::JumpList slowCase;
    
    // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the
    // slow path execution for the profiler.
    jit.add32(
        CCallHelpers::TrustedImm32(1),
        CCallHelpers::Address(GPRInfo::regT2, CallLinkInfo::offsetOfSlowPathCount()));

    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.
    
#if USE(JSVALUE64)
    slowCase.append(
        jit.branchTest64(
            CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::tagMaskRegister));
#else
    slowCase.append(
        jit.branch32(
            CCallHelpers::NotEqual, GPRInfo::regT1,
            CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
    slowCase.append(jit.branchIfNotType(GPRInfo::regT0, JSFunctionType));
    
    // Now we know we have a JSFunction.
    
    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
        GPRInfo::regT4);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor(
                callLinkInfo.specializationKind())),
        GPRInfo::regT4);
    slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4));
    
    // Now we know that we have a CodeBlock, and we're committed to making a fast
    // call.
    
    // Make a tail call. This will return back to JIT code.
    emitPointerValidation(jit, GPRInfo::regT4);
    if (callLinkInfo.isTailCall()) {
        jit.preserveReturnAddressAfterCall(GPRInfo::regT0);
        jit.prepareForTailCallSlow(GPRInfo::regT4);
    }
    jit.jump(GPRInfo::regT4);

    slowCase.link(&jit);
    
    // Here we don't know anything, so revert to the full slow path.
    
    slowPathFor(jit, vm, operationVirtualCall);
    
    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Virtual %s slow path thunk",
        callLinkInfo.callMode() == CallMode::Regular ? "call" : callLinkInfo.callMode() == CallMode::Tail ? "tail call" : "construct"));
}

enum ThunkEntryType { EnterViaCall, EnterViaJumpWithSavedTags, EnterViaJumpWithoutSavedTags };

static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall)
{
    // FIXME: This should be able to log ShadowChicken prologue packets.
    // https://bugs.webkit.org/show_bug.cgi?id=155689
    
    int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);
    
    JSInterfaceJIT jit(vm);

    switch (entryType) {
    case EnterViaCall:
        jit.emitFunctionPrologue();
        break;
    case EnterViaJumpWithSavedTags:
#if USE(JSVALUE64)
        // We're coming from a specialized thunk that has saved the prior tag registers' contents.
        // Restore them now.
#if CPU(ARM64)
        jit.popPair(JSInterfaceJIT::tagTypeNumberRegister, JSInterfaceJIT::tagMaskRegister);
#else
        jit.pop(JSInterfaceJIT::tagMaskRegister);
        jit.pop(JSInterfaceJIT::tagTypeNumberRegister);
#endif
#endif
        break;
    case EnterViaJumpWithoutSavedTags:
        jit.move(JSInterfaceJIT::framePointerRegister, JSInterfaceJIT::stackPointerRegister);
        break;
    }

    jit.emitPutToCallFrameHeader(0, CallFrameSlot::codeBlock);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86)
    // Calling convention:      f(ecx, edx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); // Align stack after prologue.

    // call the function
    jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, JSInterfaceJIT::regT1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);

#elif CPU(X86_64)
#if !OS(WINDOWS)
    // Calling convention:      f(edi, esi, edx, ecx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);

    jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, X86Registers::esi);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

#else
    // Calling convention:      f(ecx, edx, r8, r9, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    // Leave space for the callee parameter home addresses.
    // At this point the stack is aligned to 16 bytes, but if this changes at some point, we need to emit code to align it.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, X86Registers::edx);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

#elif CPU(ARM64)
    COMPILE_ASSERT(ARM64Registers::x0 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_0);
    COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1);
    COMPILE_ASSERT(ARM64Registers::x2 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_2);

    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0);

    jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, ARM64Registers::x1);
    jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2);
    jit.call(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction));
#elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
#if CPU(MIPS)
    // Allocate 16 bytes (8-byte aligned) of unused stack space for 4 arguments.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif

    // Calling convention is f(argumentGPR0, argumentGPR1, ...).
    // Host function signature is f(ExecState*).
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);

    jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, JSInterfaceJIT::argumentGPR1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

#if CPU(MIPS)
    // Restore stack space
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif
#else
#error "JIT not supported on this platform."
    UNUSED_PARAM(executableOffsetToFunction);
    abortWithReason(TGNotSupported);
#endif

    // Check for an exception
#if USE(JSVALUE64)
    jit.load64(vm->addressOfException(), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
#else
    JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::AbsoluteAddress(vm->addressOfException()),
        JSInterfaceJIT::TrustedImm32(0));
#endif

    jit.emitFunctionEpilogue();
    // Return.
    jit.ret();

    // Handle an exception
    exceptionHandler.link(&jit);

    jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(-12), JSInterfaceJIT::stackPointerRegister);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT0);
    jit.push(JSInterfaceJIT::regT0);
#else
#if OS(WINDOWS)
    // Allocate space on stack for the 4 parameter registers.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);
#endif
    jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), JSInterfaceJIT::regT3);
    jit.call(JSInterfaceJIT::regT3);
#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#elif OS(WINDOWS)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

    jit.jumpToExceptionHandler();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("native %s%s trampoline", entryType == EnterViaJumpWithSavedTags ? "Tail With Saved Tags " : entryType == EnterViaJumpWithoutSavedTags ? "Tail Without Saved Tags " : "", toCString(kind).data()));
}

MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForCall);
}

MacroAssemblerCodeRef nativeTailCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForCall, EnterViaJumpWithSavedTags);
}

MacroAssemblerCodeRef nativeTailCallWithoutSavedTagsGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForCall, EnterViaJumpWithoutSavedTags);
}

MacroAssemblerCodeRef nativeConstructGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForConstruct);
}

MacroAssemblerCodeRef arityFixupGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);

    // We enter with fixup count in argumentGPR0
    // We have the guarantee that a0, a1, a2, t3, t4 and t5 (or t0 for Windows) are all distinct :-)
#if USE(JSVALUE64)
#if OS(WINDOWS)
    const GPRReg extraTemp = JSInterfaceJIT::regT0;
#else
    const GPRReg extraTemp = JSInterfaceJIT::regT5;
#endif
#  if CPU(X86_64)
    jit.pop(JSInterfaceJIT::regT4);
#  endif
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrameSlot::argumentCount * sizeof(Register)), JSInterfaceJIT::argumentGPR2);
    jit.add32(JSInterfaceJIT::TrustedImm32(CallFrame::headerSizeInRegisters), JSInterfaceJIT::argumentGPR2);

    // Check to see if we have extra slots we can use
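    // The stack pointer stays aligned to stackAlignmentRegisters(), so the
    // non-aligned remainder of the fixup count can be absorbed by padding slots
    // that already sit above the frame: fill those with undefined in place,
    // then shift the frame down only by the remaining aligned amount.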
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
    jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
    JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), extraTemp);
    JSInterfaceJIT::Label fillExtraSlots(jit.label());
    jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight));
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
    jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
    jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
    JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
    noExtraSlot.link(&jit);

    jit.neg64(JSInterfaceJIT::argumentGPR0);

    // Move current frame down argumentGPR0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load64(JSInterfaceJIT::regT3, extraTemp);
    jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);

    // Fill in argumentGPR0 missing arg slots with undefined
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), extraTemp);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);

    // Adjust call frame register and stack pointer to account for missing args
    jit.move(JSInterfaceJIT::argumentGPR0, extraTemp);
    jit.lshift64(JSInterfaceJIT::TrustedImm32(3), extraTemp);
    jit.addPtr(extraTemp, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(extraTemp, JSInterfaceJIT::stackPointerRegister);

    done.link(&jit);

#  if CPU(X86_64)
    jit.push(JSInterfaceJIT::regT4);
#  endif
    jit.ret();
#else
#  if CPU(X86)
    jit.pop(JSInterfaceJIT::regT4);
#  endif
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrameSlot::argumentCount * sizeof(Register)), JSInterfaceJIT::argumentGPR2);
    jit.add32(JSInterfaceJIT::TrustedImm32(CallFrame::headerSizeInRegisters), JSInterfaceJIT::argumentGPR2);

    // Check to see if we have extra slots we can use
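    // Same alignment trick as the JSVALUE64 path above, except payload and tag
    // halves are stored separately.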
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
    jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
    JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
    JSInterfaceJIT::Label fillExtraSlots(jit.label());
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, PayloadOffset));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, TagOffset));
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
    jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
    jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
    JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
    noExtraSlot.link(&jit);

    jit.neg32(JSInterfaceJIT::argumentGPR0);

    // Move current frame down argumentGPR0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, PayloadOffset), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, TagOffset), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);

    // Fill in argumentGPR0 missing arg slots with undefined
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);

    // Adjust call frame register and stack pointer to account for missing args
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::regT5);
    jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT5);
    jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::stackPointerRegister);

    done.link(&jit);

#  if CPU(X86)
    jit.push(JSInterfaceJIT::regT4);
#  endif
    jit.ret();
#endif

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("fixup arity"));
}

MacroAssemblerCodeRef unreachableGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);

    jit.breakpoint();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("unreachable thunk"));
}

static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
{
    // load string
    jit.loadJSStringArgument(*vm, SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0));

    // load index
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));

    // Load the character
    SpecializedThunkJIT::JumpList is16Bit;
    SpecializedThunkJIT::JumpList cont8Bit;
    // Load the string flags
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::flagsOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::dataOffset()), SpecializedThunkJIT::regT0);
    is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
    cont8Bit.link(&jit);
}

static void charToString(SpecializedThunkJIT& jit, VM* vm, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
{
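    // The VM keeps a lazily-filled table of single-character strings for code
    // points below 0x100; larger code points, and entries that have not been
    // materialized yet (null), fail over to the generic path.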
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::TrustedImm32(0x100)));
    jit.move(MacroAssembler::TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), scratch);
    jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
    jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
}

MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charCodeAt");
}

MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charAt");
}

MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    // load char code
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "fromCharCode");
}

MacroAssemblerCodeRef clz32ThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntArgJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArgJump);

    SpecializedThunkJIT::Label convertedArgumentReentry(&jit);
    jit.countLeadingZeros32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnInt32(SpecializedThunkJIT::regT1);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArgJump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(convertedArgumentReentry, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArgJump);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "clz32");
}

MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointSqrt())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "sqrt");
}


#define UnaryDoubleOpWrapper(function) function##Wrapper
enum MathThunkCallingConvention { };
typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);

#if CPU(X86_64) && COMPILER(GCC_OR_CLANG) && (OS(DARWIN) || OS(LINUX))

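// Entering the thunk via a call leaves the stack 8 bytes off 16-byte
// alignment; the pushq/popq pair below restores alignment around the inner
// call, while the double argument and result stay in %xmm0 per the ABI.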
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "pushq %rax\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "popq %rcx\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC_OR_CLANG) && OS(LINUX) && defined(__PIC__)
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "pushl %ebx\n" \
        "subl $20, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call __x86.get_pc_thunk.bx\n" \
        "addl $_GLOBAL_OFFSET_TABLE_, %ebx\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $20, %esp\n" \
        "popl %ebx\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC_OR_CLANG) && (OS(DARWIN) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "subl $20, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $20, %esp\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM_THUMB2) && COMPILER(GCC_OR_CLANG) && PLATFORM(IOS)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        ".thumb\n" \
        ".thumb_func " THUMB_FUNC_PARAM(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "push {lr}\n" \
        "vmov r0, r1, d0\n" \
        "blx " GLOBAL_REFERENCE(function) "\n" \
        "vmov d0, r0, r1\n" \
        "pop {lr}\n" \
        "bx lr\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM64)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "b " GLOBAL_REFERENCE(function) "\n" \
        ".previous" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(MSVC) && OS(WINDOWS)

// MSVC does not allow functions like floor to be called directly from inline assembly, so we need to wrap them.
static double (_cdecl *floorFunction)(double) = floor;
static double (_cdecl *ceilFunction)(double) = ceil;
static double (_cdecl *truncFunction)(double) = trunc;
static double (_cdecl *expFunction)(double) = exp;
static double (_cdecl *logFunction)(double) = log;
static double (_cdecl *jsRoundFunction)(double) = jsRound;

#define defineUnaryDoubleOpWrapper(function) \
    extern "C" __declspec(naked) MathThunkCallingConvention function##Thunk(MathThunkCallingConvention) \
    { \
        __asm \
        { \
        __asm sub esp, 20 \
        __asm movsd mmword ptr [esp], xmm0  \
        __asm call function##Function \
        __asm fstp qword ptr [esp] \
        __asm movsd xmm0, mmword ptr [esp] \
        __asm add esp, 20 \
        __asm ret \
        } \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#else

#define defineUnaryDoubleOpWrapper(function) \
    static MathThunk UnaryDoubleOpWrapper(function) = 0
#endif

defineUnaryDoubleOpWrapper(jsRound);
defineUnaryDoubleOpWrapper(exp);
defineUnaryDoubleOpWrapper(log);
defineUnaryDoubleOpWrapper(floor);
defineUnaryDoubleOpWrapper(ceil);
defineUnaryDoubleOpWrapper(trunc);

static const double halfConstant = 0.5;

MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntJump;
    if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);

    if (jit.supportsFloatingPointRounding()) {
        SpecializedThunkJIT::JumpList doubleResult;
        jit.floorDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT0);
        return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
    }

    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
}

MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    if (jit.supportsFloatingPointRounding())
        jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    else
        jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));

    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "ceil");
}

MacroAssemblerCodeRef truncThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(trunc) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    if (jit.supportsFloatingPointRounding())
        jit.roundTowardZeroDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    else
        jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(trunc));

    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "trunc");
}

MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
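        // For the non-negative doubles handled here, rounding halves up is the
        // same as floor(x + 0.5), and truncating toward zero equals flooring
        // for non-negative values, so adding 0.5 and truncating gives the result.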
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), SpecializedThunkJIT::fpRegT1);
        jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "round");
}

MacroAssemblerCodeRef expThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(exp))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "exp");
}

MacroAssemblerCodeRef logThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(log))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "log");
}

MacroAssemblerCodeRef absThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointAbs())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

#if USE(JSVALUE64)
    unsigned virtualRegisterIndex = CallFrame::argumentOffset(0);
    jit.load64(AssemblyHelpers::addressFor(virtualRegisterIndex), GPRInfo::regT0);
    MacroAssembler::Jump notInteger = jit.branch64(MacroAssembler::Below, GPRInfo::regT0, GPRInfo::tagTypeNumberRegister);

    // Abs Int32.
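    // Branchless abs: regT1 = x >> 31 is all ones for negative x and zero
    // otherwise, so (x + regT1) ^ regT1 yields |x|. E.g. x = -5:
    // (-5 + -1) ^ -1 == -6 ^ -1 == 5.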
    jit.rshift32(GPRInfo::regT0, MacroAssembler::TrustedImm32(31), GPRInfo::regT1);
    jit.add32(GPRInfo::regT1, GPRInfo::regT0);
    jit.xor32(GPRInfo::regT1, GPRInfo::regT0);

    // IntMin cannot be inverted.
    MacroAssembler::Jump integerIsIntMin = jit.branchTest32(MacroAssembler::Signed, GPRInfo::regT0);

    // Box and finish.
    jit.or64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
    MacroAssembler::Jump doneWithIntegers = jit.jump();

    // Handle Doubles.
    notInteger.link(&jit);
    jit.appendFailure(jit.branchTest64(MacroAssembler::Zero, GPRInfo::regT0, GPRInfo::tagTypeNumberRegister));
    jit.unboxDoubleWithoutAssertions(GPRInfo::regT0, GPRInfo::regT0, FPRInfo::fpRegT0);
    MacroAssembler::Label absFPR0Label = jit.label();
    jit.absDouble(FPRInfo::fpRegT0, FPRInfo::fpRegT1);
    jit.boxDouble(FPRInfo::fpRegT1, GPRInfo::regT0);

    // Tail.
    doneWithIntegers.link(&jit);
    jit.returnJSValue(GPRInfo::regT0);

    // We know the value of regT0 is IntMin. We could load that value from memory but
    // it is simpler to just convert it.
    integerIsIntMin.link(&jit);
    jit.convertInt32ToDouble(GPRInfo::regT0, FPRInfo::fpRegT0);
    jit.jump().linkTo(absFPR0Label, &jit);
#else
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
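    // Same branchless abs trick as the JSVALUE64 path above.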
    jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
    jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branchTest32(MacroAssembler::Signed, SpecializedThunkJIT::regT0));
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    // Shame about the double int conversion here.
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    jit.returnDouble(SpecializedThunkJIT::fpRegT1);
#endif
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "abs");
}

MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 2);
    MacroAssembler::Jump nonIntArg0Jump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
    SpecializedThunkJIT::Label doneLoadingArg0(&jit);
    MacroAssembler::Jump nonIntArg1Jump;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT1, nonIntArg1Jump);
    SpecializedThunkJIT::Label doneLoadingArg1(&jit);
    jit.mul32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.returnInt32(SpecializedThunkJIT::regT0);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg0Jump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg0Jump);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg1Jump.link(&jit);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg1Jump);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "imul");
}

MacroAssemblerCodeRef randomThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 0);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

#if USE(JSVALUE64)
    jit.emitRandomThunk(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT3, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "random");
#else
    return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
#endif
}

MacroAssemblerCodeRef boundThisNoArgsFunctionCallGenerator(VM* vm)
{
    CCallHelpers jit(vm);
    
    jit.emitFunctionPrologue();
    
    // Set up our call frame.
    jit.storePtr(CCallHelpers::TrustedImmPtr(nullptr), CCallHelpers::addressFor(CallFrameSlot::codeBlock));
    jit.store32(CCallHelpers::TrustedImm32(0), CCallHelpers::tagFor(CallFrameSlot::argumentCount));

    unsigned extraStackNeeded = 0;
    if (unsigned stackMisalignment = sizeof(CallerFrameAndPC) % stackAlignmentBytes())
        extraStackNeeded = stackAlignmentBytes() - stackMisalignment;
    
    // We need to forward all of the arguments that we were passed. We aren't allowed to do a tail
    // call here as far as I can tell. At least not so long as the generic path doesn't do a tail
    // call, since that would be way too weird.
    
    // The formula for the number of stack bytes needed given some number of parameters (including
    // this) is:
    //
    //     stackAlign((numParams + CallFrameHeaderSize) * sizeof(Register) - sizeof(CallerFrameAndPC))
    //
    // Probably we want to write this as:
    //
    //     stackAlign((numParams + (CallFrameHeaderSize - CallerFrameAndPCSize)) * sizeof(Register))
    //
    // That's really all there is to this. We have all the registers we need to do it.
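    //
    // As a sketch, assuming a 64-bit build where sizeof(Register) == 8,
    // CallFrameHeaderSize == 5, CallerFrameAndPCSize == 2, and 16-byte stack
    // alignment: numParams == 3 gives stackAlign((3 + 3) * 8) == 48 bytes.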
    
    jit.load32(CCallHelpers::payloadFor(CallFrameSlot::argumentCount), GPRInfo::regT1);
    jit.add32(CCallHelpers::TrustedImm32(CallFrame::headerSizeInRegisters - CallerFrameAndPC::sizeInRegisters), GPRInfo::regT1, GPRInfo::regT2);
    jit.lshift32(CCallHelpers::TrustedImm32(3), GPRInfo::regT2);
    jit.add32(CCallHelpers::TrustedImm32(stackAlignmentBytes() - 1), GPRInfo::regT2);
    jit.and32(CCallHelpers::TrustedImm32(-stackAlignmentBytes()), GPRInfo::regT2);
    
    if (extraStackNeeded)
        jit.add32(CCallHelpers::TrustedImm32(extraStackNeeded), GPRInfo::regT2);
    
    // At this point regT1 has the actual argument count and regT2 has the amount of stack we will
    // need.
    
    jit.subPtr(GPRInfo::regT2, CCallHelpers::stackPointerRegister);

    // Do basic callee frame setup, including 'this'.
    
    jit.loadCell(CCallHelpers::addressFor(CallFrameSlot::callee), GPRInfo::regT3);

    jit.store32(GPRInfo::regT1, CCallHelpers::calleeFramePayloadSlot(CallFrameSlot::argumentCount));
    
    JSValueRegs valueRegs = JSValueRegs::withTwoAvailableRegs(GPRInfo::regT0, GPRInfo::regT2);
    jit.loadValue(CCallHelpers::Address(GPRInfo::regT3, JSBoundFunction::offsetOfBoundThis()), valueRegs);
    jit.storeValue(valueRegs, CCallHelpers::calleeArgumentSlot(0));

    jit.loadPtr(CCallHelpers::Address(GPRInfo::regT3, JSBoundFunction::offsetOfTargetFunction()), GPRInfo::regT3);
    jit.storeCell(GPRInfo::regT3, CCallHelpers::calleeFrameSlot(CallFrameSlot::callee));
    
    // OK, now we can start copying. This is a simple matter of copying parameters from the caller's
    // frame to the callee's frame. Note that we know that regT1 (the argument count) must be at
    // least 1.
    jit.sub32(CCallHelpers::TrustedImm32(1), GPRInfo::regT1);
    CCallHelpers::Jump done = jit.branchTest32(CCallHelpers::Zero, GPRInfo::regT1);
    
    CCallHelpers::Label loop = jit.label();
    jit.sub32(CCallHelpers::TrustedImm32(1), GPRInfo::regT1);
    jit.loadValue(CCallHelpers::addressFor(virtualRegisterForArgument(1)).indexedBy(GPRInfo::regT1, CCallHelpers::TimesEight), valueRegs);
    jit.storeValue(valueRegs, CCallHelpers::calleeArgumentSlot(1).indexedBy(GPRInfo::regT1, CCallHelpers::TimesEight));
    jit.branchTest32(CCallHelpers::NonZero, GPRInfo::regT1).linkTo(loop, &jit);
    
    done.link(&jit);
    
    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT3, JSFunction::offsetOfExecutable()),
        GPRInfo::regT0);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT0, ExecutableBase::offsetOfJITCodeWithArityCheckFor(CodeForCall)),
        GPRInfo::regT0);
    CCallHelpers::Jump noCode = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT0);
    
    emitPointerValidation(jit, GPRInfo::regT0);
    jit.call(GPRInfo::regT0);
    
    jit.emitFunctionEpilogue();
    jit.ret();
    
    LinkBuffer linkBuffer(*vm, jit, GLOBAL_THUNK_ID);
    linkBuffer.link(noCode, CodeLocationLabel(vm->jitStubs->ctiNativeTailCallWithoutSavedTags(vm)));
    return FINALIZE_CODE(
        linkBuffer, ("Specialized thunk for bound function calls with no arguments"));
}

#if ENABLE(WEBASSEMBLY)
MacroAssemblerCodeRef throwExceptionFromWasmThunkGenerator(VM* vm)
{
    CCallHelpers jit(vm);

    // The caller must move the ExceptionType into argumentGPR1 before jumping here.
    // We're allowed to use temp registers here, but not callee saves.
    {
        RegisterSet usedRegisters = RegisterSet::stubUnavailableRegisters();
        usedRegisters.set(GPRInfo::argumentGPR1);
        jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(usedRegisters);
    }

    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
    CCallHelpers::Call call = jit.call();
    jit.jumpToExceptionHandler();

    void (*throwWasmException)(ExecState*, Wasm::ExceptionType) = [] (ExecState* exec, Wasm::ExceptionType type) {
        VM* vm = &exec->vm();
        NativeCallFrameTracer tracer(vm, exec);

        {
            auto throwScope = DECLARE_THROW_SCOPE(*vm);
            JSGlobalObject* globalObject = vm->topJSWebAssemblyInstance->globalObject();

            JSWebAssemblyRuntimeError* error = JSWebAssemblyRuntimeError::create(
                exec, globalObject->WebAssemblyRuntimeErrorStructure(), Wasm::errorMessageForExceptionType(type));
            throwException(exec, throwScope, error);
        }

        genericUnwind(vm, exec);
        ASSERT(!!vm->callFrameForCatch);
    };

    LinkBuffer linkBuffer(*vm, jit, GLOBAL_THUNK_ID);
    linkBuffer.link(call, throwWasmException);
    return FINALIZE_CODE(
        linkBuffer, ("Throw exception from Wasm"));
}
#endif // ENABLE(WEBASSEMBLY)

} // namespace JSC

#endif // ENABLE(JIT)