/*
 * Copyright (C) 2010, 2012, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "ThunkGenerators.h"

#include "CodeBlock.h"
#include "JITOperations.h"
#include "JSStack.h"
#include "Operations.h"
#include "SpecializedThunkJIT.h"
#include <wtf/InlineASM.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/StringImpl.h>

#if ENABLE(JIT)

namespace JSC {

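// Validate a code or return-address pointer before it is used as a call or
// jump target. In debug builds this traps immediately on a null pointer and
// then probes the first byte, so a bogus pointer faults here rather than at
// the indirect branch; in release builds it compiles away to nothing.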
inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
{
#if !ASSERT_DISABLED
    CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
    jit.breakpoint();
    isNonZero.link(&jit);
    jit.push(pointerGPR);
    jit.load8(pointerGPR, pointerGPR);
    jit.pop(pointerGPR);
#else
    UNUSED_PARAM(jit);
    UNUSED_PARAM(pointerGPR);
#endif
}

MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm)
{
    CCallHelpers jit(vm);

    // We will jump to here if the JIT code thinks it's making a call, but the
    // linking helper (C++ code) decided to throw an exception instead. We will
    // have saved the callReturnIndex in the first arguments of JITStackFrame.
    // Note that the return address will be on the stack at this point, so we
    // need to remove it and drop it on the floor, since we don't care about it.
    // Finally note that the call frame register points at the callee frame, so
    // we need to pop it.
    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::CallerFrame),
        GPRInfo::callFrameRegister);
#if USE(JSVALUE64)
    jit.peek64(GPRInfo::nonPreservedNonReturnGPR, JITSTACKFRAME_ARGS_INDEX);
#else
    jit.peek(GPRInfo::nonPreservedNonReturnGPR, JITSTACKFRAME_ARGS_INDEX);
#endif
    jit.setupArgumentsWithExecState(GPRInfo::nonPreservedNonReturnGPR);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::returnValueGPR2);
    jit.jump(GPRInfo::returnValueGPR2);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Throw exception from call slow path thunk"));
}

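// Common tail for the link and virtual call thunks: spill the return address
// into the frame's ReturnPC slot, record the top call frame, call the given
// C++ operation with the ExecState, then pick the return address back up and
// tail-jump to whatever machine code pointer the operation returned.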
static void slowPathFor(
    CCallHelpers& jit, VM* vm, P_JITOperation_E slowPathFunction)
{
    jit.preserveReturnAddressAfterCall(GPRInfo::nonArgGPR2);
    emitPointerValidation(jit, GPRInfo::nonArgGPR2);
    jit.storePtr(
        GPRInfo::nonArgGPR2,
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ReturnPC));
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
#if USE(JSVALUE64)
    jit.poke64(GPRInfo::nonPreservedNonReturnGPR, JITSTACKFRAME_ARGS_INDEX);
#else
    jit.poke(GPRInfo::nonPreservedNonReturnGPR, JITSTACKFRAME_ARGS_INDEX);
#endif
    jit.setupArgumentsExecState();
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);

    // This slow call will return the address of one of the following:
    // 1) Exception throwing thunk.
    // 2) The thunk that returns the host call's return value.
    // 3) The function to call.
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ReturnPC),
        GPRInfo::nonPreservedNonReturnGPR);
    jit.storePtr(
        CCallHelpers::TrustedImmPtr(0),
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ReturnPC));
    emitPointerValidation(jit, GPRInfo::nonPreservedNonReturnGPR);
    jit.restoreReturnAddressBeforeReturn(GPRInfo::nonPreservedNonReturnGPR);
    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.jump(GPRInfo::returnValueGPR);
}

static MacroAssemblerCodeRef linkForThunkGenerator(
    VM* vm, CodeSpecializationKind kind)
{
    // The return address is on the stack or in the link register. We will hence
    // save the return address to the call frame while we make a C++ function call
    // to perform linking and lazy compilation if necessary. We expect the callee
    // to be in nonArgGPR0/nonArgGPR1 (payload/tag), the call frame to have already
    // been adjusted, nonPreservedNonReturnGPR holds the exception handler index,
    // and all other registers to be available for use. We use JITStackFrame::args
    // to save important information across calls.

    CCallHelpers jit(vm);

    slowPathFor(jit, vm, kind == CodeForCall ? operationLinkCall : operationLinkConstruct);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Link %s slow path thunk", kind == CodeForCall ? "call" : "construct"));
}

MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForCall);
}

MacroAssemblerCodeRef linkConstructThunkGenerator(VM* vm)
{
    return linkForThunkGenerator(vm, CodeForConstruct);
}

// For closure optimizations, we only include calls, since if you're using closures for
// object construction then you're going to lose big time anyway.
MacroAssemblerCodeRef linkClosureCallThunkGenerator(VM* vm)
{
    CCallHelpers jit(vm);

    slowPathFor(jit, vm, operationLinkClosureCall);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Link closure call slow path thunk"));
}

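// Virtual call/construct thunk: the fast path checks that the callee value in
// nonArgGPR0 (tag in nonArgGPR1 on 32-bit) is a cell whose class is JSFunction
// and whose executable reports a non-negative parameter count for this
// specialization (i.e. it already has code to run), writes the function's
// scope chain into the call frame, and tail-jumps to the arity-checking JIT
// entry point. Any failed check falls back to the generic slow path below.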
static MacroAssemblerCodeRef virtualForThunkGenerator(
    VM* vm, CodeSpecializationKind kind)
{
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate JIT operation.

    CCallHelpers jit(vm);

    CCallHelpers::JumpList slowCase;

    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.

#if USE(JSVALUE64)
    slowCase.append(
        jit.branchTest64(
            CCallHelpers::NonZero, GPRInfo::nonArgGPR0, GPRInfo::tagMaskRegister));
#else
    slowCase.append(
        jit.branch32(
            CCallHelpers::NotEqual, GPRInfo::nonArgGPR1,
            CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
    jit.loadPtr(CCallHelpers::Address(GPRInfo::nonArgGPR0, JSCell::structureOffset()), GPRInfo::nonArgGPR2);
    slowCase.append(
        jit.branchPtr(
            CCallHelpers::NotEqual,
            CCallHelpers::Address(GPRInfo::nonArgGPR2, Structure::classInfoOffset()),
            CCallHelpers::TrustedImmPtr(JSFunction::info())));

    // Now we know we have a JSFunction.

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::nonArgGPR0, JSFunction::offsetOfExecutable()),
        GPRInfo::nonArgGPR2);
    slowCase.append(
        jit.branch32(
            CCallHelpers::LessThan,
            CCallHelpers::Address(
                GPRInfo::nonArgGPR2, ExecutableBase::offsetOfNumParametersFor(kind)),
            CCallHelpers::TrustedImm32(0)));

    // Now we know that we have a CodeBlock, and we're committed to making a fast
    // call.

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::nonArgGPR0, JSFunction::offsetOfScopeChain()),
        GPRInfo::nonArgGPR1);
#if USE(JSVALUE64)
    jit.store64(
        GPRInfo::nonArgGPR1,
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain));
#else
    jit.storePtr(
        GPRInfo::nonArgGPR1,
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain +
            OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
    jit.store32(
        CCallHelpers::TrustedImm32(JSValue::CellTag),
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain +
            OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
#endif

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::nonArgGPR2, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind)),
        GPRInfo::regT0);

    // Make a tail call. This will return back to JIT code.
    emitPointerValidation(jit, GPRInfo::regT0);
    jit.jump(GPRInfo::regT0);

    slowCase.link(&jit);

    // Here we don't know anything, so revert to the full slow path.

    slowPathFor(jit, vm, kind == CodeForCall ? operationVirtualCall : operationVirtualConstruct);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Virtual %s slow path thunk", kind == CodeForCall ? "call" : "construct"));
}

MacroAssemblerCodeRef virtualCallThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForCall);
}

MacroAssemblerCodeRef virtualConstructThunkGenerator(VM* vm)
{
    return virtualForThunkGenerator(vm, CodeForConstruct);
}

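// Trampoline for reading the "length" property of a string: verify that the
// base value is a cell with the VM's string structure, load the length, and
// return it as an int32; any failed check tail-calls the generic
// cti_op_get_by_id_string_fail stub instead.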
MacroAssemblerCodeRef stringLengthTrampolineGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);

#if USE(JSVALUE64)
    // Check that regT0 is a string cell.
    JSInterfaceJIT::Jump failureCases1 = jit.emitJumpIfNotJSCell(JSInterfaceJIT::regT0);
    JSInterfaceJIT::Jump failureCases2 = jit.branchPtr(
        JSInterfaceJIT::NotEqual, JSInterfaceJIT::Address(
            JSInterfaceJIT::regT0, JSCell::structureOffset()),
        JSInterfaceJIT::TrustedImmPtr(vm->stringStructure.get()));

    // Checks out okay! - get the length from the JSString.
    jit.load32(
        JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSString::offsetOfLength()),
        JSInterfaceJIT::regT0);

    JSInterfaceJIT::Jump failureCases3 = jit.branch32(
        JSInterfaceJIT::LessThan, JSInterfaceJIT::regT0, JSInterfaceJIT::TrustedImm32(0));

    // regT0 now holds a non-negative, zero-extended 64-bit value, so no sign extension is needed.
    jit.emitFastArithIntToImmNoCheck(JSInterfaceJIT::regT0, JSInterfaceJIT::regT0);

#else // USE(JSVALUE64)
    // regT0 holds payload, regT1 holds tag

    JSInterfaceJIT::Jump failureCases1 = jit.branch32(
        JSInterfaceJIT::NotEqual, JSInterfaceJIT::regT1,
        JSInterfaceJIT::TrustedImm32(JSValue::CellTag));
    JSInterfaceJIT::Jump failureCases2 = jit.branchPtr(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSCell::structureOffset()),
        JSInterfaceJIT::TrustedImmPtr(vm->stringStructure.get()));

    // Checks out okay! - get the length from the JSString.
    jit.load32(
        JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSString::offsetOfLength()),
        JSInterfaceJIT::regT2);

    JSInterfaceJIT::Jump failureCases3 = jit.branch32(
        JSInterfaceJIT::Above, JSInterfaceJIT::regT2, JSInterfaceJIT::TrustedImm32(INT_MAX));
    jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::regT0);
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::Int32Tag), JSInterfaceJIT::regT1);
#endif // USE(JSVALUE64)

    jit.ret();

    JSInterfaceJIT::Call failureCases1Call = jit.makeTailRecursiveCall(failureCases1);
    JSInterfaceJIT::Call failureCases2Call = jit.makeTailRecursiveCall(failureCases2);
    JSInterfaceJIT::Call failureCases3Call = jit.makeTailRecursiveCall(failureCases3);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);

    patchBuffer.link(failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
    patchBuffer.link(failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
    patchBuffer.link(failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));

    return FINALIZE_CODE(patchBuffer, ("string length trampoline"));
}

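// Builds the trampoline that transfers control from JIT code into a host
// (native) function. It stores a null CodeBlock into the frame header,
// records the top call frame, marshals the ExecState* into the first argument
// register of the platform's C calling convention, calls the NativeFunction
// stored in the callee's executable, then checks the VM's pending exception
// and either returns to the caller or hands off to ctiVMHandleException.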
static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind)
{
    int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);

    JSInterfaceJIT jit(vm);

    jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.peek(JSInterfaceJIT::regT1);
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ReturnPC);

    // Calling convention:      f(ecx, edx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister); // Align stack after call.

    // Call the function.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister);

#elif CPU(X86_64)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.peek(JSInterfaceJIT::regT1);
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ReturnPC);

#if !OS(WINDOWS)
    // Calling convention:      f(edi, esi, edx, ecx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); // Align stack after call.

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#else
    // Calling convention:      f(ecx, edx, r8, r9, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    // Leave space for the callee parameter home addresses and align the stack.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

#elif CPU(ARM)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);

    // Calling convention:      f(r0 == regT0, r1 == regT1, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, ARMRegisters::r0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARMRegisters::r1);
    jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.loadPtr(JSInterfaceJIT::Address(ARMRegisters::r1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);

#elif CPU(SH4)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);

    // Calling convention: f(r0 == regT4, r1 == regT5, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT4);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT5);
    jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT5, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);

    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction), JSInterfaceJIT::regT0);
    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);

#elif CPU(MIPS)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);

    // Calling convention:      f(a0, a1, a2, a3);
    // Host function signature: f(ExecState*);

    // Allocate 16 bytes of stack space (8-byte aligned) for the four argument slots (unused).
    jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);

    // Set up arg0
    jit.move(JSInterfaceJIT::callFrameRegister, MIPSRegisters::a0);

    // Call
    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, MIPSRegisters::a2);
    jit.loadPtr(JSInterfaceJIT::Address(MIPSRegisters::a2, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

    // Restore stack space
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);

    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
#else
#error "JIT not supported on this platform."
    UNUSED_PARAM(executableOffsetToFunction);
    breakpoint();
#endif

    // Check for an exception
#if USE(JSVALUE64)
    jit.load64(vm->addressOfException(), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
#else
    JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::AbsoluteAddress(reinterpret_cast<char*>(vm->addressOfException()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
        JSInterfaceJIT::TrustedImm32(JSValue::EmptyValueTag));
#endif

    // Return.
    jit.ret();

    // Handle an exception
    exceptionHandler.link(&jit);

    // Grab the return address.
    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT1);

    jit.move(JSInterfaceJIT::TrustedImmPtr(&vm->exceptionLocation), JSInterfaceJIT::regT2);
    jit.storePtr(JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);

    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

    jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(ctiVMHandleException).value()), JSInterfaceJIT::regT1);
    jit.jump(JSInterfaceJIT::regT1);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("native %s trampoline", toCString(kind).data()));
}

MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForCall);
}

MacroAssemblerCodeRef nativeConstructGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForConstruct);
}

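// Thunk invoked when a function is called with fewer arguments than it
// declares. regT0 holds the number of missing argument slots. The existing
// frame (arguments plus call frame header) is slid down by that many slots,
// the newly exposed argument slots are filled with undefined, and the call
// frame register is adjusted to point at the relocated frame.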
MacroAssemblerCodeRef arityFixup(VM* vm)
{
    JSInterfaceJIT jit(vm);

    // We enter with fixup count in regT0
#if USE(JSVALUE64)
#  if CPU(X86_64)
    jit.pop(JSInterfaceJIT::regT4);
#  endif
    jit.neg64(JSInterfaceJIT::regT0);
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * 8), JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);

    // Move current frame down regT0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load64(JSInterfaceJIT::regT3, JSInterfaceJIT::regT1);
    jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);

    // Fill in regT0 missing arg slots with undefined
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT1);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);

    // Adjust call frame register to account for missing args
    jit.lshift64(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT0);
    jit.addPtr(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister);

#  if CPU(X86_64)
    jit.push(JSInterfaceJIT::regT4);
#  endif
    jit.ret();
#else
#  if CPU(X86)
    jit.pop(JSInterfaceJIT::regT4);
#  endif
    jit.neg32(JSInterfaceJIT::regT0);
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * 8), JSInterfaceJIT::regT2);
    jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);

    // Move current frame down regT0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load32(JSInterfaceJIT::regT3, JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, 4), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);

    // Fill in regT0 missing arg slots with undefined
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT1);
    jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);

    // Adjust call frame register to account for missing args
    jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT0);
    jit.addPtr(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister);

#  if CPU(X86)
    jit.push(JSInterfaceJIT::regT4);
#  endif
    jit.ret();
#endif

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("fixup arity"));
}

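// Loads the character at the int32 index argument out of the JSString 'this'
// argument into regT0. Bails to the slow path for rope strings (null value
// pointer) and for negative or out-of-range indices; otherwise loads an 8-bit
// or 16-bit character according to the StringImpl flags.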
static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
{
    // load string
    jit.loadJSStringArgument(*vm, SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0));

    // load index
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));

    // Load the character
    SpecializedThunkJIT::JumpList is16Bit;
    SpecializedThunkJIT::JumpList cont8Bit;
    // Load the string flags
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::flagsOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::dataOffset()), SpecializedThunkJIT::regT0);
    is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
    cont8Bit.link(&jit);
}

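// Converts the character code in 'src' into the corresponding single-character
// JSString taken from the VM's small-strings table, leaving the cell in 'dst'.
// Fails for codes of 0x100 or above, and for table entries that have not been
// materialized yet (null pointer).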
static void charToString(SpecializedThunkJIT& jit, VM* vm, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
{
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::TrustedImm32(0x100)));
    jit.move(MacroAssembler::TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), scratch);
    jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
    jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
}

MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "charCodeAt");
}

MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "charAt");
}

MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    // load char code
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "fromCharCode");
}

MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointSqrt())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "sqrt");
}


#define UnaryDoubleOpWrapper(function) function##Wrapper
enum MathThunkCallingConvention { };
typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
extern "C" {

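// Math.round rounds halfway cases towards +infinity. jsRound implements this
// by taking ceil(d) and subtracting 1 when ceil(d) - d > 0.5, i.e. when the
// fractional part of d is below one half.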
double jsRound(double) REFERENCED_FROM_ASM;
double jsRound(double d)
{
    double integer = ceil(d);
    return integer - (integer - d > 0.5);
}

}

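// The defineUnaryDoubleOpWrapper thunks adapt the JIT's calling convention,
// which passes and returns the double in the first FP register (xmm0 / d0),
// to the C library function being wrapped: on x86-64 that already matches the
// ABI so the thunk just calls through; on 32-bit x86 the argument is spilled
// to the stack and the x87 result is moved back into xmm0; on ARM (softfp)
// the double is shuffled between d0 and r0/r1 around the call. Platforms
// without a wrapper fall back to the generic native call stub.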
#if CPU(X86_64) && COMPILER(GCC) && (PLATFORM(MAC) || OS(LINUX))

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC) && (PLATFORM(MAC) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "subl $8, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $8, %esp\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM_THUMB2) && COMPILER(GCC) && PLATFORM(IOS)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        ".thumb\n" \
        ".thumb_func " THUMB_FUNC_PARAM(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "push {lr}\n" \
        "vmov r0, r1, d0\n" \
        "blx " GLOBAL_REFERENCE(function) "\n" \
        "vmov d0, r0, r1\n" \
        "pop {lr}\n" \
        "bx lr\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
#else

#define defineUnaryDoubleOpWrapper(function) \
    static MathThunk UnaryDoubleOpWrapper(function) = 0
#endif

defineUnaryDoubleOpWrapper(jsRound);
defineUnaryDoubleOpWrapper(exp);
defineUnaryDoubleOpWrapper(log);
defineUnaryDoubleOpWrapper(floor);
defineUnaryDoubleOpWrapper(ceil);

static const double oneConstant = 1.0;
static const double negativeHalfConstant = -0.5;
static const double zeroConstant = 0.0;
static const double halfConstant = 0.5;

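// The Math thunks below share a pattern: if the argument is already an int32,
// return it unchanged; otherwise load it as a double, call the wrapped C
// function via callDoubleToDoublePreservingReturn, and box the result as an
// int32 when it converts exactly, falling back to a double return otherwise.
// floor and round also try a truncation-based fast path for positive doubles
// when the CPU supports it.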
MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntJump;
    if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "floor");
}

MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "ceil");
}

MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.loadDouble(&halfConstant, SpecializedThunkJIT::fpRegT1);
        jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "round");
}

MacroAssemblerCodeRef expThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(exp))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "exp");
}

MacroAssemblerCodeRef logThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(log))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "log");
}

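// The integer fast path uses the classic branchless absolute value: regT1 is
// set to regT0 >> 31 (all ones for negative values, zero otherwise); adding
// and then xoring that mask yields |regT0|. The one value this cannot
// represent, INT_MIN, is sent to the slow path. Doubles just use absDouble.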
MacroAssemblerCodeRef absThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointAbs())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
    jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branch32(MacroAssembler::Equal, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1 << 31)));
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    // Shame about the double int conversion here.
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "abs");
}

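// Math.pow fast paths: for a non-negative int32 exponent the result is
// computed by square-and-multiply (fpRegT1 accumulates the product while
// fpRegT0 holds successive squarings of the base and the exponent is shifted
// right). A double exponent is only handled when it is exactly -0.5 and the
// base is greater than one, in which case the result is 1 / sqrt(base);
// everything else fails over to the native call.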
MacroAssemblerCodeRef powThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 2);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDouble(&oneConstant, SpecializedThunkJIT::fpRegT1);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    MacroAssembler::Jump nonIntExponent;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
    jit.appendFailure(jit.branch32(MacroAssembler::LessThan, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(0)));

    MacroAssembler::Jump exponentIsZero = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0);
    MacroAssembler::Label startLoop(jit.label());

    MacroAssembler::Jump exponentIsEven = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1));
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    exponentIsEven.link(&jit);
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.rshift32(MacroAssembler::TrustedImm32(1), SpecializedThunkJIT::regT0);
    jit.branchTest32(MacroAssembler::NonZero, SpecializedThunkJIT::regT0).linkTo(startLoop, &jit);

    exponentIsZero.link(&jit);

    {
        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    }

    if (jit.supportsFloatingPointSqrt()) {
        nonIntExponent.link(&jit);
        jit.loadDouble(&negativeHalfConstant, SpecializedThunkJIT::fpRegT3);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
        jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.divDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);

        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    } else
        jit.appendFailure(nonIntExponent);

    return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "pow");
}

MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 2);
    MacroAssembler::Jump nonIntArg0Jump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
    SpecializedThunkJIT::Label doneLoadingArg0(&jit);
    MacroAssembler::Jump nonIntArg1Jump;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT1, nonIntArg1Jump);
    SpecializedThunkJIT::Label doneLoadingArg1(&jit);
    jit.mul32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.returnInt32(SpecializedThunkJIT::regT0);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg0Jump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
        jit.xor32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0);
        jit.jump(doneLoadingArg0);
    } else
        jit.appendFailure(nonIntArg0Jump);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg1Jump.link(&jit);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
        jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT1);
        jit.jump(doneLoadingArg1);
    } else
        jit.appendFailure(nonIntArg1Jump);

    return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "imul");
}

}

#endif // ENABLE(JIT)