/*
 * Copyright (C) 2010, 2012, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "ThunkGenerators.h"

#include "CodeBlock.h"
#include "Operations.h"
#include "SpecializedThunkJIT.h"
#include <wtf/InlineASM.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/StringImpl.h>

#if ENABLE(JIT)

namespace JSC {

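// Shared slow case for the call trampolines below: the callee is not a JSFunction,
// so finish setting up a canonical call frame (caller's ScopeChain, ReturnPC, and a
// null CodeBlock) and call the C++ "not a JS function" stub. The call site is
// returned so each trampoline can link it to the appropriate cti function.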
static JSInterfaceJIT::Call generateSlowCaseFor(VM* vm, JSInterfaceJIT& jit)
{
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT2, JSInterfaceJIT::regT2);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT2, JSStack::ScopeChain);

    // Also initialize ReturnPC and CodeBlock, like a JS function would.
    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3);
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);
    jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);

    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
    jit.restoreArgumentReference();
    JSInterfaceJIT::Call callNotJSFunction = jit.call();
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::callFrameRegister);
    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
    jit.ret();

    return callNotJSFunction;
}

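// Generates the trampoline used by unlinked call sites. If the callee in regT0 is a
// JSFunction, finish initializing the call frame (ScopeChain, ReturnPC), call the
// lazy-link stub to compile and link the callee, and jump to the entry point it
// returns in regT0. Any other callee falls through to the slow case above.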
static MacroAssemblerCodeRef linkForGenerator(VM* vm, FunctionPtr lazyLink, FunctionPtr notJSFunction, const char* name)
{
    JSInterfaceJIT jit;

    JSInterfaceJIT::JumpList slowCase;

#if USE(JSVALUE64)
    slowCase.append(jit.emitJumpIfNotJSCell(JSInterfaceJIT::regT0));
    slowCase.append(jit.emitJumpIfNotType(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1, JSFunctionType));
#else // USE(JSVALUE64)
    slowCase.append(jit.branch32(JSInterfaceJIT::NotEqual, JSInterfaceJIT::regT1, JSInterfaceJIT::TrustedImm32(JSValue::CellTag)));
    slowCase.append(jit.emitJumpIfNotType(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1, JSFunctionType));
#endif // USE(JSVALUE64)

    // Finish canonical initialization before JS function call.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfScopeChain()), JSInterfaceJIT::regT1);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    // Also initialize ReturnPC for use by lazy linking and exceptions.
    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3);
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);

    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
    jit.restoreArgumentReference();
    JSInterfaceJIT::Call callLazyLink = jit.call();
    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
    jit.jump(JSInterfaceJIT::regT0);

    slowCase.link(&jit);
    JSInterfaceJIT::Call callNotJSFunction = generateSlowCaseFor(vm, jit);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    patchBuffer.link(callLazyLink, lazyLink);
    patchBuffer.link(callNotJSFunction, notJSFunction);

    return FINALIZE_CODE(patchBuffer, ("link %s trampoline", name));
}

MacroAssemblerCodeRef linkCallGenerator(VM* vm)
{
    return linkForGenerator(vm, FunctionPtr(cti_vm_lazyLinkCall), FunctionPtr(cti_op_call_NotJSFunction), "call");
}

MacroAssemblerCodeRef linkConstructGenerator(VM* vm)
{
    return linkForGenerator(vm, FunctionPtr(cti_vm_lazyLinkConstruct), FunctionPtr(cti_op_construct_NotJSConstruct), "construct");
}

MacroAssemblerCodeRef linkClosureCallGenerator(VM* vm)
{
    return linkForGenerator(vm, FunctionPtr(cti_vm_lazyLinkClosureCall), FunctionPtr(cti_op_call_NotJSFunction), "closure call");
}

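// Generates the virtual call trampoline: for a JSFunction callee, load its
// FunctionExecutable and, if the stored parameter count for this kind is still
// negative (used here as the "no JIT code yet" marker), call the compile stub
// first; then jump to the executable's JIT code with arity check. Non-function
// callees take the shared slow case.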
static MacroAssemblerCodeRef virtualForGenerator(VM* vm, FunctionPtr compile, FunctionPtr notJSFunction, const char* name, CodeSpecializationKind kind)
{
    JSInterfaceJIT jit;

    JSInterfaceJIT::JumpList slowCase;

#if USE(JSVALUE64)
    slowCase.append(jit.emitJumpIfNotJSCell(JSInterfaceJIT::regT0));
#else // USE(JSVALUE64)
    slowCase.append(jit.branch32(JSInterfaceJIT::NotEqual, JSInterfaceJIT::regT1, JSInterfaceJIT::TrustedImm32(JSValue::CellTag)));
#endif // USE(JSVALUE64)
    slowCase.append(jit.emitJumpIfNotType(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1, JSFunctionType));

    // Finish canonical initialization before JS function call.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfScopeChain()), JSInterfaceJIT::regT1);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump hasCodeBlock1 = jit.branch32(JSInterfaceJIT::GreaterThanOrEqual, JSInterfaceJIT::Address(JSInterfaceJIT::regT2, FunctionExecutable::offsetOfNumParametersFor(kind)), JSInterfaceJIT::TrustedImm32(0));
    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
    jit.restoreArgumentReference();
    JSInterfaceJIT::Call callCompile = jit.call();
    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);

    hasCodeBlock1.link(&jit);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, FunctionExecutable::offsetOfJITCodeWithArityCheckFor(kind)), JSInterfaceJIT::regT0);
#if !ASSERT_DISABLED
    JSInterfaceJIT::Jump ok = jit.branchTestPtr(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT0);
    jit.breakpoint();
    ok.link(&jit);
#endif
    jit.jump(JSInterfaceJIT::regT0);

    slowCase.link(&jit);
    JSInterfaceJIT::Call callNotJSFunction = generateSlowCaseFor(vm, jit);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    patchBuffer.link(callCompile, compile);
    patchBuffer.link(callNotJSFunction, notJSFunction);

    return FINALIZE_CODE(patchBuffer, ("virtual %s trampoline", name));
}

MacroAssemblerCodeRef virtualCallGenerator(VM* vm)
{
    return virtualForGenerator(vm, FunctionPtr(cti_op_call_jitCompile), FunctionPtr(cti_op_call_NotJSFunction), "call", CodeForCall);
}

MacroAssemblerCodeRef virtualConstructGenerator(VM* vm)
{
    return virtualForGenerator(vm, FunctionPtr(cti_op_construct_jitCompile), FunctionPtr(cti_op_construct_NotJSConstruct), "construct", CodeForConstruct);
}

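// Fast path for the inline-cached 'length' access on strings: verify that the
// argument is a JSString via a structure check, load its length, and box it as an
// int32. Every failure case tail-calls cti_op_get_by_id_string_fail.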
MacroAssemblerCodeRef stringLengthTrampolineGenerator(VM* vm)
{
    JSInterfaceJIT jit;

#if USE(JSVALUE64)
    // Check that the argument in regT0 is a JSString.
    JSInterfaceJIT::Jump failureCases1 = jit.emitJumpIfNotJSCell(JSInterfaceJIT::regT0);
    JSInterfaceJIT::Jump failureCases2 = jit.branchPtr(
        JSInterfaceJIT::NotEqual, JSInterfaceJIT::Address(
            JSInterfaceJIT::regT0, JSCell::structureOffset()),
        JSInterfaceJIT::TrustedImmPtr(vm->stringStructure.get()));

    // Checks out okay! Get the length from the JSString.
    jit.load32(
        JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSString::offsetOfLength()),
        JSInterfaceJIT::regT0);

    JSInterfaceJIT::Jump failureCases3 = jit.branch32(
        JSInterfaceJIT::LessThan, JSInterfaceJIT::regT0, JSInterfaceJIT::TrustedImm32(0));

    // regT0 contains a 64-bit value (positive, zero-extended), so we don't need to sign-extend here.
    jit.emitFastArithIntToImmNoCheck(JSInterfaceJIT::regT0, JSInterfaceJIT::regT0);

#else // USE(JSVALUE64)
    // regT0 holds the payload, regT1 holds the tag.

    JSInterfaceJIT::Jump failureCases1 = jit.branch32(
        JSInterfaceJIT::NotEqual, JSInterfaceJIT::regT1,
        JSInterfaceJIT::TrustedImm32(JSValue::CellTag));
    JSInterfaceJIT::Jump failureCases2 = jit.branchPtr(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSCell::structureOffset()),
        JSInterfaceJIT::TrustedImmPtr(vm->stringStructure.get()));

    // Checks out okay! Get the length from the JSString.
    jit.load32(
        JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSString::offsetOfLength()),
        JSInterfaceJIT::regT2);

    JSInterfaceJIT::Jump failureCases3 = jit.branch32(
        JSInterfaceJIT::Above, JSInterfaceJIT::regT2, JSInterfaceJIT::TrustedImm32(INT_MAX));
    jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::regT0);
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::Int32Tag), JSInterfaceJIT::regT1);
#endif // USE(JSVALUE64)

    jit.ret();

    JSInterfaceJIT::Call failureCases1Call = jit.makeTailRecursiveCall(failureCases1);
    JSInterfaceJIT::Call failureCases2Call = jit.makeTailRecursiveCall(failureCases2);
    JSInterfaceJIT::Call failureCases3Call = jit.makeTailRecursiveCall(failureCases3);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);

    patchBuffer.link(failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
    patchBuffer.link(failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
    patchBuffer.link(failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));

    return FINALIZE_CODE(patchBuffer, ("string length trampoline"));
}

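// Generates the trampoline that calls a host (native) function. Each architecture
// block does the same work in its own calling convention: propagate the caller's
// ScopeChain, record the ReturnPC, pass the ExecState* as the first argument, and
// call the NativeFunction stored in the callee's executable. Afterwards, check
// vm->exception and, if one was thrown, stash the return address in
// vm->exceptionLocation and return through ctiVMThrowTrampoline instead.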
static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind)
{
    int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);

    JSInterfaceJIT jit;

    jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.peek(JSInterfaceJIT::regT1);
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ReturnPC);

    // Calling convention:      f(ecx, edx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister); // Align stack after call.

    // call the function
    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister);

#elif CPU(X86_64)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.peek(JSInterfaceJIT::regT1);
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ReturnPC);

#if !OS(WINDOWS)
    // Calling convention:      f(edi, esi, edx, ecx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); // Align stack after call.

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#else
    // Calling convention:      f(ecx, edx, r8, r9, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    // Leave space for the callee parameter home addresses and align the stack.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

#elif CPU(ARM)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);

    // Calling convention:      f(r0 == regT0, r1 == regT1, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, ARMRegisters::r0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARMRegisters::r1);
    jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.loadPtr(JSInterfaceJIT::Address(ARMRegisters::r1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);

#elif CPU(SH4)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);

    // Calling convention: f(r0 == regT4, r1 == regT5, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT4);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT5);
    jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT5, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);

    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction), JSInterfaceJIT::regT0);
    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);

#elif CPU(MIPS)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);

    // Calling convention:      f(a0, a1, a2, a3);
    // Host function signature: f(ExecState*);

    // Allocate stack space for 16 bytes (8-byte aligned)
    // 16 bytes (unused) for 4 arguments
    jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);

    // Setup arg0
    jit.move(JSInterfaceJIT::callFrameRegister, MIPSRegisters::a0);

    // Call
    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, MIPSRegisters::a2);
    jit.loadPtr(JSInterfaceJIT::Address(MIPSRegisters::a2, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

    // Restore stack space
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);

    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
#else
#error "JIT not supported on this platform."
    UNUSED_PARAM(executableOffsetToFunction);
    breakpoint();
#endif

    // Check for an exception
#if USE(JSVALUE64)
    jit.load64(&(vm->exception), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
#else
    JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::AbsoluteAddress(reinterpret_cast<char*>(&vm->exception) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
        JSInterfaceJIT::TrustedImm32(JSValue::EmptyValueTag));
#endif

    // Return.
    jit.ret();

    // Handle an exception
    exceptionHandler.link(&jit);

    // Grab the return address.
    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT1);

    jit.move(JSInterfaceJIT::TrustedImmPtr(&vm->exceptionLocation), JSInterfaceJIT::regT2);
    jit.storePtr(JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
    jit.poke(JSInterfaceJIT::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));

    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
    // Set the return address.
    jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), JSInterfaceJIT::regT1);
    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT1);

    jit.ret();

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("native %s trampoline", toCString(kind).data()));
}

MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForCall);
}

MacroAssemblerCodeRef nativeConstructGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForConstruct);
}

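// Shared helper for charAt/charCodeAt: load the 'this' JSString and the int32 index
// argument, bail to the slow path if the string's character data is unavailable
// (zero value pointer, e.g. a rope) or the index is out of range (the unsigned
// compare also filters negative indices), then load the code unit as 8-bit or
// 16-bit based on the StringImpl flags, leaving it in regT0.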
static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
{
    // load string
    jit.loadJSStringArgument(*vm, SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0));

    // load index
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));

    // Load the character
    SpecializedThunkJIT::JumpList is16Bit;
    SpecializedThunkJIT::JumpList cont8Bit;
    // Load the string flags
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::flagsOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::dataOffset()), SpecializedThunkJIT::regT0);
    is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
    cont8Bit.link(&jit);
}

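// Converts a character code in 'src' (must be < 0x100) into the corresponding
// single-character JSString from the VM's small strings table, failing over to the
// slow path for larger code units or when the cached string has not been created
// yet (null table entry).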
static void charToString(SpecializedThunkJIT& jit, VM* vm, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
{
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::TrustedImm32(0x100)));
    jit.move(MacroAssembler::TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), scratch);
    jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
    jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
}

MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    stringCharLoad(jit, vm);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "charCodeAt");
}

MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    stringCharLoad(jit, vm);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "charAt");
}

MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    // load char code
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "fromCharCode");
}

MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    if (!jit.supportsFloatingPointSqrt())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "sqrt");
}

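// Support for the Math thunks below. defineUnaryDoubleOpWrapper(function) emits a
// small per-ABI assembly shim that takes a double in the floating-point
// argument/return register, calls the C function, and returns the result the same
// way, so callDoubleToDoublePreservingReturn() can invoke libm-style functions
// cheaply. On platforms without a shim the wrapper is null and the thunk
// generators fall back to the ordinary native call stub.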
#define UnaryDoubleOpWrapper(function) function##Wrapper
enum MathThunkCallingConvention { };
typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
extern "C" {

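// jsRound implements Math.round's rounding rule: halfway cases round toward
// +Infinity, e.g. jsRound(4.5) == 5 and jsRound(-4.5) == -4.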
double jsRound(double) REFERENCED_FROM_ASM;
double jsRound(double d)
{
    double integer = ceil(d);
    return integer - (integer - d > 0.5);
}

}

#if CPU(X86_64) && COMPILER(GCC) && (PLATFORM(MAC) || OS(LINUX))

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC) && (PLATFORM(MAC) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "subl $8, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $8, %esp\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM_THUMB2) && COMPILER(GCC) && PLATFORM(IOS)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        ".thumb\n" \
        ".thumb_func " THUMB_FUNC_PARAM(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "push {lr}\n" \
        "vmov r0, r1, d0\n" \
        "blx " GLOBAL_REFERENCE(function) "\n" \
        "vmov d0, r0, r1\n" \
        "pop {lr}\n" \
        "bx lr\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
#else

#define defineUnaryDoubleOpWrapper(function) \
    static MathThunk UnaryDoubleOpWrapper(function) = 0
#endif

defineUnaryDoubleOpWrapper(jsRound);
defineUnaryDoubleOpWrapper(exp);
defineUnaryDoubleOpWrapper(log);
defineUnaryDoubleOpWrapper(floor);
defineUnaryDoubleOpWrapper(ceil);

static const double oneConstant = 1.0;
static const double negativeHalfConstant = -0.5;
static const double zeroConstant = 0.0;
static const double halfConstant = 0.5;

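// Math.floor fast path: int32 arguments are returned unchanged. For doubles, when
// truncation is supported, positive values are truncated directly, zero is returned
// as-is (preserving -0), and negative or unordered values go through the C floor()
// wrapper; the result comes back as an int32 when it fits and as a double otherwise.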
MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    MacroAssembler::Jump nonIntJump;
    if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "floor");
}

MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "ceil");
}

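// Math.round fast path, same shape as floorThunkGenerator: int32 arguments return
// immediately; positive doubles are rounded by adding 0.5 and truncating; zero,
// negative, and unordered values fall back to the jsRound wrapper above.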
MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.loadDouble(&halfConstant, SpecializedThunkJIT::fpRegT1);
        jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "round");
}

MacroAssemblerCodeRef expThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(exp))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "exp");
}

MacroAssemblerCodeRef logThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(log))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "log");
}

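// Math.abs fast path. For int32 arguments it uses the branchless identity
// abs(x) == (x + (x >> 31)) ^ (x >> 31) and bails out for INT_MIN (0x80000000),
// whose absolute value does not fit in an int32. Double arguments use absDouble.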
MacroAssemblerCodeRef absThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    if (!jit.supportsFloatingPointAbs())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
    jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branch32(MacroAssembler::Equal, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1 << 31)));
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    // Shame about the double int conversion here.
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "abs");
}

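// Math.pow fast path. Non-negative int32 exponents are handled by exponentiation
// by squaring on the double base. The only non-integer exponent handled inline is
// -0.5 with a base greater than 1, computed as 1 / sqrt(base); everything else
// fails over to the native call stub.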
MacroAssemblerCodeRef powThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(2);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDouble(&oneConstant, SpecializedThunkJIT::fpRegT1);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    MacroAssembler::Jump nonIntExponent;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
    jit.appendFailure(jit.branch32(MacroAssembler::LessThan, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(0)));

    MacroAssembler::Jump exponentIsZero = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0);
    MacroAssembler::Label startLoop(jit.label());

    MacroAssembler::Jump exponentIsEven = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1));
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    exponentIsEven.link(&jit);
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.rshift32(MacroAssembler::TrustedImm32(1), SpecializedThunkJIT::regT0);
    jit.branchTest32(MacroAssembler::NonZero, SpecializedThunkJIT::regT0).linkTo(startLoop, &jit);

    exponentIsZero.link(&jit);

    {
        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    }

    if (jit.supportsFloatingPointSqrt()) {
        nonIntExponent.link(&jit);
        jit.loadDouble(&negativeHalfConstant, SpecializedThunkJIT::fpRegT3);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
        jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.divDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);

        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    } else
        jit.appendFailure(nonIntExponent);

    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "pow");
}

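// Math.imul fast path: both arguments are converted to int32 and multiplied with a
// plain 32-bit multiply, giving imul's wrap-around semantics. Double arguments are
// truncated to int32 when the CPU supports it (the operand is zeroed if truncation
// fails); otherwise a non-int32 argument forces the slow path.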
MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(2);
    MacroAssembler::Jump nonIntArg0Jump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
    SpecializedThunkJIT::Label doneLoadingArg0(&jit);
    MacroAssembler::Jump nonIntArg1Jump;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT1, nonIntArg1Jump);
    SpecializedThunkJIT::Label doneLoadingArg1(&jit);
    jit.mul32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.returnInt32(SpecializedThunkJIT::regT0);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg0Jump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
        jit.xor32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0);
        jit.jump(doneLoadingArg0);
    } else
        jit.appendFailure(nonIntArg0Jump);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg1Jump.link(&jit);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
        jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT1);
        jit.jump(doneLoadingArg1);
    } else
        jit.appendFailure(nonIntArg1Jump);

    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "imul");
}

}

#endif // ENABLE(JIT)