Source/JavaScriptCore/jit/ThunkGenerators.cpp
1 /*
2  * Copyright (C) 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
14  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
15  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
17  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
18  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
19  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
20  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
21  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
23  * THE POSSIBILITY OF SUCH DAMAGE.
24  */
25
26 #include "config.h"
27 #include "ThunkGenerators.h"
28
29 #include "CodeBlock.h"
30 #include "DFGSpeculativeJIT.h"
31 #include "JITOperations.h"
32 #include "JSArray.h"
33 #include "JSArrayIterator.h"
34 #include "JSStack.h"
35 #include "MaxFrameExtentForSlowPathCall.h"
36 #include "JSCInlines.h"
37 #include "SpecializedThunkJIT.h"
38 #include <wtf/InlineASM.h>
39 #include <wtf/StringPrintStream.h>
40 #include <wtf/text/StringImpl.h>
41
42 #if ENABLE(JIT)
43
44 namespace JSC {
45
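// In debug builds (!ASSERT_DISABLED), emitPointerValidation traps immediately if the pointer is
// null and then loads a byte through it, so a bad code pointer crashes here rather than at the
// later indirect call. In release builds it compiles to nothing.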
46 inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
47 {
48 #if !ASSERT_DISABLED
49     CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
50     jit.breakpoint();
51     isNonZero.link(&jit);
52     jit.pushToSave(pointerGPR);
53     jit.load8(pointerGPR, pointerGPR);
54     jit.popToRestore(pointerGPR);
55 #else
56     UNUSED_PARAM(jit);
57     UNUSED_PARAM(pointerGPR);
58 #endif
59 }
60
61 // We will jump here if the JIT code tries to make a call, but the
62 // linking helper (C++ code) decides to throw an exception instead.
63 MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm)
64 {
65     CCallHelpers jit(vm);
66     
67     // The call pushed a return address, so we need to pop it back off to re-align the stack,
68     // even though we won't use it.
69     jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
70
71     jit.setupArguments(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister);
72     jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
73     emitPointerValidation(jit, GPRInfo::nonArgGPR0);
74     jit.call(GPRInfo::nonArgGPR0);
75     jit.jumpToExceptionHandler();
76
77     LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
78     return FINALIZE_CODE(patchBuffer, ("Throw exception from call slow path thunk"));
79 }
80
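// Common tail used by the link and virtual-call slow paths: establish a frame, record the call
// frame as vm->topCallFrame, call the given JIT operation with the ExecState, and then tail-jump
// to whatever machine-code pointer that operation returns.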
81 static void slowPathFor(
82     CCallHelpers& jit, VM* vm, P_JITOperation_E slowPathFunction)
83 {
84     jit.emitFunctionPrologue();
85     jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
86     if (maxFrameExtentForSlowPathCall)
87         jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
88     jit.setupArgumentsExecState();
89     jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
90     emitPointerValidation(jit, GPRInfo::nonArgGPR0);
91     jit.call(GPRInfo::nonArgGPR0);
92     if (maxFrameExtentForSlowPathCall)
93         jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
94     
95     // This slow call will return the address of one of the following:
96     // 1) Exception throwing thunk.
97     // 2) The thunk that returns the host call's return value.
98     // 3) The function to call.
99     emitPointerValidation(jit, GPRInfo::returnValueGPR);
100     jit.emitFunctionEpilogue();
101     jit.jump(GPRInfo::returnValueGPR);
102 }
103
104 static MacroAssemblerCodeRef linkForThunkGenerator(
105     VM* vm, CodeSpecializationKind kind, RegisterPreservationMode registers)
106 {
107     // The return address is on the stack or in the link register. We will hence
108     // save the return address to the call frame while we make a C++ function call
109     // to perform linking and lazy compilation if necessary. We expect the callee
110     // to be in regT0/regT1 (payload/tag), the CallFrame to have already
111     // been adjusted, and all other registers to be available for use.
112     
113     CCallHelpers jit(vm);
114     
115     slowPathFor(jit, vm, operationLinkFor(kind, registers));
116     
117     LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
118     return FINALIZE_CODE(
119         patchBuffer,
120         ("Link %s%s slow path thunk", kind == CodeForCall ? "call" : "construct", registers == MustPreserveRegisters ? " that preserves registers" : ""));
121 }
122
123 MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
124 {
125     return linkForThunkGenerator(vm, CodeForCall, RegisterPreservationNotRequired);
126 }
127
128 MacroAssemblerCodeRef linkConstructThunkGenerator(VM* vm)
129 {
130     return linkForThunkGenerator(vm, CodeForConstruct, RegisterPreservationNotRequired);
131 }
132
133 MacroAssemblerCodeRef linkCallThatPreservesRegsThunkGenerator(VM* vm)
134 {
135     return linkForThunkGenerator(vm, CodeForCall, MustPreserveRegisters);
136 }
137
138 MacroAssemblerCodeRef linkConstructThatPreservesRegsThunkGenerator(VM* vm)
139 {
140     return linkForThunkGenerator(vm, CodeForConstruct, MustPreserveRegisters);
141 }
142
143 static MacroAssemblerCodeRef linkClosureCallForThunkGenerator(
144     VM* vm, RegisterPreservationMode registers)
145 {
146     CCallHelpers jit(vm);
147     
148     slowPathFor(jit, vm, operationLinkClosureCallFor(registers));
149     
150     LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
151     return FINALIZE_CODE(patchBuffer, ("Link closure call%s slow path thunk", registers == MustPreserveRegisters ? " that preserves registers" : ""));
152 }
153
154 // For closure optimizations, we only include calls, since if you're using closures for
155 // object construction then you're going to lose big time anyway.
156 MacroAssemblerCodeRef linkClosureCallThunkGenerator(VM* vm)
157 {
158     return linkClosureCallForThunkGenerator(vm, RegisterPreservationNotRequired);
159 }
160
161 MacroAssemblerCodeRef linkClosureCallThatPreservesRegsThunkGenerator(VM* vm)
162 {
163     return linkClosureCallForThunkGenerator(vm, MustPreserveRegisters);
164 }
165
166 static MacroAssemblerCodeRef virtualForThunkGenerator(
167     VM* vm, CodeSpecializationKind kind, RegisterPreservationMode registers)
168 {
169     // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
170     // The return address is on the stack, or in the link register. We will hence
171     // jump to the callee, or save the return address to the call frame while we
172     // make a C++ function call to the appropriate JIT operation.
173
174     CCallHelpers jit(vm);
175     
176     CCallHelpers::JumpList slowCase;
177
178     // FIXME: we should have a story for eliminating these checks. In many cases,
179     // the DFG knows that the value is definitely a cell, or definitely a function.
180     
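    // Fast path: check that the callee is a cell, that its structure's classInfo is JSFunction's,
    // and that its executable already has JIT code (with arity check) for this specialization.
    // Any failure falls through to the slowPathFor call below.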
181 #if USE(JSVALUE64)
182     jit.move(CCallHelpers::TrustedImm64(TagMask), GPRInfo::regT2);
183     
184     slowCase.append(
185         jit.branchTest64(
186             CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::regT2));
187 #else
188     slowCase.append(
189         jit.branch32(
190             CCallHelpers::NotEqual, GPRInfo::regT1,
191             CCallHelpers::TrustedImm32(JSValue::CellTag)));
192 #endif
193     AssemblyHelpers::emitLoadStructure(jit, GPRInfo::regT0, GPRInfo::regT2, GPRInfo::regT1);
194     slowCase.append(
195         jit.branchPtr(
196             CCallHelpers::NotEqual,
197             CCallHelpers::Address(GPRInfo::regT2, Structure::classInfoOffset()),
198             CCallHelpers::TrustedImmPtr(JSFunction::info())));
199     
200     // Now we know we have a JSFunction.
201     
202     jit.loadPtr(
203         CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
204         GPRInfo::regT2);
205     jit.loadPtr(
206         CCallHelpers::Address(
207             GPRInfo::regT2, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind, registers)),
208         GPRInfo::regT2);
209     slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT2));
210     
211     // Now we know that we have a CodeBlock, and we're committed to making a fast
212     // call.
213     
214     jit.loadPtr(
215         CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfScopeChain()),
216         GPRInfo::regT1);
217 #if USE(JSVALUE64)
218     jit.emitPutToCallFrameHeaderBeforePrologue(GPRInfo::regT1, JSStack::ScopeChain);
219 #else
220     jit.emitPutPayloadToCallFrameHeaderBeforePrologue(GPRInfo::regT1, JSStack::ScopeChain);
221     jit.emitPutTagToCallFrameHeaderBeforePrologue(CCallHelpers::TrustedImm32(JSValue::CellTag),
222         JSStack::ScopeChain);
223 #endif
224     
225     // Make a tail call. This will return back to JIT code.
226     emitPointerValidation(jit, GPRInfo::regT2);
227     jit.jump(GPRInfo::regT2);
228
229     slowCase.link(&jit);
230     
231     // Here we don't know anything, so revert to the full slow path.
232     
233     slowPathFor(jit, vm, operationVirtualFor(kind, registers));
234     
235     LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
236     return FINALIZE_CODE(
237         patchBuffer,
238         ("Virtual %s%s slow path thunk", kind == CodeForCall ? "call" : "construct", registers == MustPreserveRegisters ? " that preserves registers" : ""));
239 }
240
241 MacroAssemblerCodeRef virtualCallThunkGenerator(VM* vm)
242 {
243     return virtualForThunkGenerator(vm, CodeForCall, RegisterPreservationNotRequired);
244 }
245
246 MacroAssemblerCodeRef virtualConstructThunkGenerator(VM* vm)
247 {
248     return virtualForThunkGenerator(vm, CodeForConstruct, RegisterPreservationNotRequired);
249 }
250
251 MacroAssemblerCodeRef virtualCallThatPreservesRegsThunkGenerator(VM* vm)
252 {
253     return virtualForThunkGenerator(vm, CodeForCall, MustPreserveRegisters);
254 }
255
256 MacroAssemblerCodeRef virtualConstructThatPreservesRegsThunkGenerator(VM* vm)
257 {
258     return virtualForThunkGenerator(vm, CodeForConstruct, MustPreserveRegisters);
259 }
260
261 enum ThunkEntryType { EnterViaCall, EnterViaJump };
262
263 static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall)
264 {
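    // Builds the trampoline that calls a host (native) function: set up the call frame, pass the
    // ExecState in the platform's first argument register, call through the NativeExecutable's
    // function pointer, then check vm->exception and either return or jump to the exception handler.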
265     int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);
266     
267     JSInterfaceJIT jit(vm);
268
269     if (entryType == EnterViaCall)
270         jit.emitFunctionPrologue();
271
272     jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
273     jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
274
275 #if CPU(X86)
276     // Load caller frame's scope chain into this callframe so that whatever we call can
277     // get to its global data.
278     jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT0);
279     jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
280     jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
281
282     // Calling convention:      f(ecx, edx, ...);
283     // Host function signature: f(ExecState*);
284     jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);
285
286     jit.subPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); // Align stack after prologue.
287
288     // Call the function.
289     jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1);
290     jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
291     jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));
292
293     jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);
294
295 #elif CPU(X86_64)
296     // Load caller frame's scope chain into this callframe so that whatever we call can
297     // get to its global data.
298     jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT0);
299     jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
300     jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
301 #if !OS(WINDOWS)
302     // Calling convention:      f(edi, esi, edx, ecx, ...);
303     // Host function signature: f(ExecState*);
304     jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);
305
306     jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
307     jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
308     jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));
309
310 #else
311     // Calling convention:      f(ecx, edx, r8, r9, ...);
312     // Host function signature: f(ExecState*);
313     jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);
314
315     // Leave space for the callee parameter home addresses and align the stack.
316     jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
317
318     jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx);
319     jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
320     jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));
321
322     jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
323 #endif
324
325 #elif CPU(ARM64)
326     COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT1, prev_callframe_not_trampled_by_T1);
327     COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT3, prev_callframe_not_trampled_by_T3);
328     COMPILE_ASSERT(ARM64Registers::x0 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_0);
329     COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1);
330     COMPILE_ASSERT(ARM64Registers::x2 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_2);
331
332     // Load caller frame's scope chain into this callframe so that whatever we call can
333     // get to its global data.
334     jit.emitGetCallerFrameFromCallFrameHeaderPtr(ARM64Registers::x3);
335     jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, ARM64Registers::x3);
336     jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
337
338     // Host function signature: f(ExecState*);
339     jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0);
340
341     jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARM64Registers::x1);
342     jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2);
343     jit.call(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction));
344 #elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
345     // Load caller frame's scope chain into this callframe so that whatever we call can get to its global data.
346     jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT2);
347     jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
348     jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
349
350 #if CPU(MIPS)
351     // Allocate 16 bytes of (unused) stack space, 8-byte aligned, for the 4 argument slots.
352     jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
353 #endif
354
355     // Calling convention is f(argumentGPR0, argumentGPR1, ...).
356     // Host function signature is f(ExecState*).
357     jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);
358
359     jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::argumentGPR1);
360     jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
361     jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));
362
363 #if CPU(MIPS)
364     // Restore stack space
365     jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
366 #endif
367 #else
368 #error "JIT not supported on this platform."
369     UNUSED_PARAM(executableOffsetToFunction);
370     breakpoint();
371 #endif
372
373     // Check for an exception
374 #if USE(JSVALUE64)
375     jit.load64(vm->addressOfException(), JSInterfaceJIT::regT2);
376     JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
377 #else
378     JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
379         JSInterfaceJIT::NotEqual,
380         JSInterfaceJIT::AbsoluteAddress(reinterpret_cast<char*>(vm->addressOfException()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
381         JSInterfaceJIT::TrustedImm32(JSValue::EmptyValueTag));
382 #endif
383
384     jit.emitFunctionEpilogue();
385     // Return.
386     jit.ret();
387
388     // Handle an exception
389     exceptionHandler.link(&jit);
390
391     jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
392
393 #if CPU(X86) && USE(JSVALUE32_64)
394     jit.addPtr(JSInterfaceJIT::TrustedImm32(-12), JSInterfaceJIT::stackPointerRegister);
395     jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::regT0);
396     jit.push(JSInterfaceJIT::regT0);
397 #else
398     jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::argumentGPR0);
399 #endif
400     jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), JSInterfaceJIT::regT3);
401     jit.call(JSInterfaceJIT::regT3);
402 #if CPU(X86) && USE(JSVALUE32_64)
403     jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
404 #endif
405
406     jit.jumpToExceptionHandler();
407
408     LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
409     return FINALIZE_CODE(patchBuffer, ("native %s%s trampoline", entryType == EnterViaJump ? "Tail " : "", toCString(kind).data()));
410 }
411
412 MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
413 {
414     return nativeForGenerator(vm, CodeForCall);
415 }
416
417 MacroAssemblerCodeRef nativeTailCallGenerator(VM* vm)
418 {
419     return nativeForGenerator(vm, CodeForCall, EnterViaJump);
420 }
421
422 MacroAssemblerCodeRef nativeConstructGenerator(VM* vm)
423 {
424     return nativeForGenerator(vm, CodeForConstruct);
425 }
426
427 MacroAssemblerCodeRef arityFixup(VM* vm)
428 {
429     JSInterfaceJIT jit(vm);
430
431     // We enter with the fixup count (in aligned stack units) in regT0 and the return thunk in
432     // regT5.
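    // The frame (header plus the arguments that were actually passed) is copied down by the fixup
    // count, the newly exposed argument slots are filled with undefined, the call frame and stack
    // pointers are rebased, and the return thunk in regT5 is installed as the new return PC.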
433 #if USE(JSVALUE64)
434 #  if CPU(X86_64)
435     jit.pop(JSInterfaceJIT::regT4);
436 #  endif
437     jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0);
438     jit.neg64(JSInterfaceJIT::regT0);
439     jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT6);
440     jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2);
441     jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);
442
443     // Move current frame down regT0 number of slots
444     JSInterfaceJIT::Label copyLoop(jit.label());
445     jit.load64(JSInterfaceJIT::regT6, JSInterfaceJIT::regT1);
446     jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
447     jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6);
448     jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);
449
450     // Fill in regT0 - 1 missing arg slots with undefined
451     jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
452     jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT1);
453     jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2);
454     JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
455     jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
456     jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6);
457     jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);
458     
459     // Adjust call frame register and stack pointer to account for missing args
460     jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1);
461     jit.lshift64(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1);
462     jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister);
463     jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister);
464
465     // Save the original return PC.
466     jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1);
467     jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
468     
469     // Install the new return PC.
470     jit.storePtr(GPRInfo::regT5, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()));
471
472 #  if CPU(X86_64)
473     jit.push(JSInterfaceJIT::regT4);
474 #  endif
475     jit.ret();
476 #else
477 #  if CPU(X86)
478     jit.pop(JSInterfaceJIT::regT4);
479 #  endif
480     jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0);
481     jit.neg32(JSInterfaceJIT::regT0);
482     jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
483     jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2);
484     jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);
485
486     // Move current frame down regT0 number of slots
487     JSInterfaceJIT::Label copyLoop(jit.label());
488     jit.load32(JSInterfaceJIT::regT3, JSInterfaceJIT::regT1);
489     jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
490     jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, 4), JSInterfaceJIT::regT1);
491     jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));
492     jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
493     jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);
494
495     // Fill in regT0 - 1 missing arg slots with undefined
496     jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
497     jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2);
498     JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
499     jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT1);
500     jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
501     jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT1);
502     jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));
503
504     jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
505     jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);
506
507     // Adjust call frame register and stack pointer to account for missing args
508     jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1);
509     jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1);
510     jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister);
511     jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister);
512
513     // Save the original return PC.
514     jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1);
515     jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
516     
517     // Install the new return PC.
518     jit.storePtr(GPRInfo::regT5, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()));
519     
520 #  if CPU(X86)
521     jit.push(JSInterfaceJIT::regT4);
522 #  endif
523     jit.ret();
524 #endif
525
526     LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
527     return FINALIZE_CODE(patchBuffer, ("fixup arity"));
528 }
529
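// Loads the character at the requested index of the |this| JSString into regT0. Rope strings
// (null value pointer) and out-of-bounds indices append to the failure list, which falls back to
// the ordinary native call.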
530 static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
531 {
532     // load string
533     jit.loadJSStringArgument(*vm, SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);
534
535     // Load string length to regT2, and start the process of loading the data pointer into regT0
536     jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
537     jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
538     jit.appendFailure(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0));
539
540     // load index
541     jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index
542
543     // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
544     jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));
545
546     // Load the character
547     SpecializedThunkJIT::JumpList is16Bit;
548     SpecializedThunkJIT::JumpList cont8Bit;
549     // Load the string flags
550     jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::flagsOffset()), SpecializedThunkJIT::regT2);
551     jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::dataOffset()), SpecializedThunkJIT::regT0);
552     is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
553     jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0);
554     cont8Bit.append(jit.jump());
555     is16Bit.link(&jit);
556     jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
557     cont8Bit.link(&jit);
558 }
559
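// Converts a character code in |src| into the corresponding single-character JSString in |dst|
// using the VM's small strings table; codes of 0x100 or more, and table entries that have not
// been materialized yet (null), take the failure path.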
560 static void charToString(SpecializedThunkJIT& jit, VM* vm, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
561 {
562     jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::TrustedImm32(0x100)));
563     jit.move(MacroAssembler::TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), scratch);
564     jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
565     jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
566 }
567
568 MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
569 {
570     SpecializedThunkJIT jit(vm, 1);
571     stringCharLoad(jit, vm);
572     jit.returnInt32(SpecializedThunkJIT::regT0);
573     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charCodeAt");
574 }
575
576 MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
577 {
578     SpecializedThunkJIT jit(vm, 1);
579     stringCharLoad(jit, vm);
580     charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
581     jit.returnJSCell(SpecializedThunkJIT::regT0);
582     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charAt");
583 }
584
585 MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
586 {
587     SpecializedThunkJIT jit(vm, 1);
588     // load char code
589     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
590     charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
591     jit.returnJSCell(SpecializedThunkJIT::regT0);
592     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "fromCharCode");
593 }
594
595 MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
596 {
597     SpecializedThunkJIT jit(vm, 1);
598     if (!jit.supportsFloatingPointSqrt())
599         return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
600
601     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
602     jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
603     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
604     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "sqrt");
605 }
606
607
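// Each UnaryDoubleOpWrapper is a tiny assembly thunk that lets the specialized math thunks call a
// C math function: it adapts between the JIT's register-based convention (the double argument and
// result stay in the floating-point argument/return register) and the platform's C calling
// convention, fixing up stack alignment where needed. On unsupported configurations the wrapper is
// null and the thunk generators fall back to the plain native call.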
608 #define UnaryDoubleOpWrapper(function) function##Wrapper
609 enum MathThunkCallingConvention { };
610 typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
611 extern "C" {
612
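// jsRound implements the JavaScript Math.round semantics of rounding half towards +Infinity:
// jsRound(2.5) == 3, jsRound(-2.5) == -2, jsRound(0.4) == 0. It computes ceil(d) and subtracts 1
// when the ceiling overshot by more than 0.5.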
613 double jsRound(double) REFERENCED_FROM_ASM;
614 double jsRound(double d)
615 {
616     double integer = ceil(d);
617     return integer - (integer - d > 0.5);
618 }
619
620 }
621
622 #if CPU(X86_64) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))
623
624 #define defineUnaryDoubleOpWrapper(function) \
625     asm( \
626         ".text\n" \
627         ".globl " SYMBOL_STRING(function##Thunk) "\n" \
628         HIDE_SYMBOL(function##Thunk) "\n" \
629         SYMBOL_STRING(function##Thunk) ":" "\n" \
630         "pushq %rax\n" \
631         "call " GLOBAL_REFERENCE(function) "\n" \
632         "popq %rcx\n" \
633         "ret\n" \
634     );\
635     extern "C" { \
636         MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
637     } \
638     static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
639
640 #elif CPU(X86) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))
641 #define defineUnaryDoubleOpWrapper(function) \
642     asm( \
643         ".text\n" \
644         ".globl " SYMBOL_STRING(function##Thunk) "\n" \
645         HIDE_SYMBOL(function##Thunk) "\n" \
646         SYMBOL_STRING(function##Thunk) ":" "\n" \
647         "subl $20, %esp\n" \
648         "movsd %xmm0, (%esp) \n" \
649         "call " GLOBAL_REFERENCE(function) "\n" \
650         "fstpl (%esp) \n" \
651         "movsd (%esp), %xmm0 \n" \
652         "addl $20, %esp\n" \
653         "ret\n" \
654     );\
655     extern "C" { \
656         MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
657     } \
658     static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
659
660 #elif CPU(ARM_THUMB2) && COMPILER(GCC) && PLATFORM(IOS)
661
662 #define defineUnaryDoubleOpWrapper(function) \
663     asm( \
664         ".text\n" \
665         ".align 2\n" \
666         ".globl " SYMBOL_STRING(function##Thunk) "\n" \
667         HIDE_SYMBOL(function##Thunk) "\n" \
668         ".thumb\n" \
669         ".thumb_func " THUMB_FUNC_PARAM(function##Thunk) "\n" \
670         SYMBOL_STRING(function##Thunk) ":" "\n" \
671         "push {lr}\n" \
672         "vmov r0, r1, d0\n" \
673         "blx " GLOBAL_REFERENCE(function) "\n" \
674         "vmov d0, r0, r1\n" \
675         "pop {lr}\n" \
676         "bx lr\n" \
677     ); \
678     extern "C" { \
679         MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
680     } \
681     static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
682
683 #elif CPU(ARM64)
684
685 #define defineUnaryDoubleOpWrapper(function) \
686     asm( \
687         ".text\n" \
688         ".align 2\n" \
689         ".globl " SYMBOL_STRING(function##Thunk) "\n" \
690         HIDE_SYMBOL(function##Thunk) "\n" \
691         SYMBOL_STRING(function##Thunk) ":" "\n" \
692         "b " GLOBAL_REFERENCE(function) "\n" \
693     ); \
694     extern "C" { \
695         MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
696     } \
697     static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
698
699 #else
700
701 #define defineUnaryDoubleOpWrapper(function) \
702     static MathThunk UnaryDoubleOpWrapper(function) = 0
703 #endif
704
705 defineUnaryDoubleOpWrapper(jsRound);
706 defineUnaryDoubleOpWrapper(exp);
707 defineUnaryDoubleOpWrapper(log);
708 defineUnaryDoubleOpWrapper(floor);
709 defineUnaryDoubleOpWrapper(ceil);
710
711 static const double oneConstant = 1.0;
712 static const double negativeHalfConstant = -0.5;
713 static const double zeroConstant = 0.0;
714 static const double halfConstant = 0.5;
715     
716 MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
717 {
718     SpecializedThunkJIT jit(vm, 1);
719     MacroAssembler::Jump nonIntJump;
720     if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
721         return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
722     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
723     jit.returnInt32(SpecializedThunkJIT::regT0);
724     nonIntJump.link(&jit);
725     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
726 #if CPU(ARM64)
727     SpecializedThunkJIT::JumpList doubleResult;
728     jit.floorDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
729     jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
730     jit.returnInt32(SpecializedThunkJIT::regT0);
731     doubleResult.link(&jit);
732     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
733 #else
734     SpecializedThunkJIT::Jump intResult;
735     SpecializedThunkJIT::JumpList doubleResult;
736     if (jit.supportsFloatingPointTruncate()) {
737         jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1);
738         doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
739         SpecializedThunkJIT::JumpList slowPath;
740         // Handle the negative doubles in the slow path for now.
741         slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
742         slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0));
743         intResult = jit.jump();
744         slowPath.link(&jit);
745     }
746     jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor));
747     jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
748     if (jit.supportsFloatingPointTruncate())
749         intResult.link(&jit);
750     jit.returnInt32(SpecializedThunkJIT::regT0);
751     doubleResult.link(&jit);
752     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
753 #endif // CPU(ARM64)
754     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
755 }
756
757 MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
758 {
759     SpecializedThunkJIT jit(vm, 1);
760     if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
761         return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
762     MacroAssembler::Jump nonIntJump;
763     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
764     jit.returnInt32(SpecializedThunkJIT::regT0);
765     nonIntJump.link(&jit);
766     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
767 #if CPU(ARM64)
768     jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
769 #else
770     jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
771 #endif // CPU(ARM64)
772     SpecializedThunkJIT::JumpList doubleResult;
773     jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
774     jit.returnInt32(SpecializedThunkJIT::regT0);
775     doubleResult.link(&jit);
776     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
777     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "ceil");
778 }
779
780 MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
781 {
782     SpecializedThunkJIT jit(vm, 1);
783     if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
784         return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
785     MacroAssembler::Jump nonIntJump;
786     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
787     jit.returnInt32(SpecializedThunkJIT::regT0);
788     nonIntJump.link(&jit);
789     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
790     SpecializedThunkJIT::Jump intResult;
791     SpecializedThunkJIT::JumpList doubleResult;
792     if (jit.supportsFloatingPointTruncate()) {
793         jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1);
794         doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
795         SpecializedThunkJIT::JumpList slowPath;
796         // Handle the negative doubles in the slow path for now.
797         slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
798         jit.loadDouble(&halfConstant, SpecializedThunkJIT::fpRegT1);
799         jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
800         slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
801         intResult = jit.jump();
802         slowPath.link(&jit);
803     }
804     jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound));
805     jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
806     if (jit.supportsFloatingPointTruncate())
807         intResult.link(&jit);
808     jit.returnInt32(SpecializedThunkJIT::regT0);
809     doubleResult.link(&jit);
810     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
811     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "round");
812 }
813
814 MacroAssemblerCodeRef expThunkGenerator(VM* vm)
815 {
816     if (!UnaryDoubleOpWrapper(exp))
817         return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
818     SpecializedThunkJIT jit(vm, 1);
819     if (!jit.supportsFloatingPoint())
820         return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
821     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
822     jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
823     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
824     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "exp");
825 }
826
827 MacroAssemblerCodeRef logThunkGenerator(VM* vm)
828 {
829     if (!UnaryDoubleOpWrapper(log))
830         return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
831     SpecializedThunkJIT jit(vm, 1);
832     if (!jit.supportsFloatingPoint())
833         return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
834     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
835     jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
836     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
837     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "log");
838 }
839
840 MacroAssemblerCodeRef absThunkGenerator(VM* vm)
841 {
842     SpecializedThunkJIT jit(vm, 1);
843     if (!jit.supportsFloatingPointAbs())
844         return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
845     MacroAssembler::Jump nonIntJump;
846     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
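    // Branchless integer abs: regT1 = regT0 >> 31 (all ones if negative, zero otherwise), so
    // (regT0 + regT1) ^ regT1 equals |regT0|. The one value with no int32 absolute value,
    // INT32_MIN (1 << 31), is sent to the failure path below.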
847     jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
848     jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
849     jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
850     jit.appendFailure(jit.branch32(MacroAssembler::Equal, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1 << 31)));
851     jit.returnInt32(SpecializedThunkJIT::regT0);
852     nonIntJump.link(&jit);
853     // Shame about the double int conversion here.
854     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
855     jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
856     jit.returnDouble(SpecializedThunkJIT::fpRegT1);
857     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "abs");
858 }
859
860 MacroAssemblerCodeRef powThunkGenerator(VM* vm)
861 {
862     SpecializedThunkJIT jit(vm, 2);
863     if (!jit.supportsFloatingPoint())
864         return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
865
866     jit.loadDouble(&oneConstant, SpecializedThunkJIT::fpRegT1);
867     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
868     MacroAssembler::Jump nonIntExponent;
869     jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
870     jit.appendFailure(jit.branch32(MacroAssembler::LessThan, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(0)));
871     
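    // Integer exponents are handled by square-and-multiply: whenever the low bit of the exponent
    // is set, multiply the accumulator in fpRegT1 (initialized to 1.0 above) by the current power
    // of the base in fpRegT0; then square fpRegT0 and shift the exponent right until it is zero.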
872     MacroAssembler::Jump exponentIsZero = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0);
873     MacroAssembler::Label startLoop(jit.label());
874
875     MacroAssembler::Jump exponentIsEven = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1));
876     jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
877     exponentIsEven.link(&jit);
878     jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
879     jit.rshift32(MacroAssembler::TrustedImm32(1), SpecializedThunkJIT::regT0);
880     jit.branchTest32(MacroAssembler::NonZero, SpecializedThunkJIT::regT0).linkTo(startLoop, &jit);
881
882     exponentIsZero.link(&jit);
883
884     {
885         SpecializedThunkJIT::JumpList doubleResult;
886         jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
887         jit.returnInt32(SpecializedThunkJIT::regT0);
888         doubleResult.link(&jit);
889         jit.returnDouble(SpecializedThunkJIT::fpRegT1);
890     }
891
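    // For non-integer exponents, only Math.pow(x, -0.5) with x > 1 is handled inline, computed as
    // 1 / sqrt(x); any other non-integer exponent (or a base <= 1) takes the failure path.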
892     if (jit.supportsFloatingPointSqrt()) {
893         nonIntExponent.link(&jit);
894         jit.loadDouble(&negativeHalfConstant, SpecializedThunkJIT::fpRegT3);
895         jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
896         jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
897         jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
898         jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
899         jit.divDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
900
901         SpecializedThunkJIT::JumpList doubleResult;
902         jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
903         jit.returnInt32(SpecializedThunkJIT::regT0);
904         doubleResult.link(&jit);
905         jit.returnDouble(SpecializedThunkJIT::fpRegT1);
906     } else
907         jit.appendFailure(nonIntExponent);
908
909     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "pow");
910 }
911
912 MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
913 {
914     SpecializedThunkJIT jit(vm, 2);
915     MacroAssembler::Jump nonIntArg0Jump;
916     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
917     SpecializedThunkJIT::Label doneLoadingArg0(&jit);
918     MacroAssembler::Jump nonIntArg1Jump;
919     jit.loadInt32Argument(1, SpecializedThunkJIT::regT1, nonIntArg1Jump);
920     SpecializedThunkJIT::Label doneLoadingArg1(&jit);
921     jit.mul32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
922     jit.returnInt32(SpecializedThunkJIT::regT0);
923
924     if (jit.supportsFloatingPointTruncate()) {
925         nonIntArg0Jump.link(&jit);
926         jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
927         jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
928         jit.appendFailure(jit.jump());
929     } else
930         jit.appendFailure(nonIntArg0Jump);
931
932     if (jit.supportsFloatingPointTruncate()) {
933         nonIntArg1Jump.link(&jit);
934         jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
935         jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
936         jit.appendFailure(jit.jump());
937     } else
938         jit.appendFailure(nonIntArg1Jump);
939
940     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "imul");
941 }
942
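// Fast path for ArrayIterator next() over key or value iteration: verifies the |this| object is a
// JSArrayIterator, returns the VM's iteration terminator once m_nextIndex reaches the array's
// public length, and otherwise loads the current element directly from Int32, Double, or
// Contiguous/Undecided butterfly storage (holes become undefined) and bumps m_nextIndex. Anything
// else, including key+value iteration, falls back to the native implementation.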
943 static MacroAssemblerCodeRef arrayIteratorNextThunkGenerator(VM* vm, ArrayIterationKind kind)
944 {
945     typedef SpecializedThunkJIT::TrustedImm32 TrustedImm32;
946     typedef SpecializedThunkJIT::TrustedImmPtr TrustedImmPtr;
947     typedef SpecializedThunkJIT::Address Address;
948     typedef SpecializedThunkJIT::BaseIndex BaseIndex;
949     typedef SpecializedThunkJIT::Jump Jump;
950     
951     SpecializedThunkJIT jit(vm);
952     // Make sure we're being called on an array iterator, then load m_iteratedObject and m_nextIndex into regT0 and regT1 respectively.
953     jit.loadArgumentWithSpecificClass(JSArrayIterator::info(), SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT4, SpecializedThunkJIT::regT1);
954
955     // Early exit if we don't have a thunk for this form of iteration
956     jit.appendFailure(jit.branch32(SpecializedThunkJIT::AboveOrEqual, Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfIterationKind()), TrustedImm32(ArrayIterateKeyValue)));
957     
958     jit.loadPtr(Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfIteratedObject()), SpecializedThunkJIT::regT0);
959     
960     jit.load32(Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()), SpecializedThunkJIT::regT1);
961     
962     // Pull out the butterfly from iteratedObject
963     jit.load8(Address(SpecializedThunkJIT::regT0, JSCell::indexingTypeOffset()), SpecializedThunkJIT::regT3);
964     jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
965     
966     jit.and32(TrustedImm32(IndexingShapeMask), SpecializedThunkJIT::regT3);
967
968     Jump notDone = jit.branch32(SpecializedThunkJIT::Below, SpecializedThunkJIT::regT1, Address(SpecializedThunkJIT::regT2, Butterfly::offsetOfPublicLength()));
969     // Return the termination signal to indicate that we've finished
970     jit.move(TrustedImmPtr(vm->iterationTerminator.get()), SpecializedThunkJIT::regT0);
971     jit.returnJSCell(SpecializedThunkJIT::regT0);
972     
973     notDone.link(&jit);
974     
975     if (kind == ArrayIterateKey) {
976         jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
977         jit.returnInt32(SpecializedThunkJIT::regT1);
978         return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "array-iterator-next-key");
979         
980     }
981     ASSERT(kind == ArrayIterateValue);
982     
983     // Okay, now we're returning a value, so make sure we're inside the vector size.
984     jit.appendFailure(jit.branch32(SpecializedThunkJIT::AboveOrEqual, SpecializedThunkJIT::regT1, Address(SpecializedThunkJIT::regT2, Butterfly::offsetOfVectorLength())));
985     
986     // So now we perform inline loads for int32, value/undecided, and double storage
987     Jump undecidedStorage = jit.branch32(SpecializedThunkJIT::Equal, SpecializedThunkJIT::regT3, TrustedImm32(UndecidedShape));
988     Jump notContiguousStorage = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(ContiguousShape));
989     
990     undecidedStorage.link(&jit);
991     
992     jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
993     
994 #if USE(JSVALUE64)
995     jit.load64(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight), SpecializedThunkJIT::regT0);
996     Jump notHole = jit.branchTest64(SpecializedThunkJIT::NonZero, SpecializedThunkJIT::regT0);
997     jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT0);
998     notHole.link(&jit);
999     jit.addPtr(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
1000     jit.returnJSValue(SpecializedThunkJIT::regT0);
1001 #else
1002     jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfTag()), SpecializedThunkJIT::regT3);
1003     Jump notHole = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(JSValue::EmptyValueTag));
1004     jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT1);
1005     jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT0);
1006     jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
1007     jit.returnJSValue(SpecializedThunkJIT::regT0, JSInterfaceJIT::regT1);
1008     notHole.link(&jit);
1009     jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfPayload()), SpecializedThunkJIT::regT0);
1010     jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
1011     jit.move(SpecializedThunkJIT::regT3, SpecializedThunkJIT::regT1);
1012     jit.returnJSValue(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
1013 #endif
1014     notContiguousStorage.link(&jit);
1015     
1016     Jump notInt32Storage = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(Int32Shape));
1017     jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
1018     jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfPayload()), SpecializedThunkJIT::regT0);
1019     jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
1020     jit.returnInt32(SpecializedThunkJIT::regT0);
1021     notInt32Storage.link(&jit);
1022     
1023     jit.appendFailure(jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(DoubleShape)));
1024     jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
1025     jit.loadDouble(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight), SpecializedThunkJIT::fpRegT0);
1026     jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
1027     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
1028     
1029     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "array-iterator-next-value");
1030 }
1031
1032 MacroAssemblerCodeRef arrayIteratorNextKeyThunkGenerator(VM* vm)
1033 {
1034     return arrayIteratorNextThunkGenerator(vm, ArrayIterateKey);
1035 }
1036
1037 MacroAssemblerCodeRef arrayIteratorNextValueThunkGenerator(VM* vm)
1038 {
1039     return arrayIteratorNextThunkGenerator(vm, ArrayIterateValue);
1040 }
1041     
1042 }
1043
1044 #endif // ENABLE(JIT)