/*
 * Copyright (C) 2010, 2012-2014, 2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "ThunkGenerators.h"

#include "CodeBlock.h"
#include "DFGSpeculativeJIT.h"
#include "JITExceptions.h"
#include "JITOperations.h"
#include "JSBoundFunction.h"
#include "MathCommon.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "JSCInlines.h"
#include "JSWebAssemblyInstance.h"
#include "JSWebAssemblyRuntimeError.h"
#include "SpecializedThunkJIT.h"
#include "WasmExceptionType.h"
#include <wtf/InlineASM.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/StringImpl.h>
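// Debug helper used before indirect calls: checks that the target register holds a non-null
// pointer (aborting with TGInvalidPointer otherwise) and touches its first byte so an obviously
// bad pointer faults here rather than at the eventual call site.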
inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
    CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
    jit.abortWithReason(TGInvalidPointer);
    isNonZero.link(&jit);
    jit.pushToSave(pointerGPR);
    jit.load8(pointerGPR, pointerGPR);
    jit.popToRestore(pointerGPR);
// We will jump here if the JIT code tries to make a call, but the
// linking helper (C++ code) decides to throw an exception instead.
MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm)
    CCallHelpers jit(vm);

    // The call pushed a return address, so we need to pop it back off to re-align the stack,
    // even though we won't use it.
    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);

    jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();

    jit.setupArguments(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    jit.jumpToExceptionHandler();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Throw exception from call slow path thunk"));
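// Common tail for the link/virtual-call thunks below: records the top call frame, calls the given
// slow-path operation (which receives the CallLinkInfo in regT2), and then either jumps straight
// to the machine code the operation returned or, for tail calls, restores the caller's frame
// first. The second return value GPR distinguishes the two cases.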
static void slowPathFor(
    CCallHelpers& jit, VM* vm, Sprt_JITOperation_ECli slowPathFunction)
    jit.emitFunctionPrologue();
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
#if OS(WINDOWS) && CPU(X86_64)
    // Windows X86_64 needs some space pointed to by arg0 for return types larger than 64 bits.
    // Other argument values are shifted by 1. Use space on the stack for our two return values.
    // Moving the stack down maxFrameExtentForSlowPathCall bytes gives us room for our 3 arguments
    // and space for the 16 byte return area.
    jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.move(GPRInfo::regT2, GPRInfo::argumentGPR2);
    jit.addPtr(CCallHelpers::TrustedImm32(32), CCallHelpers::stackPointerRegister, GPRInfo::argumentGPR0);
    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR, 8), GPRInfo::returnValueGPR2);
    jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR), GPRInfo::returnValueGPR);
    jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
#else
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.setupArgumentsWithExecState(GPRInfo::regT2);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
#endif
    // This slow call will return the address of one of the following:
    // 1) Exception throwing thunk.
    // 2) Host call return value returner thingy.
    // 3) The function to call.
    // The second return value GPR will hold a non-zero value for tail calls.

    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.emitFunctionEpilogue();

    RELEASE_ASSERT(reinterpret_cast<void*>(KeepTheFrame) == reinterpret_cast<void*>(0));
    CCallHelpers::Jump doNotTrash = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::returnValueGPR2);

    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
    jit.prepareForTailCallSlow(GPRInfo::returnValueGPR);

    doNotTrash.link(&jit);
    jit.jump(GPRInfo::returnValueGPR);
MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
    // The return address is on the stack or in the link register. We will hence
    // save the return address to the call frame while we make a C++ function call
    // to perform linking and lazy compilation if necessary. We expect the callee
    // to be in regT0/regT1 (payload/tag), the CallFrame to have already
    // been adjusted, and all other registers to be available for use.
    CCallHelpers jit(vm);

    slowPathFor(jit, vm, operationLinkCall);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Link call slow path thunk"));
// For closure optimizations, we only include calls, since if you're using closures for
// object construction then you're going to lose big time anyway.
MacroAssemblerCodeRef linkPolymorphicCallThunkGenerator(VM* vm)
    CCallHelpers jit(vm);

    slowPathFor(jit, vm, operationLinkPolymorphicCall);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("Link polymorphic call slow path thunk"));
// FIXME: We should distinguish between a megamorphic virtual call vs. a slow
// path virtual call so that we can enable fast tail calls for megamorphic
// virtual calls by using the shuffler.
// https://bugs.webkit.org/show_bug.cgi?id=148831
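// Builds the virtual call thunk for a given CallLinkInfo: bump the slow path count for the
// profiler, verify that the callee is a JSFunction whose executable already has JIT code for
// this call's specialization, and jump (or tail-call) into that code; anything else falls
// through to operationVirtualCall via slowPathFor().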
MacroAssemblerCodeRef virtualThunkFor(VM* vm, CallLinkInfo& callLinkInfo)
    // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate JIT operation.

    CCallHelpers jit(vm);

    CCallHelpers::JumpList slowCase;

    // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the
    // slow path execution for the profiler.
    jit.add32(
        CCallHelpers::TrustedImm32(1),
        CCallHelpers::Address(GPRInfo::regT2, CallLinkInfo::offsetOfSlowPathCount()));

    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.
#if USE(JSVALUE64)
    slowCase.append(jit.branchTest64(
        CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::tagMaskRegister));
#else
    slowCase.append(jit.branch32(
        CCallHelpers::NotEqual, GPRInfo::regT1,
        CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
    slowCase.append(jit.branchIfNotType(GPRInfo::regT0, JSFunctionType));

    // Now we know we have a JSFunction.

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
        GPRInfo::regT4);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor(
                callLinkInfo.specializationKind())),
        GPRInfo::regT4);
    slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4));

    // Now we know that we have a CodeBlock, and we're committed to making a fast call.

    // Make a tail call. This will return back to JIT code.
    emitPointerValidation(jit, GPRInfo::regT4);
    if (callLinkInfo.isTailCall()) {
        jit.preserveReturnAddressAfterCall(GPRInfo::regT0);
        jit.prepareForTailCallSlow(GPRInfo::regT4);
    }
    jit.jump(GPRInfo::regT4);

    slowCase.link(&jit);

    // Here we don't know anything, so revert to the full slow path.

    slowPathFor(jit, vm, operationVirtualCall);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Virtual %s slow path thunk",
        callLinkInfo.callMode() == CallMode::Regular ? "call" : callLinkInfo.callMode() == CallMode::Tail ? "tail call" : "construct"));
enum ThunkEntryType { EnterViaCall, EnterViaJumpWithSavedTags, EnterViaJumpWithoutSavedTags };
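// Emits the trampoline that transfers control from JIT code to a host (native) function: set up
// the call frame, pass the ExecState* in the platform's first argument register, call the
// NativeFunction stored on the callee's executable, then check the VM for a pending exception
// and hand off to the exception handler if one was thrown.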
static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall)
    // FIXME: This should be able to log ShadowChicken prologue packets.
    // https://bugs.webkit.org/show_bug.cgi?id=155689

    int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);

    JSInterfaceJIT jit(vm);

    jit.emitFunctionPrologue();
    case EnterViaJumpWithSavedTags:
        // We're coming from a specialized thunk that has saved the prior tag registers' contents.
        jit.popPair(JSInterfaceJIT::tagTypeNumberRegister, JSInterfaceJIT::tagMaskRegister);
        jit.pop(JSInterfaceJIT::tagMaskRegister);
        jit.pop(JSInterfaceJIT::tagTypeNumberRegister);
    case EnterViaJumpWithoutSavedTags:
        jit.move(JSInterfaceJIT::framePointerRegister, JSInterfaceJIT::stackPointerRegister);

    jit.emitPutToCallFrameHeader(0, CallFrameSlot::codeBlock);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

    // Calling convention: f(ecx, edx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); // Align stack after prologue.

    jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, JSInterfaceJIT::regT1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);

    // Calling convention: f(edi, esi, edx, ecx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);

    jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, X86Registers::esi);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    // Calling convention: f(ecx, edx, r8, r9, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    // Leave space for the callee parameter home addresses.
    // At this point the stack is aligned to 16 bytes, but if this changes at some point, we need to emit code to align it.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, X86Registers::edx);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    COMPILE_ASSERT(ARM64Registers::x0 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_0);
    COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1);
    COMPILE_ASSERT(ARM64Registers::x2 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_2);

    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0);

    jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, ARM64Registers::x1);
    jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2);
    jit.call(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction));
#elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
    // Allocate stack space for (unused) 16 bytes (8-byte aligned) for 4 arguments.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);

    // Calling convention is f(argumentGPR0, argumentGPR1, ...).
    // Host function signature is f(ExecState*).
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);

    jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, JSInterfaceJIT::argumentGPR1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

    // Restore stack space
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#error "JIT not supported on this platform."
    UNUSED_PARAM(executableOffsetToFunction);
    abortWithReason(TGNotSupported);

    // Check for an exception
    jit.load64(vm->addressOfException(), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::AbsoluteAddress(vm->addressOfException()),
        JSInterfaceJIT::TrustedImm32(0));

    jit.emitFunctionEpilogue();

    // Handle an exception
    exceptionHandler.link(&jit);

    jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(-12), JSInterfaceJIT::stackPointerRegister);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT0);
    jit.push(JSInterfaceJIT::regT0);
    // Allocate space on stack for the 4 parameter registers.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);
    jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), JSInterfaceJIT::regT3);
    jit.call(JSInterfaceJIT::regT3);
#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    jit.jumpToExceptionHandler();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("native %s%s trampoline", entryType == EnterViaJumpWithSavedTags ? "Tail With Saved Tags " : entryType == EnterViaJumpWithoutSavedTags ? "Tail Without Saved Tags " : "", toCString(kind).data()));
MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
    return nativeForGenerator(vm, CodeForCall);

MacroAssemblerCodeRef nativeTailCallGenerator(VM* vm)
    return nativeForGenerator(vm, CodeForCall, EnterViaJumpWithSavedTags);

MacroAssemblerCodeRef nativeTailCallWithoutSavedTagsGenerator(VM* vm)
    return nativeForGenerator(vm, CodeForCall, EnterViaJumpWithoutSavedTags);

MacroAssemblerCodeRef nativeConstructGenerator(VM* vm)
    return nativeForGenerator(vm, CodeForConstruct);
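// Called when a function is invoked with fewer arguments than it declares: slides the frame down
// to make room, fills the newly exposed argument slots with undefined, and adjusts the call frame
// and stack pointers. The fixup count arrives in argumentGPR0.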
MacroAssemblerCodeRef arityFixupGenerator(VM* vm)
    JSInterfaceJIT jit(vm);

    // We enter with fixup count in argumentGPR0
    // We have the guarantee that a0, a1, a2, t3, t4 and t5 (or t0 for Windows) are all distinct :-)

    const GPRReg extraTemp = JSInterfaceJIT::regT0;
    const GPRReg extraTemp = JSInterfaceJIT::regT5;

    jit.pop(JSInterfaceJIT::regT4);

    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrameSlot::argumentCount * sizeof(Register)), JSInterfaceJIT::argumentGPR2);
    jit.add32(JSInterfaceJIT::TrustedImm32(CallFrame::headerSizeInRegisters), JSInterfaceJIT::argumentGPR2);

    // Check to see if we have extra slots we can use
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
    jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
    JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), extraTemp);
    JSInterfaceJIT::Label fillExtraSlots(jit.label());
    jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight));
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
    jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
    jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
    JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
    noExtraSlot.link(&jit);

    jit.neg64(JSInterfaceJIT::argumentGPR0);

    // Move current frame down argumentGPR0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load64(JSInterfaceJIT::regT3, extraTemp);
    jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);

    // Fill in argumentGPR0 missing arg slots with undefined
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), extraTemp);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);

    // Adjust call frame register and stack pointer to account for missing args
    jit.move(JSInterfaceJIT::argumentGPR0, extraTemp);
    jit.lshift64(JSInterfaceJIT::TrustedImm32(3), extraTemp);
    jit.addPtr(extraTemp, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(extraTemp, JSInterfaceJIT::stackPointerRegister);

    jit.push(JSInterfaceJIT::regT4);

    jit.pop(JSInterfaceJIT::regT4);

    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
    jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrameSlot::argumentCount * sizeof(Register)), JSInterfaceJIT::argumentGPR2);
    jit.add32(JSInterfaceJIT::TrustedImm32(CallFrame::headerSizeInRegisters), JSInterfaceJIT::argumentGPR2);

    // Check to see if we have extra slots we can use
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
    jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
    JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
    JSInterfaceJIT::Label fillExtraSlots(jit.label());
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, PayloadOffset));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, TagOffset));
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
    jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
    jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
    JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
    noExtraSlot.link(&jit);

    jit.neg32(JSInterfaceJIT::argumentGPR0);

    // Move current frame down argumentGPR0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, PayloadOffset), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, TagOffset), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);

    // Fill in argumentGPR0 missing arg slots with undefined
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);

    // Adjust call frame register and stack pointer to account for missing args
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::regT5);
    jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT5);
    jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::callFrameRegister);
    jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::stackPointerRegister);

    jit.push(JSInterfaceJIT::regT4);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("fixup arity"));
MacroAssemblerCodeRef unreachableGenerator(VM* vm)
    JSInterfaceJIT jit(vm);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("unreachable thunk"));
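// Loads the character of the 'this' string at the index given by the first argument into regT0,
// bailing out to the generic path for rope strings (no data pointer yet) and for out-of-bounds
// or negative indices.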
static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
    jit.loadJSStringArgument(*vm, SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0));

    jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));

    // Load the character
    SpecializedThunkJIT::JumpList is16Bit;
    SpecializedThunkJIT::JumpList cont8Bit;
    // Load the string flags
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::flagsOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::dataOffset()), SpecializedThunkJIT::regT0);
    is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0);
    cont8Bit.append(jit.jump());
    jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
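// Converts a character code in 'src' (which must be < 0x100) into the corresponding
// single-character JSString from the VM's small strings table, bailing out if the code is too
// large or the table entry has not been materialized yet.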
static void charToString(SpecializedThunkJIT& jit, VM* vm, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::TrustedImm32(0x100)));
    jit.move(MacroAssembler::TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), scratch);
    jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
    jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));

MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charCodeAt");

MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit, vm);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charAt");

MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
    SpecializedThunkJIT jit(vm, 1);
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "fromCharCode");

MacroAssemblerCodeRef clz32ThunkGenerator(VM* vm)
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntArgJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArgJump);

    SpecializedThunkJIT::Label convertedArgumentReentry(&jit);
    jit.countLeadingZeros32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnInt32(SpecializedThunkJIT::regT1);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArgJump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(convertedArgumentReentry, &jit);
        jit.appendFailure(jit.jump());
        jit.appendFailure(nonIntArgJump);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "clz32");

MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointSqrt())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "sqrt");
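// The math thunks below need to call C double->double routines (floor, ceil, exp, log, ...).
// defineUnaryDoubleOpWrapper emits a small per-platform assembly shim so the argument and result
// travel in the floating-point register the JIT already uses, whatever the platform's C calling
// convention is.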
#define UnaryDoubleOpWrapper(function) function##Wrapper
enum MathThunkCallingConvention { };
typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);

#if CPU(X86_64) && COMPILER(GCC_OR_CLANG) && (OS(DARWIN) || OS(LINUX))

#define defineUnaryDoubleOpWrapper(function) \
    ".globl " SYMBOL_STRING(function##Thunk) "\n" \
    HIDE_SYMBOL(function##Thunk) "\n" \
    SYMBOL_STRING(function##Thunk) ":" "\n" \
    "call " GLOBAL_REFERENCE(function) "\n" \
    MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC_OR_CLANG) && OS(LINUX) && defined(__PIC__)
#define defineUnaryDoubleOpWrapper(function) \
    ".globl " SYMBOL_STRING(function##Thunk) "\n" \
    HIDE_SYMBOL(function##Thunk) "\n" \
    SYMBOL_STRING(function##Thunk) ":" "\n" \
    "movsd %xmm0, (%esp) \n" \
    "call __x86.get_pc_thunk.bx\n" \
    "addl $_GLOBAL_OFFSET_TABLE_, %ebx\n" \
    "call " GLOBAL_REFERENCE(function) "\n" \
    "movsd (%esp), %xmm0 \n" \
    MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC_OR_CLANG) && (OS(DARWIN) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
    ".globl " SYMBOL_STRING(function##Thunk) "\n" \
    HIDE_SYMBOL(function##Thunk) "\n" \
    SYMBOL_STRING(function##Thunk) ":" "\n" \
    "movsd %xmm0, (%esp) \n" \
    "call " GLOBAL_REFERENCE(function) "\n" \
    "movsd (%esp), %xmm0 \n" \
    MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM_THUMB2) && COMPILER(GCC_OR_CLANG) && PLATFORM(IOS)

#define defineUnaryDoubleOpWrapper(function) \
    ".globl " SYMBOL_STRING(function##Thunk) "\n" \
    HIDE_SYMBOL(function##Thunk) "\n" \
    ".thumb_func " THUMB_FUNC_PARAM(function##Thunk) "\n" \
    SYMBOL_STRING(function##Thunk) ":" "\n" \
    "vmov r0, r1, d0\n" \
    "blx " GLOBAL_REFERENCE(function) "\n" \
    "vmov d0, r0, r1\n" \
    MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#define defineUnaryDoubleOpWrapper(function) \
    ".globl " SYMBOL_STRING(function##Thunk) "\n" \
    HIDE_SYMBOL(function##Thunk) "\n" \
    SYMBOL_STRING(function##Thunk) ":" "\n" \
    "b " GLOBAL_REFERENCE(function) "\n" \
    MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(MSVC) && OS(WINDOWS)

// MSVC does not accept floor, etc, to be called directly from inline assembly, so we need to wrap these functions.
static double (_cdecl *floorFunction)(double) = floor;
static double (_cdecl *ceilFunction)(double) = ceil;
static double (_cdecl *truncFunction)(double) = trunc;
static double (_cdecl *expFunction)(double) = exp;
static double (_cdecl *logFunction)(double) = log;
static double (_cdecl *jsRoundFunction)(double) = jsRound;

#define defineUnaryDoubleOpWrapper(function) \
    extern "C" __declspec(naked) MathThunkCallingConvention function##Thunk(MathThunkCallingConvention) \
        __asm movsd mmword ptr [esp], xmm0 \
        __asm call function##Function \
        __asm fstp qword ptr [esp] \
        __asm movsd xmm0, mmword ptr [esp] \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#define defineUnaryDoubleOpWrapper(function) \
    static MathThunk UnaryDoubleOpWrapper(function) = 0

defineUnaryDoubleOpWrapper(jsRound);
defineUnaryDoubleOpWrapper(exp);
defineUnaryDoubleOpWrapper(log);
defineUnaryDoubleOpWrapper(floor);
defineUnaryDoubleOpWrapper(ceil);
defineUnaryDoubleOpWrapper(trunc);

static const double halfConstant = 0.5;
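// Math.floor fast path: an int32 argument is already its own floor; a double argument uses the
// hardware floor instruction when the target supports it, otherwise non-negative values are
// truncated directly and everything else falls back to the floor wrapper call.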
MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntJump;
    if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);

    if (jit.supportsFloatingPointRounding()) {
        SpecializedThunkJIT::JumpList doubleResult;
        jit.floorDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT0);
        return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");

    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0));
        intResult = jit.jump();

    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");

MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    if (jit.supportsFloatingPointRounding())
        jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));

    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "ceil");

MacroAssemblerCodeRef truncThunkGenerator(VM* vm)
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(trunc) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    if (jit.supportsFloatingPointRounding())
        jit.roundTowardZeroDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(trunc));

    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "trunc");
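// Math.round fast path: zero is returned as-is, and other non-negative doubles are rounded by
// adding 0.5 and truncating; negative values, NaN, and results that do not fit in an int32 fall
// back to the jsRound wrapper call.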
MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), SpecializedThunkJIT::fpRegT1);
        jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
        intResult = jit.jump();

    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "round");

MacroAssemblerCodeRef expThunkGenerator(VM* vm)
    if (!UnaryDoubleOpWrapper(exp))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "exp");

MacroAssemblerCodeRef logThunkGenerator(VM* vm)
    if (!UnaryDoubleOpWrapper(log))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "log");
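// Math.abs fast path. On 64-bit, an int32 is made non-negative with the sign-mask trick
// (arithmetic shift by 31, then add and xor), bailing to the double path for INT_MIN since it
// has no int32 negation; doubles go through absDouble. The 32-bit variant does the same via the
// argument-loading helpers.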
MacroAssemblerCodeRef absThunkGenerator(VM* vm)
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointAbs())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    unsigned virtualRegisterIndex = CallFrame::argumentOffset(0);
    jit.load64(AssemblyHelpers::addressFor(virtualRegisterIndex), GPRInfo::regT0);
    MacroAssembler::Jump notInteger = jit.branch64(MacroAssembler::Below, GPRInfo::regT0, GPRInfo::tagTypeNumberRegister);

    jit.rshift32(GPRInfo::regT0, MacroAssembler::TrustedImm32(31), GPRInfo::regT1);
    jit.add32(GPRInfo::regT1, GPRInfo::regT0);
    jit.xor32(GPRInfo::regT1, GPRInfo::regT0);

    // IntMin cannot be inverted.
    MacroAssembler::Jump integerIsIntMin = jit.branchTest32(MacroAssembler::Signed, GPRInfo::regT0);

    jit.or64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
    MacroAssembler::Jump doneWithIntegers = jit.jump();

    notInteger.link(&jit);
    jit.appendFailure(jit.branchTest64(MacroAssembler::Zero, GPRInfo::regT0, GPRInfo::tagTypeNumberRegister));
    jit.unboxDoubleWithoutAssertions(GPRInfo::regT0, GPRInfo::regT0, FPRInfo::fpRegT0);
    MacroAssembler::Label absFPR0Label = jit.label();
    jit.absDouble(FPRInfo::fpRegT0, FPRInfo::fpRegT1);
    jit.boxDouble(FPRInfo::fpRegT1, GPRInfo::regT0);

    doneWithIntegers.link(&jit);
    jit.returnJSValue(GPRInfo::regT0);

    // We know the value of regT0 is IntMin. We could load that value from memory but
    // it is simpler to just convert it.
    integerIsIntMin.link(&jit);
    jit.convertInt32ToDouble(GPRInfo::regT0, FPRInfo::fpRegT0);
    jit.jump().linkTo(absFPR0Label, &jit);

    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
    jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branchTest32(MacroAssembler::Signed, SpecializedThunkJIT::regT0));
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    // Shame about the double int conversion here.
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    jit.returnDouble(SpecializedThunkJIT::fpRegT1);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "abs");
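// Math.imul fast path: multiply the two arguments as int32s. A non-integer argument is truncated
// with branchTruncateDoubleToInt32 where the target supports it; otherwise the thunk bails out to
// the generic path.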
MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
    SpecializedThunkJIT jit(vm, 2);
    MacroAssembler::Jump nonIntArg0Jump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
    SpecializedThunkJIT::Label doneLoadingArg0(&jit);
    MacroAssembler::Jump nonIntArg1Jump;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT1, nonIntArg1Jump);
    SpecializedThunkJIT::Label doneLoadingArg1(&jit);
    jit.mul32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.returnInt32(SpecializedThunkJIT::regT0);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg0Jump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
        jit.appendFailure(jit.jump());
        jit.appendFailure(nonIntArg0Jump);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg1Jump.link(&jit);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
        jit.appendFailure(jit.jump());
        jit.appendFailure(nonIntArg1Jump);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "imul");

MacroAssemblerCodeRef randomThunkGenerator(VM* vm)
    SpecializedThunkJIT jit(vm, 0);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.emitRandomThunk(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT3, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "random");

    return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
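// Specialized call path for JSBoundFunction objects that bind only 'this' (no bound arguments):
// build a fresh callee frame, copy the caller's arguments across unchanged, install the bound
// this value and the target function as the callee, and call the target's arity-checking entry
// point (or the native tail-call trampoline if it has no JIT code yet).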
MacroAssemblerCodeRef boundThisNoArgsFunctionCallGenerator(VM* vm)
    CCallHelpers jit(vm);

    jit.emitFunctionPrologue();

    // Set up our call frame.
    jit.storePtr(CCallHelpers::TrustedImmPtr(nullptr), CCallHelpers::addressFor(CallFrameSlot::codeBlock));
    jit.store32(CCallHelpers::TrustedImm32(0), CCallHelpers::tagFor(CallFrameSlot::argumentCount));

    unsigned extraStackNeeded = 0;
    if (unsigned stackMisalignment = sizeof(CallerFrameAndPC) % stackAlignmentBytes())
        extraStackNeeded = stackAlignmentBytes() - stackMisalignment;

    // We need to forward all of the arguments that we were passed. We aren't allowed to do a tail
    // call here as far as I can tell. At least not so long as the generic path doesn't do a tail
    // call, since that would be way too weird.

    // The formula for the number of stack bytes needed given some number of parameters (including
    // 'this') is:
    //
    //     stackAlign((numParams + CallFrameHeaderSize) * sizeof(Register) - sizeof(CallerFrameAndPC))
    //
    // Probably we want to write this as:
    //
    //     stackAlign((numParams + (CallFrameHeaderSize - CallerFrameAndPCSize)) * sizeof(Register))
    //
    // That's really all there is to this. We have all the registers we need to do it.

    jit.load32(CCallHelpers::payloadFor(CallFrameSlot::argumentCount), GPRInfo::regT1);
    jit.add32(CCallHelpers::TrustedImm32(CallFrame::headerSizeInRegisters - CallerFrameAndPC::sizeInRegisters), GPRInfo::regT1, GPRInfo::regT2);
    jit.lshift32(CCallHelpers::TrustedImm32(3), GPRInfo::regT2);
    jit.add32(CCallHelpers::TrustedImm32(stackAlignmentBytes() - 1), GPRInfo::regT2);
    jit.and32(CCallHelpers::TrustedImm32(-stackAlignmentBytes()), GPRInfo::regT2);

    if (extraStackNeeded)
        jit.add32(CCallHelpers::TrustedImm32(extraStackNeeded), GPRInfo::regT2);

    // At this point regT1 has the actual argument count and regT2 has the amount of stack we will
    // need.

    jit.subPtr(GPRInfo::regT2, CCallHelpers::stackPointerRegister);

    // Do basic callee frame setup, including 'this'.

    jit.loadCell(CCallHelpers::addressFor(CallFrameSlot::callee), GPRInfo::regT3);

    jit.store32(GPRInfo::regT1, CCallHelpers::calleeFramePayloadSlot(CallFrameSlot::argumentCount));

    JSValueRegs valueRegs = JSValueRegs::withTwoAvailableRegs(GPRInfo::regT0, GPRInfo::regT2);
    jit.loadValue(CCallHelpers::Address(GPRInfo::regT3, JSBoundFunction::offsetOfBoundThis()), valueRegs);
    jit.storeValue(valueRegs, CCallHelpers::calleeArgumentSlot(0));

    jit.loadPtr(CCallHelpers::Address(GPRInfo::regT3, JSBoundFunction::offsetOfTargetFunction()), GPRInfo::regT3);
    jit.storeCell(GPRInfo::regT3, CCallHelpers::calleeFrameSlot(CallFrameSlot::callee));

    // OK, now we can start copying. This is a simple matter of copying parameters from the caller's
    // frame to the callee's frame. Note that we know that regT1 (the argument count) must be at
    // least one, since the count always includes 'this'.

    jit.sub32(CCallHelpers::TrustedImm32(1), GPRInfo::regT1);
    CCallHelpers::Jump done = jit.branchTest32(CCallHelpers::Zero, GPRInfo::regT1);

    CCallHelpers::Label loop = jit.label();
    jit.sub32(CCallHelpers::TrustedImm32(1), GPRInfo::regT1);
    jit.loadValue(CCallHelpers::addressFor(virtualRegisterForArgument(1)).indexedBy(GPRInfo::regT1, CCallHelpers::TimesEight), valueRegs);
    jit.storeValue(valueRegs, CCallHelpers::calleeArgumentSlot(1).indexedBy(GPRInfo::regT1, CCallHelpers::TimesEight));
    jit.branchTest32(CCallHelpers::NonZero, GPRInfo::regT1).linkTo(loop, &jit);

    done.link(&jit);

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT3, JSFunction::offsetOfExecutable()),
        GPRInfo::regT0);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT0, ExecutableBase::offsetOfJITCodeWithArityCheckFor(CodeForCall)),
        GPRInfo::regT0);
    CCallHelpers::Jump noCode = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT0);

    emitPointerValidation(jit, GPRInfo::regT0);
    jit.call(GPRInfo::regT0);

    jit.emitFunctionEpilogue();

    LinkBuffer linkBuffer(*vm, jit, GLOBAL_THUNK_ID);
    linkBuffer.link(noCode, CodeLocationLabel(vm->jitStubs->ctiNativeTailCallWithoutSavedTags(vm)));
    return FINALIZE_CODE(
        linkBuffer, ("Specialized thunk for bound function calls with no arguments"));
#if ENABLE(WEBASSEMBLY)
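// Entered from Wasm-generated code when it needs to throw an exception: spills callee saves into
// the VM entry frame buffer, calls a C++ helper that creates and throws a JSWebAssemblyRuntimeError
// for the given Wasm::ExceptionType, and then jumps to the handler chosen by genericUnwind().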
MacroAssemblerCodeRef throwExceptionFromWasmThunkGenerator(VM* vm)
    CCallHelpers jit(vm);

    // Whatever jumps here must move the ExceptionType into argumentGPR1 before jumping.
    // We're allowed to use temp registers here, but not callee saves.
    RegisterSet usedRegisters = RegisterSet::stubUnavailableRegisters();
    usedRegisters.set(GPRInfo::argumentGPR1);
    jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(usedRegisters);

    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
    CCallHelpers::Call call = jit.call();
    jit.jumpToExceptionHandler();

    void (*throwWasmException)(ExecState*, Wasm::ExceptionType) = [] (ExecState* exec, Wasm::ExceptionType type) {
        VM* vm = &exec->vm();
        NativeCallFrameTracer tracer(vm, exec);

        auto throwScope = DECLARE_THROW_SCOPE(*vm);
        JSGlobalObject* globalObject = vm->topJSWebAssemblyInstance->globalObject();

        JSWebAssemblyRuntimeError* error = JSWebAssemblyRuntimeError::create(
            exec, globalObject->WebAssemblyRuntimeErrorStructure(), Wasm::errorMessageForExceptionType(type));
        throwException(exec, throwScope, error);

        genericUnwind(vm, exec);
        ASSERT(!!vm->callFrameForCatch);
    };

    LinkBuffer linkBuffer(*vm, jit, GLOBAL_THUNK_ID);
    linkBuffer.link(call, throwWasmException);
    return FINALIZE_CODE(
        linkBuffer, ("Throw exception from Wasm"));

#endif // ENABLE(WEBASSEMBLY)

#endif // ENABLE(JIT)