2 * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "FTLLowerDFGToB3.h"
32 #include "AirGenerationContext.h"
33 #include "AllowMacroScratchRegisterUsage.h"
34 #include "AtomicsObject.h"
35 #include "B3CheckValue.h"
36 #include "B3FenceValue.h"
37 #include "B3PatchpointValue.h"
38 #include "B3SlotBaseValue.h"
39 #include "B3StackmapGenerationParams.h"
40 #include "B3ValueInlines.h"
41 #include "CallFrameShuffler.h"
42 #include "CodeBlockWithJITType.h"
43 #include "DFGAbstractInterpreterInlines.h"
44 #include "DFGCapabilities.h"
45 #include "DFGDominators.h"
46 #include "DFGInPlaceAbstractState.h"
47 #include "DFGOSRAvailabilityAnalysisPhase.h"
48 #include "DFGOSRExitFuzz.h"
49 #include "DirectArguments.h"
50 #include "FTLAbstractHeapRepository.h"
51 #include "FTLAvailableRecovery.h"
52 #include "FTLExceptionTarget.h"
53 #include "FTLForOSREntryJITCode.h"
54 #include "FTLFormattedValue.h"
55 #include "FTLLazySlowPathCall.h"
56 #include "FTLLoweredNodeValue.h"
57 #include "FTLOperations.h"
58 #include "FTLOutput.h"
59 #include "FTLPatchpointExceptionHandle.h"
60 #include "FTLSnippetParams.h"
61 #include "FTLThunks.h"
62 #include "FTLWeightedTarget.h"
63 #include "JITAddGenerator.h"
64 #include "JITBitAndGenerator.h"
65 #include "JITBitOrGenerator.h"
66 #include "JITBitXorGenerator.h"
67 #include "JITDivGenerator.h"
68 #include "JITInlineCacheGenerator.h"
69 #include "JITLeftShiftGenerator.h"
70 #include "JITMathIC.h"
71 #include "JITMulGenerator.h"
72 #include "JITRightShiftGenerator.h"
73 #include "JITSubGenerator.h"
74 #include "JSAsyncFunction.h"
75 #include "JSAsyncGeneratorFunction.h"
76 #include "JSCInlines.h"
77 #include "JSGeneratorFunction.h"
78 #include "JSLexicalEnvironment.h"
80 #include "OperandsInlines.h"
81 #include "ScopedArguments.h"
82 #include "ScopedArgumentsTable.h"
83 #include "ScratchRegisterAllocator.h"
84 #include "SetupVarargsFrame.h"
85 #include "ShadowChicken.h"
86 #include "StructureStubInfo.h"
87 #include "SuperSampler.h"
88 #include "ThunkGenerators.h"
89 #include "VirtualRegister.h"
92 #include <unordered_set>
94 #include <wtf/Gigacage.h>
96 namespace JSC { namespace FTL {
// Counter used below to build unique "jsBody_<N>_..." symbol names for
// verbose-compilation dumps. std::atomic, presumably because multiple
// compiler threads can increment it concurrently — TODO confirm.
103 std::atomic<int> compileCounter;
// Diagnostic crash handler invoked from FTL-generated code that reached a
// point the compiler believed to be unreachable. Logs the code block and
// basic-block index; nodeIndex == UINT_MAX means "no specific node".
// NOTE(review): this extract is missing interior lines (original numbering
// jumps 107->109 and ends at 111) — the closing log/crash call is not visible.
106 NO_RETURN_DUE_TO_CRASH static void ftlUnreachable(
107 CodeBlock* codeBlock, BlockIndex blockIndex, unsigned nodeIndex)
109 dataLog("Crashing in thought-to-be-unreachable FTL-generated code for ", pointerDump(codeBlock), " at basic block #", blockIndex);
110 if (nodeIndex != UINT_MAX)
111 dataLog(", node @", nodeIndex);
// FTL_TYPE_CHECK_WITH_EXIT_KIND: emits a speculative type check only when the
// abstract interpreter says the check is actually needed; otherwise it is a
// no-op, which keeps dead check code out of the B3 IR.
// NOTE(review): original line 124 (the early-out body between the `if` and the
// typeCheck call) and the closing `} while (false)` are missing from this
// extract — do not treat the macro below as complete.
117 // Using this instead of typeCheck() helps to reduce the load on B3, by creating
118 // significantly less dead code.
119 #define FTL_TYPE_CHECK_WITH_EXIT_KIND(exitKind, lowValue, highValue, typesPassedThrough, failCondition) do { \
120 FormattedValue _ftc_lowValue = (lowValue); \
121 Edge _ftc_highValue = (highValue); \
122 SpeculatedType _ftc_typesPassedThrough = (typesPassedThrough); \
123 if (!m_interpreter.needsTypeCheck(_ftc_highValue, _ftc_typesPassedThrough)) \
125 typeCheck(_ftc_lowValue, _ftc_highValue, _ftc_typesPassedThrough, (failCondition), exitKind); \
// Convenience wrapper: same as above with the common BadType exit kind.
128 #define FTL_TYPE_CHECK(lowValue, highValue, typesPassedThrough, failCondition) \
129 FTL_TYPE_CHECK_WITH_EXIT_KIND(BadType, lowValue, highValue, typesPassedThrough, failCondition)
// LowerDFGToB3 translates DFG SSA IR into B3 IR; one instance per compile.
// Non-copyable: it owns references into per-compile state.
132 WTF_MAKE_NONCOPYABLE(LowerDFGToB3);
// Constructor wires this lowering pass to the compile's graph, B3 procedure,
// in-place abstract-interpretation state and availability calculator.
// NOTE(review): the initializer list is incomplete in this extract (original
// numbering jumps 135->138) and the constructor body is not visible.
134 LowerDFGToB3(State& state)
135 : m_graph(state.graph)
138 , m_proc(*state.proc)
139 , m_availabilityCalculator(m_graph)
140 , m_state(state.graph)
141 , m_interpreter(state.graph, m_state)
// Main driver of the lowering: sets up entrypoints (including op_catch OSR
// entrypoints), emits the prologue with the stack-overflow check, emits the
// argument-speculation entry block and the shared exception-handler block,
// then lowers every reachable DFG basic block, and finally decorates heaps
// and reorders B3 blocks.
// NOTE(review): this extract is missing its signature line (original 145)
// and many interior lines — treat as a partial view, not compilable code.
147 State* state = &m_ftlState;
150 if (verboseCompilationEnabled()) {
152 "jsBody_", ++compileCounter, "_", codeBlock()->inferredName(),
153 "_", codeBlock()->hash());
158 m_proc.setNumEntrypoints(m_graph.m_numberOfEntrypoints);
159 CodeBlock* codeBlock = m_graph.m_codeBlock;
// Prologue shared by all catch (OSR) entrypoints: set up sp, save
// callee-saves, and store the CodeBlock into the call frame header.
161 Ref<B3::Air::PrologueGenerator> catchPrologueGenerator = createSharedTask<B3::Air::PrologueGeneratorFunction>(
162 [codeBlock] (CCallHelpers& jit, B3::Air::Code& code) {
163 AllowMacroScratchRegisterUsage allowScratch(jit);
164 jit.addPtr(CCallHelpers::TrustedImm32(-code.frameSize()), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
165 jit.emitSave(code.calleeSaveRegisterAtOffsetList());
166 jit.emitPutToCallFrameHeader(codeBlock, CallFrameSlot::codeBlock);
// Entrypoint 0 is the normal call entry; all others are catch entrypoints.
169 for (unsigned catchEntrypointIndex : m_graph.m_entrypointIndexToCatchBytecodeOffset.keys()) {
170 RELEASE_ASSERT(catchEntrypointIndex != 0);
171 m_proc.code().setPrologueForEntrypoint(catchEntrypointIndex, catchPrologueGenerator.copyRef());
174 if (m_graph.m_maxLocalsForCatchOSREntry) {
175 uint32_t numberOfLiveLocals = std::max(*m_graph.m_maxLocalsForCatchOSREntry, 1u); // Make sure we always allocate a non-null catchOSREntryBuffer.
176 m_ftlState.jitCode->common.catchOSREntryBuffer = m_graph.m_vm.scratchBufferForSize(sizeof(JSValue) * numberOfLiveLocals);
180 m_graph.ensureSSADominators();
182 if (verboseCompilationEnabled())
183 dataLog("Function ready, beginning lowering.\n");
185 m_out.initialize(m_heaps);
187 // We use prologue frequency for all of the initialization code.
188 m_out.setFrequency(1);
190 LBasicBlock prologue = m_out.newBlock();
191 LBasicBlock callEntrypointArgumentSpeculations = m_out.newBlock();
192 m_handleExceptions = m_out.newBlock();
// Create one low (B3) block per high (DFG) block, carrying over the DFG
// execution-count estimate as the B3 block frequency.
194 for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
195 m_highBlock = m_graph.block(blockIndex);
198 m_out.setFrequency(m_highBlock->executionCount);
199 m_blocks.add(m_highBlock, m_out.newBlock());
202 // Back to prologue frequency for any bocks that get sneakily created in the initialization code.
203 m_out.setFrequency(1);
205 m_out.appendTo(prologue, callEntrypointArgumentSpeculations);
206 m_out.initializeConstants(m_proc, prologue);
207 createPhiVariables();
// Reserve a stack slot area for OSR-exit "captured" machine locals.
209 size_t sizeOfCaptured = sizeof(JSValue) * m_graph.m_nextMachineLocal;
210 B3::SlotBaseValue* capturedBase = m_out.lockedStackSlot(sizeOfCaptured);
211 m_captured = m_out.add(capturedBase, m_out.constIntPtr(sizeOfCaptured));
212 state->capturedValue = capturedBase->slot();
214 auto preOrder = m_graph.blocksInPreOrder();
216 m_callFrame = m_out.framePointer();
217 m_tagTypeNumber = m_out.constInt64(TagTypeNumber);
218 m_tagMask = m_out.constInt64(TagMask);
220 // Make sure that B3 knows that we really care about the mask registers. This forces the
221 // constants to be materialized in registers.
222 m_proc.addFastConstant(m_tagTypeNumber->key());
223 m_proc.addFastConstant(m_tagMask->key());
225 // We don't want the CodeBlock to have a weak pointer to itself because
226 // that would cause it to always get collected.
227 m_out.storePtr(m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())), addressFor(CallFrameSlot::codeBlock));
229 VM* vm = &this->vm();
231 // Stack Overflow Check.
232 unsigned exitFrameSize = m_graph.requiredRegisterCountForExit() * sizeof(Register);
233 MacroAssembler::AbsoluteAddress addressOfStackLimit(vm->addressOfSoftStackLimit());
234 PatchpointValue* stackOverflowHandler = m_out.patchpoint(Void);
235 CallSiteIndex callSiteIndex = callSiteIndexForCodeOrigin(m_ftlState, CodeOrigin(0));
236 stackOverflowHandler->appendSomeRegister(m_callFrame);
237 stackOverflowHandler->clobber(RegisterSet::macroScratchRegisters());
238 stackOverflowHandler->numGPScratchRegisters = 1;
239 stackOverflowHandler->setGenerator(
240 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
241 AllowMacroScratchRegisterUsage allowScratch(jit);
242 GPRReg fp = params[0].gpr();
243 GPRReg scratch = params.gpScratch(0);
// The check must cover the larger of the FTL frame and the frame the OSR
// exit machinery would need.
245 unsigned ftlFrameSize = params.proc().frameSize();
246 unsigned maxFrameSize = std::max(exitFrameSize, ftlFrameSize);
248 jit.addPtr(MacroAssembler::TrustedImm32(-maxFrameSize), fp, scratch);
249 MacroAssembler::JumpList stackOverflow;
// Guard against wrap-around when the frame is bigger than the reserved zone.
250 if (UNLIKELY(maxFrameSize > Options::reservedZoneSize()))
251 stackOverflow.append(jit.branchPtr(MacroAssembler::Above, scratch, fp));
252 stackOverflow.append(jit.branchPtr(MacroAssembler::Above, addressOfStackLimit, scratch));
// Cold path: restore callee-saves, record the call site, then throw the
// stack-overflow error and jump to the exception handler.
254 params.addLatePath([=] (CCallHelpers& jit) {
255 AllowMacroScratchRegisterUsage allowScratch(jit);
257 stackOverflow.link(&jit);
259 // FIXME: We would not have to do this if the stack check was part of the Air
260 // prologue. Then, we would know that there is no way for the callee-saves to
262 // https://bugs.webkit.org/show_bug.cgi?id=172456
263 jit.emitRestore(params.proc().calleeSaveRegisterAtOffsetList());
266 MacroAssembler::TrustedImm32(callSiteIndex.bits()),
267 CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
268 jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm->topEntryFrame);
270 jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
271 jit.move(CCallHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::argumentGPR1);
272 CCallHelpers::Call throwCall = jit.call();
274 jit.move(CCallHelpers::TrustedImmPtr(vm), GPRInfo::argumentGPR0);
275 jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
276 CCallHelpers::Call lookupExceptionHandlerCall = jit.call();
277 jit.jumpToExceptionHandler(*vm);
280 [=] (LinkBuffer& linkBuffer) {
281 linkBuffer.link(throwCall, FunctionPtr(operationThrowStackOverflowError));
282 linkBuffer.link(lookupExceptionHandlerCall, FunctionPtr(lookupExceptionHandlerFromCallerFrame));
287 LBasicBlock firstDFGBasicBlock = lowBlock(m_graph.block(0));
// EntrySwitch successor 0 is the normal call entry (which must speculate on
// argument types); all other entrypoints go straight to the first DFG block.
290 Vector<LBasicBlock> successors(m_graph.m_numberOfEntrypoints);
291 successors[0] = callEntrypointArgumentSpeculations;
292 for (unsigned i = 1; i < m_graph.m_numberOfEntrypoints; ++i) {
293 // Currently, the only other entrypoint is an op_catch entrypoint.
294 // We do OSR entry at op_catch, and we prove argument formats before
295 // jumping to FTL code, so we don't need to check argument types here
296 // for these entrypoints.
297 successors[i] = firstDFGBasicBlock;
300 m_out.entrySwitch(successors);
301 m_out.appendTo(callEntrypointArgumentSpeculations, m_handleExceptions);
304 m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
// Arguments are flushed as JSValues at entry; record that availability.
307 availabilityMap().clear();
308 availabilityMap().m_locals = Operands<Availability>(codeBlock()->numParameters(), 0);
309 for (unsigned i = codeBlock()->numParameters(); i--;) {
310 availabilityMap().m_locals.argument(i) =
311 Availability(FlushedAt(FlushedJSValue, virtualRegisterForArgument(i)));
// Speculate on each argument according to the proven argument format for
// entrypoint 0; a failed speculation OSR-exits with BadType.
314 for (unsigned i = codeBlock()->numParameters(); i--;) {
315 MethodOfGettingAValueProfile profile(&m_graph.m_profiledBlock->valueProfileForArgument(i));
316 VirtualRegister operand = virtualRegisterForArgument(i);
317 LValue jsValue = m_out.load64(addressFor(operand));
319 switch (m_graph.m_argumentFormats[0][i]) {
321 speculate(BadType, jsValueValue(jsValue), profile, isNotInt32(jsValue));
324 speculate(BadType, jsValueValue(jsValue), profile, isNotBoolean(jsValue));
327 speculate(BadType, jsValueValue(jsValue), profile, isNotCell(jsValue));
332 DFG_CRASH(m_graph, nullptr, "Bad flush format for argument");
336 m_out.jump(firstDFGBasicBlock);
// Shared exception-handler block: a patchpoint that jumps to the common
// exception handler label emitted elsewhere.
340 m_out.appendTo(m_handleExceptions, firstDFGBasicBlock);
341 Box<CCallHelpers::Label> exceptionHandler = state->exceptionHandler;
342 m_out.patchpoint(Void)->setGenerator(
343 [=] (CCallHelpers& jit, const StackmapGenerationParams&) {
344 CCallHelpers::Jump jump = jit.jump();
346 [=] (LinkBuffer& linkBuffer) {
347 linkBuffer.link(jump, linkBuffer.locationOf(*exceptionHandler));
352 for (DFG::BasicBlock* block : preOrder)
355 // Make sure everything is decorated. This does a bunch of deferred decorating. This has
356 // to happen last because our abstract heaps are generated lazily. They have to be
357 // generated lazily because we have an infinite number of numbered, indexed, and
358 // absolute heaps. We only become aware of the ones we actually mention while lowering.
359 m_heaps.computeRangesAndDecorateInstructions();
361 // We create all Phi's up front, but we may then decide not to compile the basic block
362 // that would have contained one of them. So this creates orphans, which triggers B3
363 // validation failures. Calling this fixes the issue.
365 // Note that you should avoid the temptation to make this call conditional upon
366 // validation being enabled. B3 makes no guarantees of any kind of correctness when
367 // dealing with IR that would have failed validation. For example, it would be valid to
368 // write a B3 phase that so aggressively assumes the lack of orphans that it would crash
369 // if any orphans were around. We might even have such phases already.
370 m_proc.deleteOrphans();
372 // We put the blocks into the B3 procedure in a super weird order. Now we reorder them.
373 m_out.applyBlockOrder();
// Pre-creates a B3 Phi value for every DFG Phi node, keyed in m_phis, so
// Upsilons lowered later can attach incoming values to them. The B3 type is
// chosen from the DFG node's result kind.
// NOTE(review): interior lines are missing from this extract (the `type`
// assignments per case and `break`s around originals 391-405 are not visible).
378 void createPhiVariables()
380 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
381 DFG::BasicBlock* block = m_graph.block(blockIndex);
384 for (unsigned nodeIndex = block->size(); nodeIndex--;) {
385 Node* node = block->at(nodeIndex);
// Only DFG Phi nodes get a backing B3 Phi.
386 if (node->op() != DFG::Phi)
389 switch (node->flags() & NodeResultMask) {
390 case NodeResultDouble:
393 case NodeResultInt32:
396 case NodeResultInt52:
399 case NodeResultBoolean:
406 DFG_CRASH(m_graph, node, "Bad Phi node result type");
409 m_phis.add(node, m_proc.add<Value>(B3::Phi, type, Origin(node)));
// Lowers one DFG basic block into its corresponding B3 block: finds the next
// high block (to keep B3 IR roughly in program order for readable dumps),
// bails if the CFA never reached this block, then lowers each node in order.
// NOTE(review): interior lines are missing from this extract (e.g. the break
// out of the next-block search loop and the loop-exit handling).
414 void compileBlock(DFG::BasicBlock* block)
419 if (verboseCompilationEnabled())
420 dataLog("Compiling block ", *block, "\n");
424 // Make sure that any blocks created while lowering code in the high block have the frequency of
425 // the high block. This is appropriate because B3 doesn't need precise frequencies. It just needs
426 // something roughly approximate for things like register allocation.
427 m_out.setFrequency(m_highBlock->executionCount);
429 LBasicBlock lowBlock = m_blocks.get(m_highBlock);
// Find the next DFG block after this one, to append this block's IR before
// the next block's low block.
432 for (BlockIndex nextBlockIndex = m_highBlock->index + 1; nextBlockIndex < m_graph.numBlocks(); ++nextBlockIndex) {
433 m_nextHighBlock = m_graph.block(nextBlockIndex);
437 m_nextLowBlock = m_nextHighBlock ? m_blocks.get(m_nextHighBlock) : 0;
439 // All of this effort to find the next block gives us the ability to keep the
440 // generated IR in roughly program order. This ought not affect the performance
441 // of the generated code (since we expect B3 to reorder things) but it will
442 // make IR dumps easier to read.
443 m_out.appendTo(lowBlock, m_nextLowBlock);
445 if (Options::ftlCrashes())
// CFA proved this block unreachable: emit a crash instead of lowering it.
448 if (!m_highBlock->cfaHasVisited) {
449 if (verboseCompilationEnabled())
450 dataLog("Bailing because CFA didn't reach.\n");
451 crash(m_highBlock, nullptr);
455 m_availabilityCalculator.beginBlock(m_highBlock);
458 m_state.beginBasicBlock(m_highBlock);
// compileNode returning false means lowering of this block stops early
// (e.g. the abstract state became invalid).
460 for (m_nodeIndex = 0; m_nodeIndex < m_highBlock->size(); ++m_nodeIndex) {
461 if (!compileNode(m_nodeIndex))
// Called when the abstract interpreter proves the current position
// unreachable mid-block. Marks all blocks dominated by the current high
// block as not-CFA-visited so they get lowered as crashes rather than using
// values that were never defined.
466 void safelyInvalidateAfterTermination()
468 if (verboseCompilationEnabled())
469 dataLog("Bailing.\n");
472 // Invalidate dominated blocks. Under normal circumstances we would expect
473 // them to be invalidated already. But you can have the CFA become more
474 // precise over time because the structures of objects change on the main
475 // thread. Failing to do this would result in weird crashes due to a value
476 // being used but not defined. Race conditions FTW!
477 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
478 DFG::BasicBlock* target = m_graph.block(blockIndex);
481 if (m_graph.m_ssaDominators->dominates(m_highBlock, target)) {
482 if (verboseCompilationEnabled())
483 dataLog("Block ", *target, " will bail also.\n");
484 target->cfaHasVisited = false;
// Lowers a single DFG node: validates the abstract state, runs the abstract
// interpreter over the node's known edge types, then dispatches on the node's
// opcode to the per-opcode compile* helper. Returns whether lowering of the
// current block should continue.
// NOTE(review): this extract is heavily sampled — most `case` labels,
// `break`s, and several whole opcode groups are missing (original numbering
// has many gaps). Do not treat the switch below as exhaustive or compilable.
489 bool compileNode(unsigned nodeIndex)
// If the abstract state is invalid, this position is unreachable; invalidate
// dominated blocks and stop lowering this block.
491 if (!m_state.isValid()) {
492 safelyInvalidateAfterTermination();
496 m_node = m_highBlock->at(nodeIndex);
497 m_origin = m_node->origin;
498 m_out.setOrigin(m_node);
500 if (verboseCompilationEnabled())
501 dataLog("Lowering ", m_node, "\n");
503 m_availableRecoveries.shrink(0);
505 m_interpreter.startExecuting();
506 m_interpreter.executeKnownEdgeTypes(m_node);
// Opcode dispatch: each case forwards to a dedicated compile* helper.
508 switch (m_node->op()) {
518 compileDoubleConstant();
521 compileInt52Constant();
524 compileLazyJSConstant();
530 compileDoubleAsInt32();
539 compileValueToInt32();
541 case BooleanToNumber:
542 compileBooleanToNumber();
544 case ExtractOSREntryLocal:
545 compileExtractOSREntryLocal();
547 case ExtractCatchLocal:
548 compileExtractCatchLocal();
560 case CallObjectConstructor:
561 compileToObjectOrCallObjectConstructor();
574 compileArithAddOrSub();
590 compileArithMinOrMax();
599 compileArithRandom();
617 compileArithFRound();
620 compileArithNegate();
644 compileUInt32ToNumber();
647 compileCheckStructure();
649 case CheckStructureOrEmpty:
650 compileCheckStructureOrEmpty();
656 compileCheckNotEmpty();
659 compileCheckBadCell();
661 case CheckStringIdent:
662 compileCheckStringIdent();
665 compileGetExecutable();
668 case ArrayifyToStructure:
672 compilePutStructure();
675 compileGetById(AccessType::TryGet);
679 compileGetById(AccessType::Get);
681 case GetByIdWithThis:
682 compileGetByIdWithThis();
688 compileHasOwnProperty();
695 case PutByIdWithThis:
696 compilePutByIdWithThis();
700 compilePutAccessorById();
702 case PutGetterSetterById:
703 compilePutGetterSetterById();
707 compilePutAccessorByVal();
713 compileDeleteByVal();
716 case GetButterflyWithoutCaging:
717 compileGetButterfly();
719 case ConstantStoragePointer:
720 compileConstantStoragePointer();
722 case GetIndexedPropertyStorage:
723 compileGetIndexedPropertyStorage();
729 compileGetArrayLength();
731 case GetVectorLength:
732 compileGetVectorLength();
735 compileCheckInBounds();
740 case GetMyArgumentByVal:
741 case GetMyArgumentByValOutOfBounds:
742 compileGetMyArgumentByVal();
744 case GetByValWithThis:
745 compileGetByValWithThis();
752 case PutByValWithThis:
753 compilePutByValWithThis();
757 case AtomicsCompareExchange:
758 case AtomicsExchange:
764 compileAtomicsReadModifyWrite();
766 case AtomicsIsLockFree:
767 compileAtomicsIsLockFree();
769 case DefineDataProperty:
770 compileDefineDataProperty();
772 case DefineAccessorProperty:
773 compileDefineAccessorProperty();
785 compileArrayIndexOf();
787 case CreateActivation:
788 compileCreateActivation();
791 compilePushWithScope();
794 case NewGeneratorFunction:
795 case NewAsyncGeneratorFunction:
796 case NewAsyncFunction:
797 compileNewFunction();
799 case CreateDirectArguments:
800 compileCreateDirectArguments();
802 case CreateScopedArguments:
803 compileCreateScopedArguments();
805 case CreateClonedArguments:
806 compileCreateClonedArguments();
811 case NewStringObject:
812 compileNewStringObject();
817 case NewArrayWithSpread:
818 compileNewArrayWithSpread();
824 compileNewArrayBuffer();
826 case NewArrayWithSize:
827 compileNewArrayWithSize();
830 compileNewTypedArray();
832 case GetTypedArrayByteOffset:
833 compileGetTypedArrayByteOffset();
836 compileGetPrototypeOf();
838 case AllocatePropertyStorage:
839 compileAllocatePropertyStorage();
841 case ReallocatePropertyStorage:
842 compileReallocatePropertyStorage();
844 case NukeStructureAndSetButterfly:
845 compileNukeStructureAndSetButterfly();
851 case CallStringConstructor:
852 compileToStringOrCallStringConstructor();
855 compileToPrimitive();
861 compileStringCharAt();
863 case StringCharCodeAt:
864 compileStringCharCodeAt();
866 case StringFromCharCode:
867 compileStringFromCharCode();
870 case GetGetterSetterByOffset:
871 compileGetByOffset();
879 case MultiGetByOffset:
880 compileMultiGetByOffset();
883 compilePutByOffset();
885 case MultiPutByOffset:
886 compileMultiPutByOffset();
889 case GetGlobalLexicalVariable:
890 compileGetGlobalVariable();
892 case PutGlobalVariable:
893 compilePutGlobalVariable();
896 compileNotifyWrite();
901 case GetArgumentCountIncludingThis:
902 compileGetArgumentCountIncludingThis();
910 case GetGlobalObject:
911 compileGetGlobalObject();
914 compileGetGlobalThis();
917 compileGetClosureVar();
920 compilePutClosureVar();
922 case GetFromArguments:
923 compileGetFromArguments();
926 compilePutToArguments();
929 compileGetArgument();
934 case CompareStrictEq:
935 compileCompareStrictEq();
938 compileCompareLess();
941 compileCompareLessEq();
944 compileCompareGreater();
946 case CompareGreaterEq:
947 compileCompareGreaterEq();
950 compileCompareBelow();
953 compileCompareBelowEq();
956 compileCompareEqPtr();
962 case TailCallInlinedCaller:
964 compileCallOrConstruct();
967 case DirectTailCallInlinedCaller:
968 case DirectConstruct:
970 compileDirectCallOrConstruct();
976 case CallForwardVarargs:
977 case TailCallVarargs:
978 case TailCallVarargsInlinedCaller:
979 case TailCallForwardVarargs:
980 case TailCallForwardVarargsInlinedCaller:
981 case ConstructVarargs:
982 case ConstructForwardVarargs:
983 compileCallOrConstructVarargs();
989 compileLoadVarargs();
992 compileForwardVarargs();
1003 case DFG::EntrySwitch:
1004 compileEntrySwitch();
1010 compileForceOSRExit();
1014 compileCPUIntrinsic();
// CPU intrinsics are only valid on builds that support them; otherwise
// reaching this opcode is a compiler bug.
1016 RELEASE_ASSERT_NOT_REACHED();
1022 case ThrowStaticError:
1023 compileThrowStaticError();
1025 case InvalidationPoint:
1026 compileInvalidationPoint();
1032 compileIsUndefined();
1040 case IsCellWithType:
1041 compileIsCellWithType();
1047 compileGetMapBucket();
1049 case GetMapBucketHead:
1050 compileGetMapBucketHead();
1052 case GetMapBucketNext:
1053 compileGetMapBucketNext();
1055 case LoadKeyFromMapBucket:
1056 compileLoadKeyFromMapBucket();
1058 case LoadValueFromMapBucket:
1059 compileLoadValueFromMapBucket();
1068 compileWeakMapGet();
1073 case IsObjectOrNull:
1074 compileIsObjectOrNull();
1077 compileIsFunction();
1079 case IsTypedArrayView:
1080 compileIsTypedArrayView();
1088 case CheckTypeInfoFlags:
1089 compileCheckTypeInfoFlags();
1091 case OverridesHasInstance:
1092 compileOverridesHasInstance();
1095 compileInstanceOf();
1097 case InstanceOfCustom:
1098 compileInstanceOfCustom();
1100 case CountExecution:
1101 compileCountExecution();
1103 case SuperSamplerBegin:
1104 compileSuperSamplerBegin();
1106 case SuperSamplerEnd:
1107 compileSuperSamplerEnd();
1110 case FencedStoreBarrier:
1111 compileStoreBarrier();
1113 case HasIndexedProperty:
1114 compileHasIndexedProperty();
1116 case HasGenericProperty:
1117 compileHasGenericProperty();
1119 case HasStructureProperty:
1120 compileHasStructureProperty();
1122 case GetDirectPname:
1123 compileGetDirectPname();
1125 case GetEnumerableLength:
1126 compileGetEnumerableLength();
1128 case GetPropertyEnumerator:
1129 compileGetPropertyEnumerator();
1131 case GetEnumeratorStructurePname:
1132 compileGetEnumeratorStructurePname();
1134 case GetEnumeratorGenericPname:
1135 compileGetEnumeratorGenericPname();
1138 compileToIndexString();
1140 case CheckStructureImmediate:
1141 compileCheckStructureImmediate();
1143 case MaterializeNewObject:
1144 compileMaterializeNewObject();
1146 case MaterializeCreateActivation:
1147 compileMaterializeCreateActivation();
1150 if (Options::usePollingTraps())
1151 compileCheckTraps();
1154 compileCreateRest();
1157 compileGetRestLength();
1160 compileRegExpExec();
1163 compileRegExpTest();
1168 case SetFunctionName:
1169 compileSetFunctionName();
1172 case StringReplaceRegExp:
1173 compileStringReplace();
1175 case GetRegExpObjectLastIndex:
1176 compileGetRegExpObjectLastIndex();
1178 case SetRegExpObjectLastIndex:
1179 compileSetRegExpObjectLastIndex();
1181 case LogShadowChickenPrologue:
1182 compileLogShadowChickenPrologue();
1184 case LogShadowChickenTail:
1185 compileLogShadowChickenTail();
1187 case RecordRegExpCachedResult:
1188 compileRecordRegExpCachedResult();
1190 case ResolveScopeForHoistingFuncDeclInEval:
1191 compileResolveScopeForHoistingFuncDeclInEval();
1194 compileResolveScope();
1197 compileGetDynamicVar();
1200 compilePutDynamicVar();
1203 compileUnreachable();
1206 compileStringSlice();
1209 compileToLowerCase();
1211 case NumberToStringWithRadix:
1212 compileNumberToStringWithRadix();
1214 case NumberToStringWithValidRadixConstant:
1215 compileNumberToStringWithValidRadixConstant();
1218 compileCheckSubClass();
1224 compileCallDOMGetter();
// Phantom nodes carry no machine code; they exist only for OSR exit
// bookkeeping and are handled without emitting IR.
1232 case PhantomNewObject:
1233 case PhantomNewFunction:
1234 case PhantomNewGeneratorFunction:
1235 case PhantomNewAsyncGeneratorFunction:
1236 case PhantomNewAsyncFunction:
1237 case PhantomCreateActivation:
1238 case PhantomDirectArguments:
1239 case PhantomCreateRest:
1241 case PhantomNewArrayWithSpread:
1242 case PhantomClonedArguments:
1246 case InitializeEntrypointArguments:
1249 DFG_CRASH(m_graph, m_node, "Unrecognized node in FTL backend");
1253 if (m_node->isTerminal())
// The node may have made the abstract state invalid (proved unreachable).
1256 if (!m_state.isValid()) {
1257 safelyInvalidateAfterTermination();
1261 m_availabilityCalculator.executeNode(m_node);
1262 m_interpreter.executeEffects(nodeIndex);
// Lowers a DFG Upsilon: lowers the child value according to its use kind and
// attaches it as an incoming value to the B3 Phi pre-created for the target
// DFG Phi node (see createPhiVariables / m_phis).
// NOTE(review): several case labels and break statements are missing from
// this extract (original numbering has gaps, e.g. 1270->1272).
1267 void compileUpsilon()
1269 LValue upsilonValue = nullptr;
1270 switch (m_node->child1().useKind()) {
1272 upsilonValue = lowDouble(m_node->child1());
1276 upsilonValue = lowInt32(m_node->child1());
1279 upsilonValue = lowInt52(m_node->child1());
1282 case KnownBooleanUse:
1283 upsilonValue = lowBoolean(m_node->child1());
1287 upsilonValue = lowCell(m_node->child1());
1290 upsilonValue = lowJSValue(m_node->child1());
1293 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Anchor the value in the current block and register it as an incoming
// value on the target Phi.
1296 ValueFromBlock upsilon = m_out.anchor(upsilonValue);
1297 LValue phiNode = m_phis.get(m_node->phi());
1298 m_out.addIncomingToPhi(phiNode, upsilon);
// Body of the Phi lowering: appends the pre-created B3 Phi to the current
// block and records its value under the representation matching the node's
// result kind. NOTE(review): the enclosing function signature (presumably
// compilePhi(), original line ~1301) and the per-case set* calls are missing
// from this extract.
1303 LValue phi = m_phis.get(m_node);
1304 m_out.m_block->append(phi);
1306 switch (m_node->flags() & NodeResultMask) {
1307 case NodeResultDouble:
1310 case NodeResultInt32:
1313 case NodeResultInt52:
1316 case NodeResultBoolean:
1323 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers a DoubleConstant node to a B3 double constant.
1328 void compileDoubleConstant()
1330 setDouble(m_out.constDouble(m_node->asNumber()));
// Lowers an Int52Constant: records both the shifted Int52 representation
// (value << int52ShiftAmount) and the strict (unshifted) Int52 form.
1333 void compileInt52Constant()
1335 int64_t value = m_node->asAnyInt();
1337 setInt52(m_out.constInt64(value << JSValue::int52ShiftAmount));
1338 setStrictInt52(m_out.constInt64(value));
// Lowers a LazyJSConstant via a patchpoint: the constant's bits are emitted
// at code-generation time into the patchpoint's result register. The
// patchpoint has no effects, so B3 may freely reorder/CSE it.
1341 void compileLazyJSConstant()
1343 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
1344 LazyJSValue value = m_node->lazyJSValue();
1345 patchpoint->setGenerator(
1346 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
1347 value.emit(jit, JSValueRegs(params[0].gpr()));
1349 patchpoint->effects = Effects::none();
1350 setJSValue(patchpoint);
// Lowers DoubleRep: converts the child JSValue (or Int52) to an unboxed
// double. RealNumberUse fast-paths non-NaN unboxed doubles and falls back to
// an int32 check; the generic path branches on int32 vs double vs (for
// NotCellUse) undefined/null/boolean conversions per the ToNumber rules.
// NOTE(review): this extract has missing interior lines (case labels, breaks,
// and some closing braces) — the structure shown is partial.
1353 void compileDoubleRep()
1355 switch (m_node->child1().useKind()) {
1356 case RealNumberUse: {
1357 LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);
1359 LValue doubleValue = unboxDouble(value);
1361 LBasicBlock intCase = m_out.newBlock();
1362 LBasicBlock continuation = m_out.newBlock();
// doubleValue == doubleValue distinguishes a real unboxed double (not NaN
// from a failed unboxing) — that is the fast path.
1364 ValueFromBlock fastResult = m_out.anchor(doubleValue);
1366 m_out.doubleEqual(doubleValue, doubleValue),
1367 usually(continuation), rarely(intCase));
1369 LBasicBlock lastNext = m_out.appendTo(intCase, continuation);
// Not a double: must be an int32, else the speculation fails.
1372 jsValueValue(value), m_node->child1(), SpecBytecodeRealNumber,
1373 isNotInt32(value, provenType(m_node->child1()) & ~SpecDoubleReal));
1374 ValueFromBlock slowResult = m_out.anchor(m_out.intToDouble(unboxInt32(value)));
1375 m_out.jump(continuation);
1377 m_out.appendTo(continuation, lastNext);
1379 setDouble(m_out.phi(Double, fastResult, slowResult));
// Generic path: NotCellUse additionally converts undefined/null/booleans.
1385 bool shouldConvertNonNumber = m_node->child1().useKind() == NotCellUse;
1387 LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);
1389 LBasicBlock intCase = m_out.newBlock();
1390 LBasicBlock doubleTesting = m_out.newBlock();
1391 LBasicBlock doubleCase = m_out.newBlock();
1392 LBasicBlock nonDoubleCase = m_out.newBlock();
1393 LBasicBlock continuation = m_out.newBlock();
1396 isNotInt32(value, provenType(m_node->child1())),
1397 unsure(doubleTesting), unsure(intCase));
1399 LBasicBlock lastNext = m_out.appendTo(intCase, doubleTesting);
1401 ValueFromBlock intToDouble = m_out.anchor(
1402 m_out.intToDouble(unboxInt32(value)));
1403 m_out.jump(continuation);
1405 m_out.appendTo(doubleTesting, doubleCase);
1406 LValue valueIsNumber = isNumber(value, provenType(m_node->child1()));
1407 m_out.branch(valueIsNumber, usually(doubleCase), rarely(nonDoubleCase));
1409 m_out.appendTo(doubleCase, nonDoubleCase);
1410 ValueFromBlock unboxedDouble = m_out.anchor(unboxDouble(value));
1411 m_out.jump(continuation);
// ToNumber conversions: undefined -> NaN, null -> 0, true -> 1, false -> 0;
// anything else (a cell) fails the ~SpecCellCheck type check.
1413 if (shouldConvertNonNumber) {
1414 LBasicBlock undefinedCase = m_out.newBlock();
1415 LBasicBlock testNullCase = m_out.newBlock();
1416 LBasicBlock nullCase = m_out.newBlock();
1417 LBasicBlock testBooleanTrueCase = m_out.newBlock();
1418 LBasicBlock convertBooleanTrueCase = m_out.newBlock();
1419 LBasicBlock convertBooleanFalseCase = m_out.newBlock();
1421 m_out.appendTo(nonDoubleCase, undefinedCase);
1422 LValue valueIsUndefined = m_out.equal(value, m_out.constInt64(ValueUndefined));
1423 m_out.branch(valueIsUndefined, unsure(undefinedCase), unsure(testNullCase));
1425 m_out.appendTo(undefinedCase, testNullCase);
1426 ValueFromBlock convertedUndefined = m_out.anchor(m_out.constDouble(PNaN));
1427 m_out.jump(continuation);
1429 m_out.appendTo(testNullCase, nullCase);
1430 LValue valueIsNull = m_out.equal(value, m_out.constInt64(ValueNull));
1431 m_out.branch(valueIsNull, unsure(nullCase), unsure(testBooleanTrueCase));
1433 m_out.appendTo(nullCase, testBooleanTrueCase);
1434 ValueFromBlock convertedNull = m_out.anchor(m_out.constDouble(0));
1435 m_out.jump(continuation);
1437 m_out.appendTo(testBooleanTrueCase, convertBooleanTrueCase);
1438 LValue valueIsBooleanTrue = m_out.equal(value, m_out.constInt64(ValueTrue));
1439 m_out.branch(valueIsBooleanTrue, unsure(convertBooleanTrueCase), unsure(convertBooleanFalseCase));
1441 m_out.appendTo(convertBooleanTrueCase, convertBooleanFalseCase);
1442 ValueFromBlock convertedTrue = m_out.anchor(m_out.constDouble(1));
1443 m_out.jump(continuation);
1445 m_out.appendTo(convertBooleanFalseCase, continuation);
1447 LValue valueIsNotBooleanFalse = m_out.notEqual(value, m_out.constInt64(ValueFalse));
1448 FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), ~SpecCellCheck, valueIsNotBooleanFalse);
1449 ValueFromBlock convertedFalse = m_out.anchor(m_out.constDouble(0));
1450 m_out.jump(continuation);
1452 m_out.appendTo(continuation, lastNext);
1453 setDouble(m_out.phi(Double, intToDouble, unboxedDouble, convertedUndefined, convertedNull, convertedTrue, convertedFalse));
// Non-converting path: anything not a number fails the speculation.
1456 m_out.appendTo(nonDoubleCase, continuation);
1457 FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecBytecodeNumber, m_out.booleanTrue);
1458 m_out.unreachable();
1460 m_out.appendTo(continuation, lastNext);
1462 setDouble(m_out.phi(Double, intToDouble, unboxedDouble));
// Int52 child: straightforward strict-int52 -> double conversion.
1467 setDouble(strictInt52ToDouble(lowStrictInt52(m_node->child1())));
1472 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers DoubleAsInt32: converts the child double to int32, honoring the
// node's arith mode for negative-zero checking.
1476 void compileDoubleAsInt32()
1478 LValue integerValue = convertDoubleToInt32(lowDouble(m_node->child1()), shouldCheckNegativeZero(m_node->arithMode()));
1479 setInt32(integerValue);
// Lowers ValueRep: boxes the child (double or Int52) back into a JSValue.
// For doubles that may be an impure NaN, normalizes to the canonical PNaN
// first so the boxed bits are a valid JSValue encoding.
// NOTE(review): case labels/breaks between the two paths are missing from
// this extract.
1482 void compileValueRep()
1484 switch (m_node->child1().useKind()) {
1485 case DoubleRepUse: {
1486 LValue value = lowDouble(m_node->child1());
1488 if (m_interpreter.needsTypeCheck(m_node->child1(), ~SpecDoubleImpureNaN)) {
// value != value detects NaN; replace with the pure NaN constant.
1489 value = m_out.select(
1490 m_out.doubleEqual(value, value), value, m_out.constDouble(PNaN));
1493 setJSValue(boxDouble(value));
1498 setJSValue(strictInt52ToJSValue(lowStrictInt52(m_node->child1())));
1503 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers Int52Rep: converts the child (int32, JSValue, or double proven to be
// an AnyInt) into the strict int52 representation, speculating where the
// conversion can fail.
void compileInt52Rep()
switch (m_node->child1().useKind()) {
// Int32 widens losslessly by sign extension.
setStrictInt52(m_out.signExt32To64(lowInt32(m_node->child1())));
// JSValue path: the helper fuses the type check with the conversion; the
// operand is lowered with ManualOperandSpeculation since the check is here.
jsValueToStrictInt52(
m_node->child1(), lowJSValue(m_node->child1(), ManualOperandSpeculation));
case DoubleRepAnyIntUse:
doubleToStrictInt52(
m_node->child1(), lowDouble(m_node->child1())));
RELEASE_ASSERT_NOT_REACHED();
// Lowers ValueToInt32: produces an int32 from whatever representation the
// child is already available in, preferring cheaper, already-computed forms.
void compileValueToInt32()
switch (m_node->child1().useKind()) {
setInt32(m_out.castToInt32(lowStrictInt52(m_node->child1())));
setInt32(doubleToInt32(lowDouble(m_node->child1())));
// Untyped path: first reuse an existing int32 lowering of the child, if any.
LoweredNodeValue value = m_int32Values.get(m_node->child1().node());
if (isValid(value)) {
setInt32(value.value());
// Next best: an existing JSValue lowering, converted via the generic
// number-or-not-cell conversion (which emits the needed type checks).
value = m_jsValueValues.get(m_node->child1().node());
if (isValid(value)) {
setInt32(numberOrNotCellToInt32(m_node->child1(), value.value()));
// We'll basically just get here for constants. But it's good to have this
// catch-all since we often add new representations into the mix.
numberOrNotCellToInt32(
lowJSValue(m_node->child1(), ManualOperandSpeculation));
DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers BooleanToNumber: true/false become 1/0. For a proven boolean the
// result is a raw int32; for an untyped input, booleans are converted to a
// boxed int32 JSValue while non-booleans pass through unchanged.
void compileBooleanToNumber()
switch (m_node->child1().useKind()) {
setInt32(m_out.zeroExt(lowBoolean(m_node->child1()), Int32));
LValue value = lowJSValue(m_node->child1());
// If the value is proven to be a boolean or a boolean-like int32 (0/1),
// its low bit is exactly the numeric value; no control flow needed.
if (!m_interpreter.needsTypeCheck(m_node->child1(), SpecBoolInt32 | SpecBoolean)) {
setInt32(m_out.bitAnd(m_out.castToInt32(value), m_out.int32One));
LBasicBlock booleanCase = m_out.newBlock();
LBasicBlock continuation = m_out.newBlock();
// Non-boolean inputs flow through unmodified.
ValueFromBlock notBooleanResult = m_out.anchor(value);
isBoolean(value, provenType(m_node->child1())),
unsure(booleanCase), unsure(continuation));
LBasicBlock lastNext = m_out.appendTo(booleanCase, continuation);
// Unbox the boolean to 0/1 and re-box it as an int32 JSValue by OR-ing in
// the number tag.
ValueFromBlock booleanResult = m_out.anchor(m_out.bitOr(
m_out.zeroExt(unboxBoolean(value), Int64), m_tagTypeNumber));
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
setJSValue(m_out.phi(Int64, booleanResult, notBooleanResult));
RELEASE_ASSERT_NOT_REACHED();
// Lowers ExtractOSREntryLocal: loads a local's value out of the FTL-for-OSR-
// entry scratch buffer, which was populated before entering this code.
void compileExtractOSREntryLocal()
EncodedJSValue* buffer = static_cast<EncodedJSValue*>(
m_ftlState.jitCode->ftlForOSREntry()->entryBuffer()->dataBuffer());
// The buffer address is a compile-time constant, so an absolute load works.
setJSValue(m_out.load64(m_out.absolute(buffer + m_node->unlinkedLocal().toLocal())));
// Lowers ExtractCatchLocal: loads a value out of the catch OSR entry buffer
// at this node's entry index (populated when entering at a catch handler).
void compileExtractCatchLocal()
EncodedJSValue* buffer = static_cast<EncodedJSValue*>(m_ftlState.jitCode->common.catchOSREntryBuffer->dataBuffer());
setJSValue(m_out.load64(m_out.absolute(buffer + m_node->catchOSREntryIndex())));
// Lowers GetStack: reads a flushed local from the stack slot recorded in the
// node's StackAccessData, picking the load width from the proven type.
void compileGetStack()
StackAccessData* data = m_node->stackAccessData();
AbstractValue& value = m_state.variables().operand(data->local);
DFG_ASSERT(m_graph, m_node, isConcrete(data->format));
DFG_ASSERT(m_graph, m_node, data->format != FlushedDouble); // This just happens to not arise for GetStacks, right now. It would be trivial to support.
// Proven int32: load just the 32-bit payload; otherwise load the full
// 64-bit boxed JSValue.
if (isInt32Speculation(value.m_type))
setInt32(m_out.load32(payloadFor(data->machineLocal)));
setJSValue(m_out.load64(addressFor(data->machineLocal)));
// Lowers PutStack: stores the child into its stack slot using the store width
// and representation dictated by the flush format.
void compilePutStack()
StackAccessData* data = m_node->stackAccessData();
switch (data->format) {
case FlushedJSValue: {
LValue value = lowJSValue(m_node->child1());
m_out.store64(value, addressFor(data->machineLocal));
case FlushedDouble: {
LValue value = lowDouble(m_node->child1());
m_out.storeDouble(value, addressFor(data->machineLocal));
case FlushedInt32: {
LValue value = lowInt32(m_node->child1());
// Int32 flush only writes the payload half of the slot.
m_out.store32(value, payloadFor(data->machineLocal));
case FlushedInt52: {
LValue value = lowInt52(m_node->child1());
m_out.store64(value, addressFor(data->machineLocal));
LValue value = lowCell(m_node->child1());
m_out.store64(value, addressFor(data->machineLocal));
case FlushedBoolean: {
// Speculate the type first, then store the (already boxed) JSValue bits.
speculateBoolean(m_node->child1());
lowJSValue(m_node->child1(), ManualOperandSpeculation),
addressFor(data->machineLocal));
DFG_CRASH(m_graph, m_node, "Bad flush format");
1688 DFG_NODE_DO_TO_CHILDREN(m_graph, m_node, speculate);
// Lowers ToObject / CallObjectConstructor: objects pass through on the fast
// path; everything else goes to the appropriate runtime operation.
void compileToObjectOrCallObjectConstructor()
LValue value = lowJSValue(m_node->child1());
LBasicBlock isCellCase = m_out.newBlock();
LBasicBlock slowCase = m_out.newBlock();
LBasicBlock continuation = m_out.newBlock();
m_out.branch(isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));
LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
// Fast path: a cell that is already an object is returned unchanged.
ValueFromBlock fastResult = m_out.anchor(value);
m_out.branch(isObject(value), usually(continuation), rarely(slowCase));
m_out.appendTo(slowCase, continuation);
ValueFromBlock slowResult;
if (m_node->op() == ToObject) {
// ToObject needs the identifier so the runtime can build a good error
// message for null/undefined.
auto* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationToObject), m_callFrame, weakPointer(globalObject), value, m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationCallObjectConstructor), m_callFrame, frozenPointer(m_node->cellOperand()), value));
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
setJSValue(m_out.phi(Int64, fastResult, slowResult));
// Lowers ToThis: cells that do not override the ToThis behavior pass through
// unchanged; other values call the (strictness-dependent) runtime operation.
void compileToThis()
LValue value = lowJSValue(m_node->child1());
LBasicBlock isCellCase = m_out.newBlock();
LBasicBlock slowCase = m_out.newBlock();
LBasicBlock continuation = m_out.newBlock();
isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));
LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
ValueFromBlock fastResult = m_out.anchor(value);
// Check the cell's type-info flags for OverridesToThis; if set, the slow
// path must run.
m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoFlags),
m_out.constInt32(OverridesToThis)),
usually(continuation), rarely(slowCase));
m_out.appendTo(slowCase, continuation);
// Strict mode leaves primitives alone; sloppy mode wraps/substitutes, so
// two different runtime operations exist.
J_JITOperation_EJ function;
if (m_graph.isStrictModeFor(m_node->origin.semantic))
function = operationToThisStrict;
function = operationToThis;
ValueFromBlock slowResult = m_out.anchor(
vmCall(Int64, m_out.operation(function), m_callFrame, value));
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
setJSValue(m_out.phi(Int64, fastResult, slowResult));
// Lowers ValueAdd through a JITAddIC math inline cache, seeded with the
// baseline arith profile for this bytecode so the IC can specialize.
void compileValueAdd()
ArithProfile* arithProfile = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic)->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
JITAddIC* addIC = codeBlock()->addJITAddIC(arithProfile);
auto repatchingFunction = operationValueAddOptimize;
auto nonRepatchingFunction = operationValueAdd;
compileMathIC(addIC, repatchingFunction, nonRepatchingFunction);
// Emits a unary math inline cache as a B3 patchpoint. The Generator writes
// the inline fast path; if inline generation succeeds, a late path links the
// slow-path jumps to a call of repatchingFunction (which can rewrite the IC)
// or nonRepatchingFunction. If inline generation fails, the patchpoint is
// just a call to nonRepatchingFunction. Exceptions from the calls are routed
// through the patchpoint's exception handle.
template <typename Generator>
void compileMathIC(JITUnaryMathIC<Generator>* mathIC, FunctionPtr repatchingFunction, FunctionPtr nonRepatchingFunction)
Node* node = m_node;
LValue operand = lowJSValue(node->child1());
PatchpointValue* patchpoint = m_out.patchpoint(Int64);
patchpoint->appendSomeRegister(operand);
// The IC's snippet code relies on the tag registers holding their usual
// values, so pin them as late-use register constraints.
patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
patchpoint->numGPScratchRegisters = 1;
patchpoint->clobber(RegisterSet::macroScratchRegisters());
// Captured by value into the generator lambda, which outlives this frame.
State* state = &m_ftlState;
patchpoint->setGenerator(
[=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
AllowMacroScratchRegisterUsage allowScratch(jit);
Box<CCallHelpers::JumpList> exceptions =
exceptionHandle->scheduleExitCreation(params)->jumps(jit);
#if ENABLE(MATH_IC_STATS)
auto inlineStart = jit.label();
// Boxed so the late-path and link-task lambdas share one state object.
Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
mathIC->m_generator = Generator(JSValueRegs(params[0].gpr()), JSValueRegs(params[1].gpr()), params.gpScratch(0));
bool shouldEmitProfiling = false;
bool generatedInline = mathIC->generateInline(jit, *mathICGenerationState, shouldEmitProfiling);
if (generatedInline) {
ASSERT(!mathICGenerationState->slowPathJumps.empty());
auto done = jit.label();
params.addLatePath([=] (CCallHelpers& jit) {
AllowMacroScratchRegisterUsage allowScratch(jit);
mathICGenerationState->slowPathJumps.link(&jit);
mathICGenerationState->slowPathStart = jit.label();
#if ENABLE(MATH_IC_STATS)
auto slowPathStart = jit.label();
// Repatching variant gets the IC pointer so it can rewrite the
// inline code for the observed operand types.
if (mathICGenerationState->shouldSlowPathRepatch) {
SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
repatchingFunction, params[0].gpr(), params[1].gpr(), CCallHelpers::TrustedImmPtr(mathIC));
mathICGenerationState->slowPathCall = call.call();
SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic,
exceptions.get(), nonRepatchingFunction, params[0].gpr(), params[1].gpr());
mathICGenerationState->slowPathCall = call.call();
jit.jump().linkTo(done, &jit);
// Final labels/offsets are only known at link time.
jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
mathIC->finalizeInlineCode(*mathICGenerationState, linkBuffer);
#if ENABLE(MATH_IC_STATS)
auto slowPathEnd = jit.label();
jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
size_t size = static_cast<char*>(linkBuffer.locationOf(slowPathEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(slowPathStart).executableAddress());
mathIC->m_generatedCodeSize += size;
// Inline generation failed: just call the non-repatching operation.
*state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
nonRepatchingFunction, params[0].gpr(), params[1].gpr());
#if ENABLE(MATH_IC_STATS)
auto inlineEnd = jit.label();
jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
size_t size = static_cast<char*>(linkBuffer.locationOf(inlineEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(inlineStart).executableAddress());
mathIC->m_generatedCodeSize += size;
setJSValue(patchpoint);
// Binary-operand counterpart of the unary compileMathIC above: emits a binary
// math IC (add/sub/mul) as a patchpoint. Same structure — inline fast path
// from the Generator, late slow path calling repatching/non-repatching
// operations — but with two operands, operand result-type hints, and two FP
// scratch registers for the snippet.
template <typename Generator>
void compileMathIC(JITBinaryMathIC<Generator>* mathIC, FunctionPtr repatchingFunction, FunctionPtr nonRepatchingFunction)
Node* node = m_node;
LValue left = lowJSValue(node->child1());
LValue right = lowJSValue(node->child2());
// Result-type hints from abstract interpretation let the snippet skip
// checks for operand types that cannot occur.
SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());
PatchpointValue* patchpoint = m_out.patchpoint(Int64);
patchpoint->appendSomeRegister(left);
patchpoint->appendSomeRegister(right);
patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
RefPtr<PatchpointExceptionHandle> exceptionHandle =
preparePatchpointForExceptions(patchpoint);
patchpoint->numGPScratchRegisters = 1;
patchpoint->numFPScratchRegisters = 2;
patchpoint->clobber(RegisterSet::macroScratchRegisters());
// Captured by value into the generator lambda, which outlives this frame.
State* state = &m_ftlState;
patchpoint->setGenerator(
[=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
AllowMacroScratchRegisterUsage allowScratch(jit);
Box<CCallHelpers::JumpList> exceptions =
exceptionHandle->scheduleExitCreation(params)->jumps(jit);
#if ENABLE(MATH_IC_STATS)
auto inlineStart = jit.label();
// Boxed so the late-path and link-task lambdas share one state object.
Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
mathIC->m_generator = Generator(leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), params.fpScratch(0),
params.fpScratch(1), params.gpScratch(0), InvalidFPRReg);
bool shouldEmitProfiling = false;
bool generatedInline = mathIC->generateInline(jit, *mathICGenerationState, shouldEmitProfiling);
if (generatedInline) {
ASSERT(!mathICGenerationState->slowPathJumps.empty());
auto done = jit.label();
params.addLatePath([=] (CCallHelpers& jit) {
AllowMacroScratchRegisterUsage allowScratch(jit);
mathICGenerationState->slowPathJumps.link(&jit);
mathICGenerationState->slowPathStart = jit.label();
#if ENABLE(MATH_IC_STATS)
auto slowPathStart = jit.label();
// Repatching variant gets the IC pointer so it can rewrite the
// inline code for the observed operand types.
if (mathICGenerationState->shouldSlowPathRepatch) {
SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
repatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr(), CCallHelpers::TrustedImmPtr(mathIC));
mathICGenerationState->slowPathCall = call.call();
SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic,
exceptions.get(), nonRepatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr());
mathICGenerationState->slowPathCall = call.call();
jit.jump().linkTo(done, &jit);
// Final labels/offsets are only known at link time.
jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
mathIC->finalizeInlineCode(*mathICGenerationState, linkBuffer);
#if ENABLE(MATH_IC_STATS)
auto slowPathEnd = jit.label();
jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
size_t size = static_cast<char*>(linkBuffer.locationOf(slowPathEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(slowPathStart).executableAddress());
mathIC->m_generatedCodeSize += size;
// Inline generation failed: just call the non-repatching operation.
*state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
nonRepatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr());
#if ENABLE(MATH_IC_STATS)
auto inlineEnd = jit.label();
jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
size_t size = static_cast<char*>(linkBuffer.locationOf(inlineEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(inlineStart).executableAddress());
mathIC->m_generatedCodeSize += size;
setJSValue(patchpoint);
// Lowers StrCat: calls the two- or three-argument string concatenation
// runtime operation depending on whether a third child exists. Operands use
// ManualOperandSpeculation because the operation accepts any JSValue.
void compileStrCat()
if (m_node->child3()) {
Int64, m_out.operation(operationStrCat3), m_callFrame,
lowJSValue(m_node->child1(), ManualOperandSpeculation),
lowJSValue(m_node->child2(), ManualOperandSpeculation),
lowJSValue(m_node->child3(), ManualOperandSpeculation));
Int64, m_out.operation(operationStrCat2), m_callFrame,
lowJSValue(m_node->child1(), ManualOperandSpeculation),
lowJSValue(m_node->child2(), ManualOperandSpeculation));
// Lowers ArithAdd/ArithSub for int32, int52, double, and untyped (IC) use
// kinds. Overflow checks become speculations that OSR exit on failure.
void compileArithAddOrSub()
bool isSub = m_node->op() == ArithSub;
switch (m_node->binaryUseKind()) {
LValue left = lowInt32(m_node->child1());
LValue right = lowInt32(m_node->child2());
// Unchecked mode wraps; checked mode uses a speculating add/sub.
if (!shouldCheckOverflow(m_node->arithMode())) {
setInt32(isSub ? m_out.sub(left, right) : m_out.add(left, right));
CheckValue* result =
isSub ? m_out.speculateSub(left, right) : m_out.speculateAdd(left, right);
blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
// If neither operand can actually need all 52 bits, the add/sub cannot
// overflow the int52 range, so no check is needed.
if (!abstractValue(m_node->child1()).couldBeType(SpecInt52Only)
&& !abstractValue(m_node->child2()).couldBeType(SpecInt52Only)) {
LValue left = lowWhicheverInt52(m_node->child1(), kind);
LValue right = lowInt52(m_node->child2(), kind);
setInt52(isSub ? m_out.sub(left, right) : m_out.add(left, right), kind);
LValue left = lowInt52(m_node->child1());
LValue right = lowInt52(m_node->child2());
CheckValue* result =
isSub ? m_out.speculateSub(left, right) : m_out.speculateAdd(left, right);
blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
case DoubleRepUse: {
// Doubles never overflow; plain IEEE add/sub suffices.
LValue C1 = lowDouble(m_node->child1());
LValue C2 = lowDouble(m_node->child2());
setDouble(isSub ? m_out.doubleSub(C1, C2) : m_out.doubleAdd(C1, C2));
DFG_CRASH(m_graph, m_node, "Bad use kind");
// Untyped subtraction goes through a JITSubIC math inline cache (add has
// its own compileValueAdd path).
ArithProfile* arithProfile = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic)->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
JITSubIC* subIC = codeBlock()->addJITSubIC(arithProfile);
auto repatchingFunction = operationValueSubOptimize;
auto nonRepatchingFunction = operationValueSub;
compileMathIC(subIC, repatchingFunction, nonRepatchingFunction);
DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers ArithClz32: count-leading-zeros directly for int32 inputs, or via
// the runtime operation for untyped inputs.
void compileArithClz32()
if (m_node->child1().useKind() == Int32Use || m_node->child1().useKind() == KnownInt32Use) {
LValue operand = lowInt32(m_node->child1());
setInt32(m_out.ctlz32(operand));
DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
LValue argument = lowJSValue(m_node->child1());
LValue result = vmCall(Int32, m_out.operation(operationArithClz32), m_callFrame, argument);
// Lowers ArithMul for int32, int52, double, and untyped (IC) use kinds.
// Checked integer multiplies speculate on overflow, and negative-zero checks
// catch the 0 * negative (or negative * 0) cases whose true result is -0.
void compileArithMul()
switch (m_node->binaryUseKind()) {
LValue left = lowInt32(m_node->child1());
LValue right = lowInt32(m_node->child2());
if (!shouldCheckOverflow(m_node->arithMode()))
result = m_out.mul(left, right);
CheckValue* speculation = m_out.speculateMul(left, right);
blessSpeculation(speculation, Overflow, noValue(), nullptr, m_origin);
result = speculation;
if (shouldCheckNegativeZero(m_node->arithMode())) {
LBasicBlock slowCase = m_out.newBlock();
LBasicBlock continuation = m_out.newBlock();
// A zero product needs the -0 check: if either factor is negative,
// the mathematically correct result is -0, which int32 cannot hold.
m_out.notZero32(result), usually(continuation), rarely(slowCase));
LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int32Zero));
speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int32Zero));
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
// Int52 multiply: one operand in each int52 representation, per the
// lowWhicheverInt52/opposite(kind) pairing.
LValue left = lowWhicheverInt52(m_node->child1(), kind);
LValue right = lowInt52(m_node->child2(), opposite(kind));
CheckValue* result = m_out.speculateMul(left, right);
blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
if (shouldCheckNegativeZero(m_node->arithMode())) {
LBasicBlock slowCase = m_out.newBlock();
LBasicBlock continuation = m_out.newBlock();
m_out.notZero64(result), usually(continuation), rarely(slowCase));
LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int64Zero));
speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int64Zero));
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
case DoubleRepUse: {
m_out.doubleMul(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
// Untyped multiply goes through a JITMulIC math inline cache.
ArithProfile* arithProfile = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic)->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
JITMulIC* mulIC = codeBlock()->addJITMulIC(arithProfile);
auto repatchingFunction = operationValueMulOptimize;
auto nonRepatchingFunction = operationValueMul;
compileMathIC(mulIC, repatchingFunction, nonRepatchingFunction);
DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers ArithDiv. The int32 path guards the two hardware-trapping cases
// (divide by zero, INT_MIN / -1) and, in checked mode, exits when the result
// is inexact or -0; otherwise it uses a "chill" division that is defined for
// those inputs.
void compileArithDiv()
switch (m_node->binaryUseKind()) {
LValue numerator = lowInt32(m_node->child1());
LValue denominator = lowInt32(m_node->child2());
if (shouldCheckNegativeZero(m_node->arithMode())) {
LBasicBlock zeroNumerator = m_out.newBlock();
LBasicBlock numeratorContinuation = m_out.newBlock();
m_out.isZero32(numerator),
rarely(zeroNumerator), usually(numeratorContinuation));
LBasicBlock innerLastNext = m_out.appendTo(zeroNumerator, numeratorContinuation);
// 0 / negative would produce -0, which the int32 result can't hold.
NegativeZero, noValue(), 0, m_out.lessThan(denominator, m_out.int32Zero));
m_out.jump(numeratorContinuation);
m_out.appendTo(numeratorContinuation, innerLastNext);
if (shouldCheckOverflow(m_node->arithMode())) {
LBasicBlock unsafeDenominator = m_out.newBlock();
LBasicBlock continuation = m_out.newBlock();
// denominator+1 > 1 (unsigned) is a single test for "denominator is
// neither 0 nor -1", the two values that can trap in hardware.
LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
m_out.above(adjustedDenominator, m_out.int32One),
usually(continuation), rarely(unsafeDenominator));
LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator));
speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31));
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
LValue result = m_out.div(numerator, denominator);
// Exit if the division was inexact (result * denominator != numerator).
Overflow, noValue(), 0,
m_out.notEqual(m_out.mul(result, denominator), numerator));
setInt32(m_out.chillDiv(numerator, denominator));
case DoubleRepUse: {
setDouble(m_out.doubleDiv(
lowDouble(m_node->child1()), lowDouble(m_node->child2())));
// Untyped division uses the shared binary snippet machinery; the divide
// snippet needs an extra FP scratch register.
emitBinarySnippet<JITDivGenerator, NeedScratchFPR>(operationValueDiv);
DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers ArithMod. Mirrors compileArithDiv's int32 guards: checked mode
// excludes the trapping denominators (0 and -1 with INT_MIN numerator) via
// speculation, unchecked mode uses a defined "chill" modulo; a negative
// numerator with a zero remainder means the true result is -0.
void compileArithMod()
switch (m_node->binaryUseKind()) {
LValue numerator = lowInt32(m_node->child1());
LValue denominator = lowInt32(m_node->child2());
if (shouldCheckOverflow(m_node->arithMode())) {
LBasicBlock unsafeDenominator = m_out.newBlock();
LBasicBlock continuation = m_out.newBlock();
// denominator+1 > 1 (unsigned) tests "neither 0 nor -1" in one branch.
LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
m_out.above(adjustedDenominator, m_out.int32One),
usually(continuation), rarely(unsafeDenominator));
LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator));
speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31));
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
LValue result = m_out.mod(numerator, denominator);
remainder = m_out.chillMod(numerator, denominator);
if (shouldCheckNegativeZero(m_node->arithMode())) {
LBasicBlock negativeNumerator = m_out.newBlock();
LBasicBlock numeratorContinuation = m_out.newBlock();
m_out.lessThan(numerator, m_out.int32Zero),
unsure(negativeNumerator), unsure(numeratorContinuation));
LBasicBlock innerLastNext = m_out.appendTo(negativeNumerator, numeratorContinuation);
// Negative numerator with remainder 0: true result is -0, so exit.
speculate(NegativeZero, noValue(), 0, m_out.isZero32(remainder));
m_out.jump(numeratorContinuation);
m_out.appendTo(numeratorContinuation, innerLastNext);
setInt32(remainder);
case DoubleRepUse: {
m_out.doubleMod(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers ArithMin/ArithMax. Int32 is a simple compare-select. The double
// path must also produce PNaN when either operand is NaN (no ordered
// comparison holds), which the select's PNaN fallback handles.
void compileArithMinOrMax()
switch (m_node->binaryUseKind()) {
LValue left = lowInt32(m_node->child1());
LValue right = lowInt32(m_node->child2());
m_node->op() == ArithMin
? m_out.lessThan(left, right)
: m_out.lessThan(right, left),
case DoubleRepUse: {
LValue left = lowDouble(m_node->child1());
LValue right = lowDouble(m_node->child2());
LBasicBlock notLessThan = m_out.newBlock();
LBasicBlock continuation = m_out.newBlock();
Vector<ValueFromBlock, 2> results;
// Fast path: left already wins the comparison, so it is the answer.
results.append(m_out.anchor(left));
m_node->op() == ArithMin
? m_out.doubleLessThan(left, right)
: m_out.doubleGreaterThan(left, right),
unsure(continuation), unsure(notLessThan));
LBasicBlock lastNext = m_out.appendTo(notLessThan, continuation);
// Otherwise: right wins if the opposite ordered comparison holds;
// if neither holds, an operand is NaN and the result is PNaN.
results.append(m_out.anchor(m_out.select(
m_node->op() == ArithMin
? m_out.doubleGreaterThanOrEqual(left, right)
: m_out.doubleLessThanOrEqual(left, right),
right, m_out.constDouble(PNaN))));
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
setDouble(m_out.phi(Double, results));
DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers ArithAbs. Int32 uses the branchless mask trick; double uses the FP
// abs instruction; untyped calls the runtime operation.
void compileArithAbs()
switch (m_node->child1().useKind()) {
LValue value = lowInt32(m_node->child1());
// mask is all-ones for negative values, zero otherwise, so
// (value + mask) ^ mask negates negative values and leaves others alone.
LValue mask = m_out.aShr(value, m_out.constInt32(31));
LValue result = m_out.bitXor(mask, m_out.add(mask, value));
// abs(INT_MIN) overflows back to INT_MIN (negative), caught here.
if (shouldCheckOverflow(m_node->arithMode()))
speculate(Overflow, noValue(), 0, m_out.lessThan(result, m_out.int32Zero));
case DoubleRepUse: {
setDouble(m_out.doubleAbs(lowDouble(m_node->child1())));
DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
LValue argument = lowJSValue(m_node->child1());
LValue result = vmCall(Double, m_out.operation(operationArithAbs), m_callFrame, argument);
// Lowers ArithUnary (sin, cos, log, etc. — selected by arithUnaryType()):
// inline double math for DoubleRepUse, runtime call otherwise.
void compileArithUnary()
if (m_node->child1().useKind() == DoubleRepUse) {
setDouble(m_out.doubleUnary(m_node->arithUnaryType(), lowDouble(m_node->child1())));
LValue argument = lowJSValue(m_node->child1());
LValue result = vmCall(Double, m_out.operation(DFG::arithUnaryOperation(m_node->arithUnaryType())), m_callFrame, argument);
// Lowers ArithPow. Int32 exponents use the powi fast path directly. For
// double exponents this open-codes the ECMAScript pow special cases before
// falling back to the real pow():
//   - small integral exponents -> powi loop;
//   - NaN exponent, or |base| == 1 with infinite exponent -> NaN;
//   - exponent == 0.5 -> sqrt (with separate 0 and infinity handling);
//   - exponent == -0.5 -> 1/sqrt (with separate 0 and infinity handling).
void compileArithPow()
if (m_node->child2().useKind() == Int32Use)
setDouble(m_out.doublePowi(lowDouble(m_node->child1()), lowInt32(m_node->child2())));
LValue base = lowDouble(m_node->child1());
LValue exponent = lowDouble(m_node->child2());
LBasicBlock integerExponentIsSmallBlock = m_out.newBlock();
LBasicBlock integerExponentPowBlock = m_out.newBlock();
LBasicBlock doubleExponentPowBlockEntry = m_out.newBlock();
LBasicBlock nanExceptionBaseIsOne = m_out.newBlock();
LBasicBlock nanExceptionExponentIsInfinity = m_out.newBlock();
LBasicBlock testExponentIsOneHalf = m_out.newBlock();
LBasicBlock handleBaseZeroExponentIsOneHalf = m_out.newBlock();
LBasicBlock handleInfinityForExponentIsOneHalf = m_out.newBlock();
LBasicBlock exponentIsOneHalfNormal = m_out.newBlock();
LBasicBlock exponentIsOneHalfInfinity = m_out.newBlock();
LBasicBlock testExponentIsNegativeOneHalf = m_out.newBlock();
LBasicBlock testBaseZeroExponentIsNegativeOneHalf = m_out.newBlock();
LBasicBlock handleBaseZeroExponentIsNegativeOneHalf = m_out.newBlock();
LBasicBlock handleInfinityForExponentIsNegativeOneHalf = m_out.newBlock();
LBasicBlock exponentIsNegativeOneHalfNormal = m_out.newBlock();
LBasicBlock exponentIsNegativeOneHalfInfinity = m_out.newBlock();
LBasicBlock powBlock = m_out.newBlock();
LBasicBlock nanExceptionResultIsNaN = m_out.newBlock();
LBasicBlock continuation = m_out.newBlock();
// An exponent that round-trips through int32 unchanged is integral and
// eligible for the powi fast path.
LValue integerExponent = m_out.doubleToInt(exponent);
LValue integerExponentConvertedToDouble = m_out.intToDouble(integerExponent);
LValue exponentIsInteger = m_out.doubleEqual(exponent, integerExponentConvertedToDouble);
m_out.branch(exponentIsInteger, unsure(integerExponentIsSmallBlock), unsure(doubleExponentPowBlockEntry));
LBasicBlock lastNext = m_out.appendTo(integerExponentIsSmallBlock, integerExponentPowBlock);
LValue integerExponentBelowMax = m_out.belowOrEqual(integerExponent, m_out.constInt32(maxExponentForIntegerMathPow));
m_out.branch(integerExponentBelowMax, usually(integerExponentPowBlock), rarely(doubleExponentPowBlockEntry));
m_out.appendTo(integerExponentPowBlock, doubleExponentPowBlockEntry);
ValueFromBlock powDoubleIntResult = m_out.anchor(m_out.doublePowi(base, integerExponent));
m_out.jump(continuation);
// If y is NaN, the result is NaN.
m_out.appendTo(doubleExponentPowBlockEntry, nanExceptionBaseIsOne);
LValue exponentIsNaN;
if (provenType(m_node->child2()) & SpecDoubleNaN)
exponentIsNaN = m_out.doubleNotEqualOrUnordered(exponent, exponent);
exponentIsNaN = m_out.booleanFalse;
m_out.branch(exponentIsNaN, rarely(nanExceptionResultIsNaN), usually(nanExceptionBaseIsOne));
// If abs(x) is 1 and y is +infinity, the result is NaN.
// If abs(x) is 1 and y is -infinity, the result is NaN.
// Test if base == 1.
m_out.appendTo(nanExceptionBaseIsOne, nanExceptionExponentIsInfinity);
LValue absoluteBase = m_out.doubleAbs(base);
LValue absoluteBaseIsOne = m_out.doubleEqual(absoluteBase, m_out.constDouble(1));
m_out.branch(absoluteBaseIsOne, rarely(nanExceptionExponentIsInfinity), usually(testExponentIsOneHalf));
// Test if abs(y) == Infinity.
m_out.appendTo(nanExceptionExponentIsInfinity, testExponentIsOneHalf);
LValue absoluteExponent = m_out.doubleAbs(exponent);
LValue absoluteExponentIsInfinity = m_out.doubleEqual(absoluteExponent, m_out.constDouble(std::numeric_limits<double>::infinity()));
m_out.branch(absoluteExponentIsInfinity, rarely(nanExceptionResultIsNaN), usually(testExponentIsOneHalf));
// If y == 0.5 or y == -0.5, handle it through SQRT.
// We have to be careful with -0 and -Infinity.
m_out.appendTo(testExponentIsOneHalf, handleBaseZeroExponentIsOneHalf);
LValue exponentIsOneHalf = m_out.doubleEqual(exponent, m_out.constDouble(0.5));
m_out.branch(exponentIsOneHalf, rarely(handleBaseZeroExponentIsOneHalf), usually(testExponentIsNegativeOneHalf));
// Base 0 (or -0) with exponent 0.5: result is +0, not sqrt(-0) = -0.
m_out.appendTo(handleBaseZeroExponentIsOneHalf, handleInfinityForExponentIsOneHalf);
LValue baseIsZeroExponentIsOneHalf = m_out.doubleEqual(base, m_out.doubleZero);
ValueFromBlock zeroResultExponentIsOneHalf = m_out.anchor(m_out.doubleZero);
m_out.branch(baseIsZeroExponentIsOneHalf, rarely(continuation), usually(handleInfinityForExponentIsOneHalf));
// Test if abs(x) == Infinity.
m_out.appendTo(handleInfinityForExponentIsOneHalf, exponentIsOneHalfNormal);
LValue absoluteBaseIsInfinityOneHalf = m_out.doubleEqual(absoluteBase, m_out.constDouble(std::numeric_limits<double>::infinity()));
m_out.branch(absoluteBaseIsInfinityOneHalf, rarely(exponentIsOneHalfInfinity), usually(exponentIsOneHalfNormal));
// The exponent is 0.5, the base is finite or NaN, we can use SQRT.
m_out.appendTo(exponentIsOneHalfNormal, exponentIsOneHalfInfinity);
ValueFromBlock sqrtResult = m_out.anchor(m_out.doubleSqrt(base));
m_out.jump(continuation);
// The exponent is 0.5, the base is infinite, the result is always infinite.
m_out.appendTo(exponentIsOneHalfInfinity, testExponentIsNegativeOneHalf);
ValueFromBlock sqrtInfinityResult = m_out.anchor(m_out.constDouble(std::numeric_limits<double>::infinity()));
m_out.jump(continuation);
// Test if y == -0.5
m_out.appendTo(testExponentIsNegativeOneHalf, testBaseZeroExponentIsNegativeOneHalf);
LValue exponentIsNegativeOneHalf = m_out.doubleEqual(exponent, m_out.constDouble(-0.5));
m_out.branch(exponentIsNegativeOneHalf, rarely(testBaseZeroExponentIsNegativeOneHalf), usually(powBlock));
// Base 0 with exponent -0.5: 1/sqrt(0) is +Infinity.
m_out.appendTo(testBaseZeroExponentIsNegativeOneHalf, handleBaseZeroExponentIsNegativeOneHalf);
LValue baseIsZeroExponentIsNegativeOneHalf = m_out.doubleEqual(base, m_out.doubleZero);
m_out.branch(baseIsZeroExponentIsNegativeOneHalf, rarely(handleBaseZeroExponentIsNegativeOneHalf), usually(handleInfinityForExponentIsNegativeOneHalf));
m_out.appendTo(handleBaseZeroExponentIsNegativeOneHalf, handleInfinityForExponentIsNegativeOneHalf);
ValueFromBlock oneOverSqrtZeroResult = m_out.anchor(m_out.constDouble(std::numeric_limits<double>::infinity()));
m_out.jump(continuation);
// Test if abs(x) == Infinity.
m_out.appendTo(handleInfinityForExponentIsNegativeOneHalf, exponentIsNegativeOneHalfNormal);
LValue absoluteBaseIsInfinityNegativeOneHalf = m_out.doubleEqual(absoluteBase, m_out.constDouble(std::numeric_limits<double>::infinity()));
m_out.branch(absoluteBaseIsInfinityNegativeOneHalf, rarely(exponentIsNegativeOneHalfInfinity), usually(exponentIsNegativeOneHalfNormal));
// The exponent is -0.5, the base is finite or NaN, we can use 1/SQRT.
m_out.appendTo(exponentIsNegativeOneHalfNormal, exponentIsNegativeOneHalfInfinity);
LValue sqrtBase = m_out.doubleSqrt(base);
ValueFromBlock oneOverSqrtResult = m_out.anchor(m_out.div(m_out.constDouble(1.), sqrtBase));
m_out.jump(continuation);
// The exponent is -0.5, the base is infinite, the result is always zero.
m_out.appendTo(exponentIsNegativeOneHalfInfinity, powBlock);
ValueFromBlock oneOverSqrtInfinityResult = m_out.anchor(m_out.doubleZero);
m_out.jump(continuation);
// General case: fall back to the real pow().
m_out.appendTo(powBlock, nanExceptionResultIsNaN);
ValueFromBlock powResult = m_out.anchor(m_out.doublePow(base, exponent));
m_out.jump(continuation);
m_out.appendTo(nanExceptionResultIsNaN, continuation);
ValueFromBlock pureNan = m_out.anchor(m_out.constDouble(PNaN));
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
setDouble(m_out.phi(Double, powDoubleIntResult, zeroResultExponentIsOneHalf, sqrtResult, sqrtInfinityResult, oneOverSqrtZeroResult, oneOverSqrtResult, oneOverSqrtInfinityResult, powResult, pureNan));
// Lowers DFG ArithRandom: inlines WeakRandom::advance() — the generator state
// (low/high 64-bit words) lives inside the JSGlobalObject and is read/written
// via absolute addresses — then converts the low 53 bits of the 64-bit result
// into a double in [0, 1).
// NOTE(review): the final store of the result falls outside the visible span —
// presumably setDouble(result); confirm against the full file.
2479 void compileArithRandom()
2481 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
2483 // Inlined WeakRandom::advance().
2484 // uint64_t x = m_low;
2485 void* lowAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset();
2486 LValue low = m_out.load64(m_out.absolute(lowAddress));
2487 // uint64_t y = m_high;
2488 void* highAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset();
2489 LValue high = m_out.load64(m_out.absolute(highAddress));
// m_low = m_high; (first half of the xorshift state rotation)
2491 m_out.store64(high, m_out.absolute(lowAddress));
// x ^= x << 23;
2494 LValue phase1 = m_out.bitXor(m_out.shl(low, m_out.constInt64(23)), low);
// x ^= x >> 17;
2497 LValue phase2 = m_out.bitXor(m_out.lShr(phase1, m_out.constInt64(17)), phase1);
2499 // x ^= y ^ (y >> 26);
2500 LValue phase3 = m_out.bitXor(m_out.bitXor(high, m_out.lShr(high, m_out.constInt64(26))), phase2);
// m_high = x; (second half of the state rotation)
2503 m_out.store64(phase3, m_out.absolute(highAddress));
// The generator's output is x + y.
2506 LValue random64 = m_out.add(phase3, high);
2508 // Extract random 53bit. [0, 53] bit is safe integer number ranges in double representation.
2509 LValue random53 = m_out.bitAnd(random64, m_out.constInt64((1ULL << 53) - 1));
2511 LValue double53Integer = m_out.intToDouble(random53);
2513 // Convert `(53bit double integer value) / (1 << 53)` to `(53bit double integer value) * (1.0 / (1 << 53))`.
2514 // In latter case, `1.0 / (1 << 53)` will become a double value represented as (mantissa = 0 & exp = 970, it means 1e-(2**54)).
2515 static const double scale = 1.0 / (1ULL << 53);
2517 // Multiplying 1e-(2**54) with the double integer does not change anything of the mantissa part of the double integer.
2518 // It just reduces the exp part of the given 53bit double integer.
2519 // (Except for 0.0. This is specially handled and in this case, exp just becomes 0.)
2520 // Now we get 53bit precision random double value in [0, 1).
2521 LValue result = m_out.doubleMul(double53Integer, m_out.constDouble(scale));
// Lowers DFG ArithRound (JS Math.round semantics).
// DoubleRepUse fast paths:
//   - When an integer result is wanted and -0 need not be distinguished,
//     round is simply floor(x + 0.5).
//   - Otherwise: take ceil(x); if ceil(x) - x > 0.5 (or is NaN/unordered),
//     subtract 1 — i.e. halves round toward +Infinity, as Math.round requires.
// If an integer is wanted, the double result is then converted with a
// negative-zero check per the rounding mode; otherwise the double is kept.
// UntypedUse falls back to the operationArithRound runtime call.
// NOTE(review): several structural lines (else branches / closing braces and a
// setDouble(result) for the double-producing path) are elided from this span.
2526 void compileArithRound()
2528 if (m_node->child1().useKind() == DoubleRepUse) {
2529 LValue result = nullptr;
2530 if (producesInteger(m_node->arithRoundingMode()) && !shouldCheckNegativeZero(m_node->arithRoundingMode())) {
2531 LValue value = lowDouble(m_node->child1());
2532 result = m_out.doubleFloor(m_out.doubleAdd(value, m_out.constDouble(0.5)));
2534 LBasicBlock realPartIsMoreThanHalf = m_out.newBlock();
2535 LBasicBlock continuation = m_out.newBlock();
2537 LValue value = lowDouble(m_node->child1());
2538 LValue integerValue = m_out.doubleCeil(value);
2539 ValueFromBlock integerValueResult = m_out.anchor(integerValue);
// Fractional distance from the value up to its ceiling.
2541 LValue realPart = m_out.doubleSub(integerValue, value);
// GreaterThanOrUnordered: NaN also takes the "rounded down" path.
2543 m_out.branch(m_out.doubleGreaterThanOrUnordered(realPart, m_out.constDouble(0.5)), unsure(realPartIsMoreThanHalf), unsure(continuation));
2545 LBasicBlock lastNext = m_out.appendTo(realPartIsMoreThanHalf, continuation);
2546 LValue integerValueRoundedDown = m_out.doubleSub(integerValue, m_out.constDouble(1));
2547 ValueFromBlock integerValueRoundedDownResult = m_out.anchor(integerValueRoundedDown);
2548 m_out.jump(continuation);
2549 m_out.appendTo(continuation, lastNext);
2551 result = m_out.phi(Double, integerValueResult, integerValueRoundedDownResult);
2554 if (producesInteger(m_node->arithRoundingMode())) {
2555 LValue integerValue = convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode()));
2556 setInt32(integerValue);
// Generic (untyped) fallback: call into the runtime.
2562 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2563 LValue argument = lowJSValue(m_node->child1());
2564 setJSValue(vmCall(Int64, m_out.operation(operationArithRound), m_callFrame, argument));
// Lowers DFG ArithFloor. DoubleRepUse: B3 doubleFloor, converted to int32
// (with a -0 check per the rounding mode) when an integer result is required,
// otherwise kept as a double. UntypedUse: runtime call to operationArithFloor.
2567 void compileArithFloor()
2569 if (m_node->child1().useKind() == DoubleRepUse) {
2570 LValue value = lowDouble(m_node->child1());
2571 LValue integerValue = m_out.doubleFloor(value);
2572 if (producesInteger(m_node->arithRoundingMode()))
2573 setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode())));
// (else branch — elided in this span — keeps the double result)
2575 setDouble(integerValue);
2578 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2579 LValue argument = lowJSValue(m_node->child1());
2580 setJSValue(vmCall(Int64, m_out.operation(operationArithFloor), m_callFrame, argument));
// Lowers DFG ArithCeil. Mirrors compileArithFloor but uses B3 doubleCeil;
// UntypedUse falls back to operationArithCeil.
2583 void compileArithCeil()
2585 if (m_node->child1().useKind() == DoubleRepUse) {
2586 LValue value = lowDouble(m_node->child1());
2587 LValue integerValue = m_out.doubleCeil(value);
2588 if (producesInteger(m_node->arithRoundingMode()))
2589 setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode())));
// (else branch — elided in this span — keeps the double result)
2591 setDouble(integerValue);
2594 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2595 LValue argument = lowJSValue(m_node->child1());
2596 setJSValue(vmCall(Int64, m_out.operation(operationArithCeil), m_callFrame, argument));
// Lowers DFG ArithTrunc (round toward zero). DoubleRepUse: B3 doubleTrunc,
// converted to int32 when an integer result is required. UntypedUse falls
// back to operationArithTrunc.
// NOTE(review): the double-producing setDouble line is elided from this span.
2599 void compileArithTrunc()
2601 if (m_node->child1().useKind() == DoubleRepUse) {
2602 LValue value = lowDouble(m_node->child1());
2603 LValue result = m_out.doubleTrunc(value);
2604 if (producesInteger(m_node->arithRoundingMode()))
2605 setInt32(convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode())));
2610 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2611 LValue argument = lowJSValue(m_node->child1());
2612 setJSValue(vmCall(Int64, m_out.operation(operationArithTrunc), m_callFrame, argument));
// Lowers DFG ArithSqrt. DoubleRepUse maps directly onto B3's doubleSqrt;
// anything else calls operationArithSqrt in the runtime.
// NOTE(review): the setDouble(result) for the slow path is elided here.
2615 void compileArithSqrt()
2617 if (m_node->child1().useKind() == DoubleRepUse) {
2618 setDouble(m_out.doubleSqrt(lowDouble(m_node->child1())));
2621 LValue argument = lowJSValue(m_node->child1());
2622 LValue result = vmCall(Double, m_out.operation(operationArithSqrt), m_callFrame, argument);
// Lowers DFG ArithFRound (Math.fround — round to float32 precision).
// DoubleRepUse uses B3's fround; otherwise calls operationArithFRound.
// NOTE(review): the setDouble(result) for the slow path is elided here.
2626 void compileArithFRound()
2628 if (m_node->child1().useKind() == DoubleRepUse) {
2629 setDouble(m_out.fround(lowDouble(m_node->child1())));
2632 LValue argument = lowJSValue(m_node->child1());
2633 LValue result = vmCall(Double, m_out.operation(operationArithFRound), m_callFrame, argument);
// Lowers DFG ArithNegate, dispatching on the child's use kind:
//   - Int32 (case label elided from this span): plain neg when overflow
//     checking is off; speculateSub(0, value) with an Overflow exit when
//     checking overflow only; and when -0 must also be caught, first exit
//     if (value & 0x7fffffff) == 0 (i.e. value is 0 or INT32_MIN) then negate.
//   - Int52Rep: if the value provably fits in the narrower int52 form, negate
//     directly and exit on a zero result when -0 matters; otherwise use a
//     checked 64-bit speculateSub with an Int52Overflow exit.
//   - DoubleRep: unchecked doubleNeg.
//   - Untyped: negate through a JITNegIC math IC fed by the baseline
//     ArithProfile (repatching fast path, operationArithNegate fallback).
2637 void compileArithNegate()
2639 switch (m_node->child1().useKind()) {
2641 LValue value = lowInt32(m_node->child1());
2644 if (!shouldCheckOverflow(m_node->arithMode()))
2645 result = m_out.neg(value);
2646 else if (!shouldCheckNegativeZero(m_node->arithMode())) {
2647 CheckValue* check = m_out.speculateSub(m_out.int32Zero, value);
2648 blessSpeculation(check, Overflow, noValue(), nullptr, m_origin);
// value is 0 or INT32_MIN exactly when its low 31 bits are all zero.
2651 speculate(Overflow, noValue(), 0, m_out.testIsZero32(value, m_out.constInt32(0x7fffffff)));
2652 result = m_out.neg(value);
2660 if (!abstractValue(m_node->child1()).couldBeType(SpecInt52Only)) {
2662 LValue value = lowWhicheverInt52(m_node->child1(), kind);
2663 LValue result = m_out.neg(value);
2664 if (shouldCheckNegativeZero(m_node->arithMode()))
2665 speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
2666 setInt52(result, kind);
2670 LValue value = lowInt52(m_node->child1());
2671 CheckValue* result = m_out.speculateSub(m_out.int64Zero, value);
2672 blessSpeculation(result, Int52Overflow, noValue(), nullptr, m_origin);
2673 if (shouldCheckNegativeZero(m_node->arithMode()))
2674 speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
2679 case DoubleRepUse: {
2680 setDouble(m_out.doubleNeg(lowDouble(m_node->child1())));
2685 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2686 ArithProfile* arithProfile = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic)->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
2687 JITNegIC* negIC = codeBlock()->addJITNegIC(arithProfile);
2688 auto repatchingFunction = operationArithNegateOptimize;
2689 auto nonRepatchingFunction = operationArithNegate;
2690 compileMathIC(negIC, repatchingFunction, nonRepatchingFunction);
// Lowers DFG BitAnd. Untyped operands go through the JITBitAndGenerator
// snippet (operationValueBitAnd slow path); int32 operands use B3 bitAnd.
2695 void compileBitAnd()
2697 if (m_node->isBinaryUseKind(UntypedUse)) {
2698 emitBinaryBitOpSnippet<JITBitAndGenerator>(operationValueBitAnd);
2701 setInt32(m_out.bitAnd(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
// NOTE(review): the enclosing compileBitOr() signature line is elided from
// this span. Body mirrors compileBitAnd: snippet + operationValueBitOr for
// untyped operands, B3 bitOr for int32 operands.
2706 if (m_node->isBinaryUseKind(UntypedUse)) {
2707 emitBinaryBitOpSnippet<JITBitOrGenerator>(operationValueBitOr);
2710 setInt32(m_out.bitOr(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
// Lowers DFG BitXor; same shape as compileBitAnd/compileBitOr.
2713 void compileBitXor()
2715 if (m_node->isBinaryUseKind(UntypedUse)) {
2716 emitBinaryBitOpSnippet<JITBitXorGenerator>(operationValueBitXor);
2719 setInt32(m_out.bitXor(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
// Lowers DFG BitRShift (signed >>). Untyped operands use the snippet
// generator; int32 operands use arithmetic shift with the shift amount
// masked to 0..31 (JS shift semantics: count taken mod 32).
2722 void compileBitRShift()
2724 if (m_node->isBinaryUseKind(UntypedUse)) {
2725 emitRightShiftSnippet(JITRightShiftGenerator::SignedShift);
2728 setInt32(m_out.aShr(
2729 lowInt32(m_node->child1()),
2730 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
// Lowers DFG BitLShift (<<). Shift amount masked to 0..31 per JS semantics.
// NOTE(review): the m_out.shl( call line is elided from this span.
2733 void compileBitLShift()
2735 if (m_node->isBinaryUseKind(UntypedUse)) {
2736 emitBinaryBitOpSnippet<JITLeftShiftGenerator>(operationValueBitLShift);
2740 lowInt32(m_node->child1()),
2741 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
// Lowers DFG BitURShift (unsigned >>>). Logical shift, amount masked to 0..31.
2744 void compileBitURShift()
2746 if (m_node->isBinaryUseKind(UntypedUse)) {
2747 emitRightShiftSnippet(JITRightShiftGenerator::UnsignedShift);
2750 setInt32(m_out.lShr(
2751 lowInt32(m_node->child1()),
2752 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
// Lowers DFG UInt32ToNumber. If the arith mode tolerates overflow, the value
// is zero-extended to a strict int52 (so values >= 2^31 survive). Otherwise
// we speculate the value is non-negative (exit with Overflow if it is) and
// can then reuse it as a signed int32.
// NOTE(review): the setInt32(value) after the speculation is elided here.
2755 void compileUInt32ToNumber()
2757 LValue value = lowInt32(m_node->child1());
2759 if (doesOverflow(m_node->arithMode())) {
2760 setStrictInt52(m_out.zeroExtPtr(value));
2764 speculate(Overflow, noValue(), 0, m_out.lessThan(value, m_out.int32Zero));
// Lowers DFG CheckStructure: exit unless the cell's structure ID is in the
// node's structure set. Exit kind is BadConstantCache when the child is a
// known constant (a cache against a constant went bad), else BadCache.
// CellOrOtherUse additionally allows null/undefined ("other") values, which
// bypass the structure check but are type-checked as SpecCell | SpecOther.
// NOTE(review): the checkStructure(...) call-head lines are elided from this
// span; the visible arguments feed that helper.
2768 void compileCheckStructure()
2771 if (m_node->child1()->hasConstant())
2772 exitKind = BadConstantCache;
2774 exitKind = BadCache;
2776 switch (m_node->child1().useKind()) {
2778 case KnownCellUse: {
2779 LValue cell = lowCell(m_node->child1());
2782 m_out.load32(cell, m_heaps.JSCell_structureID), jsValueValue(cell),
2783 exitKind, m_node->structureSet(),
2784 [&] (RegisteredStructure structure) {
2785 return weakStructureID(structure);
2790 case CellOrOtherUse: {
2791 LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);
2793 LBasicBlock cellCase = m_out.newBlock();
2794 LBasicBlock notCellCase = m_out.newBlock();
2795 LBasicBlock continuation = m_out.newBlock();
2798 isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase));
2800 LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
2802 m_out.load32(value, m_heaps.JSCell_structureID), jsValueValue(value),
2803 exitKind, m_node->structureSet(),
2804 [&] (RegisteredStructure structure) {
2805 return weakStructureID(structure);
2807 m_out.jump(continuation);
// Non-cell values must be "other" (null/undefined) for this use kind.
2809 m_out.appendTo(notCellCase, continuation);
2810 FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecCell | SpecOther, isNotOther(value));
2811 m_out.jump(continuation);
2813 m_out.appendTo(continuation, lastNext);
2818 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers DFG CheckStructureOrEmpty: like CheckStructure, but if the abstract
// interpreter says the value may be the empty value (all-zero JSValue, e.g. a
// TDZ slot), an empty value skips the structure check entirely via an early
// branch to the continuation.
2823 void compileCheckStructureOrEmpty()
2826 if (m_node->child1()->hasConstant())
2827 exitKind = BadConstantCache;
2829 exitKind = BadCache;
2831 LValue cell = lowCell(m_node->child1());
2832 bool maySeeEmptyValue = m_interpreter.forNode(m_node->child1()).m_type & SpecEmpty;
// Blocks are only materialized when the empty case is actually reachable.
2833 LBasicBlock notEmpty;
2834 LBasicBlock continuation;
2835 LBasicBlock lastNext;
2836 if (maySeeEmptyValue) {
2837 notEmpty = m_out.newBlock();
2838 continuation = m_out.newBlock();
2839 m_out.branch(m_out.isZero64(cell), unsure(continuation), unsure(notEmpty));
2840 lastNext = m_out.appendTo(notEmpty, continuation);
2844 m_out.load32(cell, m_heaps.JSCell_structureID), jsValueValue(cell),
2845 exitKind, m_node->structureSet(),
2846 [&] (RegisteredStructure structure) {
2847 return weakStructureID(structure);
2850 if (maySeeEmptyValue) {
2851 m_out.jump(continuation);
2852 m_out.appendTo(continuation, lastNext);
// Lowers DFG CheckCell: exit with BadCell unless the child is exactly the
// expected cell constant (compared against a weak pointer so the structure
// of the check doesn't keep the cell alive).
// NOTE(review): the speculate( call-head line is elided from this span.
2856 void compileCheckCell()
2858 LValue cell = lowCell(m_node->child1());
2861 BadCell, jsValueValue(cell), m_node->child1().node(),
2862 m_out.notEqual(cell, weakPointer(m_node->cellOperand()->cell())));
// Lowers DFG CheckBadCell — the node is known to always fail its cell check.
// NOTE(review): the body is elided from this span; presumably it
// unconditionally terminates with a BadCell exit. Confirm in the full file.
2865 void compileCheckBadCell()
// Lowers DFG CheckNotEmpty: exit with TDZFailure if the value is the empty
// JSValue (encoded as all-zero bits), i.e. a read of an uninitialized
// let/const binding.
2870 void compileCheckNotEmpty()
2872 speculate(TDZFailure, noValue(), nullptr, m_out.isZero64(lowJSValue(m_node->child1())));
// Lowers DFG CheckStringIdent: the child is lowered as an atomic string impl
// pointer, so identifier equality is a single pointer compare against the
// expected UniquedStringImpl; mismatch exits with BadIdent.
2875 void compileCheckStringIdent()
2877 UniquedStringImpl* uid = m_node->uidOperand();
2878 LValue stringImpl = lowStringIdent(m_node->child1());
2879 speculate(BadIdent, noValue(), nullptr, m_out.notEqual(stringImpl, m_out.constIntPtr(uid)));
// Lowers DFG GetExecutable: speculate the cell is a JSFunction, then load its
// executable pointer.
2882 void compileGetExecutable()
2884 LValue cell = lowCell(m_node->child1());
2885 speculateFunction(m_node->child1(), cell);
2886 setJSValue(m_out.loadPtr(cell, m_heaps.JSFunction_executable));
// Lowers DFG Arrayify / ArrayifyToStructure: if the cell does not already
// have the expected indexing shape (Arrayify checks the array mode;
// ArrayifyToStructure compares the structure ID), take a rarely-taken slow
// path that converts the butterfly via the operationEnsure* runtime calls,
// then re-check and exit with BadIndexingType if conversion didn't produce
// the expected shape.
2889 void compileArrayify()
2891 LValue cell = lowCell(m_node->child1());
2892 LValue property = !!m_node->child2() ? lowInt32(m_node->child2()) : 0;
2894 LBasicBlock unexpectedStructure = m_out.newBlock();
2895 LBasicBlock continuation = m_out.newBlock();
2897 auto isUnexpectedArray = [&] (LValue cell) {
2898 if (m_node->op() == Arrayify)
2899 return m_out.logicalNot(isArrayTypeForArrayify(cell, m_node->arrayMode()));
2901 ASSERT(m_node->op() == ArrayifyToStructure)
2902 return m_out.notEqual(m_out.load32(cell, m_heaps.JSCell_structureID), weakStructureID(m_node->structure()));
2905 m_out.branch(isUnexpectedArray(cell), rarely(unexpectedStructure), usually(continuation));
2907 LBasicBlock lastNext = m_out.appendTo(unexpectedStructure, continuation);
// For dense indexing shapes, refuse to arrayify if the index would force a
// sparse array — exit instead of creating a huge butterfly.
2910 switch (m_node->arrayMode().type()) {
2913 case Array::Contiguous:
2915 Uncountable, noValue(), 0,
2916 m_out.aboveOrEqual(property, m_out.constInt32(MIN_SPARSE_ARRAY_INDEX)));
// Convert the storage to the requested indexing type.
2923 switch (m_node->arrayMode().type()) {
2925 vmCall(Void, m_out.operation(operationEnsureInt32), m_callFrame, cell);
2928 vmCall(Void, m_out.operation(operationEnsureDouble), m_callFrame, cell);
2930 case Array::Contiguous:
2931 vmCall(Void, m_out.operation(operationEnsureContiguous), m_callFrame, cell);
2933 case Array::ArrayStorage:
2934 case Array::SlowPutArrayStorage:
2935 vmCall(Void, m_out.operation(operationEnsureArrayStorage), m_callFrame, cell);
2938 DFG_CRASH(m_graph, m_node, "Bad array type");
// The conversion may still have failed to produce the expected shape.
2942 speculate(BadIndexingType, jsValueValue(cell), 0, isUnexpectedArray(cell));
2943 m_out.jump(continuation);
2945 m_out.appendTo(continuation, lastNext);
// Lowers DFG PutStructure: a structure transition that cannot change the
// indexing type, inline type flags, or cell type (asserted below), so it is
// just a store of the new structure ID into the cell's header. The plan is
// notified so the GC/compilation machinery knows about the transition.
// NOTE(review): the m_out.store32( call-head line is elided from this span.
2948 void compilePutStructure()
2950 m_ftlState.jitCode->common.notifyCompilingStructureTransition(m_graph.m_plan, codeBlock(), m_node);
2952 RegisteredStructure oldStructure = m_node->transition()->previous;
2953 RegisteredStructure newStructure = m_node->transition()->next;
2954 ASSERT_UNUSED(oldStructure, oldStructure->indexingType() == newStructure->indexingType());
2955 ASSERT(oldStructure->typeInfo().inlineTypeFlags() == newStructure->typeInfo().inlineTypeFlags());
2956 ASSERT(oldStructure->typeInfo().type() == newStructure->typeInfo().type());
2958 LValue cell = lowCell(m_node->child1());
2960 weakStructureID(newStructure),
2961 cell, m_heaps.JSCell_structureID);
// Lowers DFG GetById / TryGetById. CellUse goes straight through the getById
// IC. Untyped values branch: cells use the IC, non-cells call the generic
// runtime function (operationGetByIdGeneric or operationTryGetByIdGeneric,
// chosen by access type). See the FIXME below about the duplicated slow path.
2964 void compileGetById(AccessType type)
2966 ASSERT(type == AccessType::Get || type == AccessType::TryGet);
2967 switch (m_node->child1().useKind()) {
2969 setJSValue(getById(lowCell(m_node->child1()), type));
2974 // This is pretty weird, since we duplicate the slow path both here and in the
2975 // code generated by the IC. We should investigate making this less bad.
2976 // https://bugs.webkit.org/show_bug.cgi?id=127830
2977 LValue value = lowJSValue(m_node->child1());
2979 LBasicBlock cellCase = m_out.newBlock();
2980 LBasicBlock notCellCase = m_out.newBlock();
2981 LBasicBlock continuation = m_out.newBlock();
2984 isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase));
2986 LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
2987 ValueFromBlock cellResult = m_out.anchor(getById(value, type));
2988 m_out.jump(continuation);
2990 J_JITOperation_EJI getByIdFunction;
2991 if (type == AccessType::Get)
2992 getByIdFunction = operationGetByIdGeneric;
2994 getByIdFunction = operationTryGetByIdGeneric;
2996 m_out.appendTo(notCellCase, continuation);
2997 ValueFromBlock notCellResult = m_out.anchor(vmCall(
2998 Int64, m_out.operation(getByIdFunction),
3000 m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
3001 m_out.jump(continuation);
3003 m_out.appendTo(continuation, lastNext);
3004 setJSValue(m_out.phi(Int64, cellResult, notCellResult));
3009 DFG_CRASH(m_graph, m_node, "Bad use kind");
3014 void compileGetByIdWithThis()
3016 if (m_node->child1().useKind() == CellUse && m_node->child2().useKind() == CellUse)
3017 setJSValue(getByIdWithThis(lowCell(m_node->child1()), lowCell(m_node->child2())));
3019 LValue base = lowJSValue(m_node->child1());
3020 LValue thisValue = lowJSValue(m_node->child2());
3022 LBasicBlock baseCellCase = m_out.newBlock();
3023 LBasicBlock notCellCase = m_out.newBlock();
3024 LBasicBlock thisValueCellCase = m_out.newBlock();
3025 LBasicBlock continuation = m_out.newBlock();
3028 isCell(base, provenType(m_node->child1())), unsure(baseCellCase), unsure(notCellCase));
3030 LBasicBlock lastNext = m_out.appendTo(baseCellCase, thisValueCellCase);
3033 isCell(thisValue, provenType(m_node->child2())), unsure(thisValueCellCase), unsure(notCellCase));
3035 m_out.appendTo(thisValueCellCase, notCellCase);
3036 ValueFromBlock cellResult = m_out.anchor(getByIdWithThis(base, thisValue));
3037 m_out.jump(continuation);
3039 m_out.appendTo(notCellCase, continuation);
3040 ValueFromBlock notCellResult = m_out.anchor(vmCall(
3041 Int64, m_out.operation(operationGetByIdWithThis),
3042 m_callFrame, base, thisValue,
3043 m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
3044 m_out.jump(continuation);
3046 m_out.appendTo(continuation, lastNext);
3047 setJSValue(m_out.phi(Int64, cellResult, notCellResult));
// Lowers DFG GetByValWithThis: no IC path here, always a runtime call.
// NOTE(review): the setJSValue(result) line is elided from this span.
3052 void compileGetByValWithThis()
3054 LValue base = lowJSValue(m_node->child1());
3055 LValue thisValue = lowJSValue(m_node->child2());
3056 LValue subscript = lowJSValue(m_node->child3());
3058 LValue result = vmCall(Int64, m_out.operation(operationGetByValWithThis), m_callFrame, base, thisValue, subscript);
// Lowers DFG PutByIdWithThis: always a runtime call, choosing the strict or
// sloppy-mode operation from the node's origin.
3062 void compilePutByIdWithThis()
3064 LValue base = lowJSValue(m_node->child1());
3065 LValue thisValue = lowJSValue(m_node->child2());
3066 LValue value = lowJSValue(m_node->child3());
3068 vmCall(Void, m_out.operation(m_graph.isStrictModeFor(m_node->origin.semantic) ? operationPutByIdWithThisStrict : operationPutByIdWithThis),
3069 m_callFrame, base, thisValue, value, m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()]));
// Lowers DFG PutByValWithThis: always a runtime call, choosing the strict or
// sloppy-mode operation from the node's origin.
3072 void compilePutByValWithThis()
3074 LValue base = lowJSValue(m_graph.varArgChild(m_node, 0));
3075 LValue thisValue = lowJSValue(m_graph.varArgChild(m_node, 1));
3076 LValue property = lowJSValue(m_graph.varArgChild(m_node, 2));
3077 LValue value = lowJSValue(m_graph.varArgChild(m_node, 3));
3079 vmCall(Void, m_out.operation(m_graph.isStrictModeFor(m_node->origin.semantic) ? operationPutByValWithThisStrict : operationPutByValWithThis),
3080 m_callFrame, base, thisValue, property, value);
// Lowers the DFG Atomics* read-modify-write nodes (Add/And/CompareExchange/
// Exchange/Load/Or/Store/Sub/Xor) on an int typed array.
// Children: base, index, 0..maxNumExtraAtomicsArgs operands, then storage.
// If the array mode isn't a recognized typed-array fast path (that guard is
// elided from this span), everything funnels into the matching
// operationAtomics* runtime call; otherwise the op maps onto a B3 atomic
// instruction on a pointer computed into the typed array's storage.
3083 void compileAtomicsReadModifyWrite()
3085 TypedArrayType type = m_node->arrayMode().typedArrayType();
3086 unsigned numExtraArgs = numExtraAtomicsArgs(m_node->op());
3087 Edge baseEdge = m_graph.child(m_node, 0);
3088 Edge indexEdge = m_graph.child(m_node, 1);
3089 Edge argEdges[maxNumExtraAtomicsArgs];
3090 for (unsigned i = numExtraArgs; i--;)
3091 argEdges[i] = m_graph.child(m_node, 2 + i);
3092 Edge storageEdge = m_graph.child(m_node, 2 + numExtraArgs);
// Maps the node op to its generic runtime fallback.
3094 auto operation = [&] () -> LValue {
3095 switch (m_node->op()) {
3097 return m_out.operation(operationAtomicsAdd);
3099 return m_out.operation(operationAtomicsAnd);
3100 case AtomicsCompareExchange:
3101 return m_out.operation(operationAtomicsCompareExchange);
3102 case AtomicsExchange:
3103 return m_out.operation(operationAtomicsExchange);
3105 return m_out.operation(operationAtomicsLoad);
3107 return m_out.operation(operationAtomicsOr);
3109 return m_out.operation(operationAtomicsStore);
3111 return m_out.operation(operationAtomicsSub);
3113 return m_out.operation(operationAtomicsXor);
3115 RELEASE_ASSERT_NOT_REACHED();
// Slow path: call the runtime with boxed JSValue arguments.
3121 Vector<LValue> args;
3122 args.append(m_callFrame);
3123 args.append(lowJSValue(baseEdge));
3124 args.append(lowJSValue(indexEdge));
3125 for (unsigned i = 0; i < numExtraArgs; ++i)
3126 args.append(lowJSValue(argEdges[i]));
3127 LValue result = vmCall(Int64, operation(), args);
// Fast path: lower the index and operands as machine ints and address the
// element directly in the typed array's storage.
3132 LValue index = lowInt32(indexEdge);
3134 for (unsigned i = numExtraArgs; i--;)
3135 args[i] = getIntTypedArrayStoreOperand(argEdges[i]);
3136 LValue storage = lowStorage(storageEdge);
3138 TypedPointer pointer = pointerIntoTypedArray(storage, index, type);
3139 Width width = widthForBytes(elementSize(type));
// For signed element types, mask the atomic op's raw result down to the
// element's byte width before it is turned into the JS result.
3144 auto sanitizeResult = [&] (LValue value) -> LValue {
3145 if (isSigned(type)) {
3146 switch (elementSize(type)) {
3148 value = m_out.bitAnd(value, m_out.constInt32(0xff));
3151 value = m_out.bitAnd(value, m_out.constInt32(0xffff));
3156 RELEASE_ASSERT_NOT_REACHED();
3163 switch (m_node->op()) {
3165 atomicValue = m_out.atomicXchgAdd(args[0], pointer, width);
3166 result = sanitizeResult(atomicValue);
3169 atomicValue = m_out.atomicXchgAnd(args[0], pointer, width);
3170 result = sanitizeResult(atomicValue);
3172 case AtomicsCompareExchange:
3173 atomicValue = m_out.atomicStrongCAS(args[0], args[1], pointer, width);
3174 result = sanitizeResult(atomicValue);
3176 case AtomicsExchange:
3177 atomicValue = m_out.atomicXchg(args[0], pointer, width);
3178 result = sanitizeResult(atomicValue);
// Atomics.load is expressed as an atomic add of zero.
3181 atomicValue = m_out.atomicXchgAdd(m_out.int32Zero, pointer, width);
3182 result = sanitizeResult(atomicValue);
3185 atomicValue = m_out.atomicXchgOr(args[0], pointer, width);
3186 result = sanitizeResult(atomicValue);
// Atomics.store: exchange; its JS result is the stored operand (the
// result assignment for this case is elided from this span).
3189 atomicValue = m_out.atomicXchg(args[0], pointer, width);
3193 atomicValue = m_out.atomicXchgSub(args[0], pointer, width);
3194 result = sanitizeResult(atomicValue);
3197 atomicValue = m_out.atomicXchgXor(args[0], pointer, width);
3198 result = sanitizeResult(atomicValue);
3201 RELEASE_ASSERT_NOT_REACHED();
3204 // Signify that the state against which the atomic operations are serialized is confined to just
3205 // the typed array storage, since that's as precise of an abstraction as we can have of shared
3206 // array buffer storage.
3207 m_heaps.decorateFencedAccess(&m_heaps.typedArrayProperties, atomicValue);
3209 setIntTypedArrayLoadResult(result, type);
// Lowers DFG AtomicsIsLockFree. Non-int32 input calls the runtime; an int32
// byte count is switched: 1, 2, and 4 are lock-free (true), everything else
// false.
3212 void compileAtomicsIsLockFree()
3214 if (m_node->child1().useKind() != Int32Use) {
3215 setJSValue(vmCall(Int64, m_out.operation(operationAtomicsIsLockFree), m_callFrame, lowJSValue(m_node->child1())));
3219 LValue bytes = lowInt32(m_node->child1());
3221 LBasicBlock trueCase = m_out.newBlock();
3222 LBasicBlock falseCase = m_out.newBlock();
3223 LBasicBlock continuation = m_out.newBlock();
3225 LBasicBlock lastNext = m_out.insertNewBlocksBefore(trueCase);
3227 Vector<SwitchCase> cases;
3228 cases.append(SwitchCase(m_out.constInt32(1), trueCase, Weight()));
3229 cases.append(SwitchCase(m_out.constInt32(2), trueCase, Weight()));
3230 cases.append(SwitchCase(m_out.constInt32(4), trueCase, Weight()));
3231 m_out.switchInstruction(bytes, cases, falseCase, Weight());
3233 m_out.appendTo(trueCase, falseCase);
3234 ValueFromBlock trueValue = m_out.anchor(m_out.booleanTrue);
3235 m_out.jump(continuation);
3236 m_out.appendTo(falseCase, continuation);
3237 ValueFromBlock falseValue = m_out.anchor(m_out.booleanFalse);
3238 m_out.jump(continuation);
3240 m_out.appendTo(continuation, lastNext);
3241 setBoolean(m_out.phi(Int32, trueValue, falseValue));
// Lowers DFG DefineDataProperty (Object.defineProperty data-descriptor form).
// Children: base, property key, value, attributes. Dispatches on the property
// key's use kind to the most specific runtime entry point.
3244 void compileDefineDataProperty()
3246 LValue base = lowCell(m_graph.varArgChild(m_node, 0));
3247 LValue value  = lowJSValue(m_graph.varArgChild(m_node, 2));
3248 LValue attributes = lowInt32(m_graph.varArgChild(m_node, 3));
3249 Edge& propertyEdge = m_graph.varArgChild(m_node, 1);
3250 switch (propertyEdge.useKind()) {
3252 LValue property = lowString(propertyEdge);
3253 vmCall(Void, m_out.operation(operationDefineDataPropertyString), m_callFrame, base, property, value, attributes);
3256 case StringIdentUse: {
3257 LValue property = lowStringIdent(propertyEdge);
3258 vmCall(Void, m_out.operation(operationDefineDataPropertyStringIdent), m_callFrame, base, property, value, attributes);
3262 LValue property = lowSymbol(propertyEdge);
3263 vmCall(Void, m_out.operation(operationDefineDataPropertySymbol), m_callFrame, base, property, value, attributes);
3267 LValue property = lowJSValue(propertyEdge);
3268 vmCall(Void, m_out.operation(operationDefineDataProperty), m_callFrame, base, property, value, attributes);
3272 RELEASE_ASSERT_NOT_REACHED();
// Lowers DFG DefineAccessorProperty (Object.defineProperty accessor form).
// Children: base, property key, getter, setter, attributes. Same per-key-kind
// dispatch as compileDefineDataProperty.
3276 void compileDefineAccessorProperty()
3278 LValue base = lowCell(m_graph.varArgChild(m_node, 0));
3279 LValue getter = lowCell(m_graph.varArgChild(m_node, 2));
3280 LValue setter = lowCell(m_graph.varArgChild(m_node, 3));
3281 LValue attributes = lowInt32(m_graph.varArgChild(m_node, 4));
3282 Edge& propertyEdge = m_graph.varArgChild(m_node, 1);
3283 switch (propertyEdge.useKind()) {
3285 LValue property = lowString(propertyEdge);
3286 vmCall(Void, m_out.operation(operationDefineAccessorPropertyString), m_callFrame, base, property, getter, setter, attributes);
3289 case StringIdentUse: {
3290 LValue property = lowStringIdent(propertyEdge);
3291 vmCall(Void, m_out.operation(operationDefineAccessorPropertyStringIdent), m_callFrame, base, property, getter, setter, attributes);
3295 LValue property = lowSymbol(propertyEdge);
3296 vmCall(Void, m_out.operation(operationDefineAccessorPropertySymbol), m_callFrame, base, property, getter, setter, attributes);
3300 LValue property = lowJSValue(propertyEdge);
3301 vmCall(Void, m_out.operation(operationDefineAccessorProperty), m_callFrame, base, property, getter, setter, attributes);
3305 RELEASE_ASSERT_NOT_REACHED();
// Lowers DFG PutById / PutByIdDirect via a B3 patchpoint that embeds a
// JITPutByIdGenerator inline cache. The patchpoint reserves base and value in
// registers, pins the tag registers, and clobbers the macro scratch set. The
// generator emits the fast path; the slow path (added as a late path) calls
// the generator's slow-path function and links back. Exceptions can arise
// both from the put itself and from JS setter calls made by the IC, hence the
// two exception-handle registrations.
3309 void compilePutById()
3311 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == CellUse);
3313 Node* node = m_node;
3314 LValue base = lowCell(node->child1());
3315 LValue value = lowJSValue(node->child2());
3316 auto uid = m_graph.identifiers()[node->identifierNumber()];
3318 B3::PatchpointValue* patchpoint = m_out.patchpoint(Void);
3319 patchpoint->appendSomeRegister(base);
3320 patchpoint->appendSomeRegister(value);
3321 patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
3322 patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
3323 patchpoint->clobber(RegisterSet::macroScratchRegisters());
3325 // FIXME: If this is a PutByIdFlush, we might want to late-clobber volatile registers.
3326 // https://bugs.webkit.org/show_bug.cgi?id=152848
3328 RefPtr<PatchpointExceptionHandle> exceptionHandle =
3329 preparePatchpointForExceptions(patchpoint);
3331 State* state = &m_ftlState;
3332 ECMAMode ecmaMode = m_graph.executableFor(node->origin.semantic)->ecmaMode();
3334 patchpoint->setGenerator(
3335 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
3336 AllowMacroScratchRegisterUsage allowScratch(jit);
3338 CallSiteIndex callSiteIndex =
3339 state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic);
3341 Box<CCallHelpers::JumpList> exceptions =
3342 exceptionHandle->scheduleExitCreation(params)->jumps(jit);
3344 // JS setter call ICs generated by the PutById IC will need this.
3345 exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);
// params[0] = base, params[1] = value (order of appendSomeRegister above).
3347 auto generator = Box<JITPutByIdGenerator>::create(
3348 jit.codeBlock(), node->origin.semantic, callSiteIndex,
3349 params.unavailableRegisters(), JSValueRegs(params[0].gpr()),
3350 JSValueRegs(params[1].gpr()), GPRInfo::patchpointScratchRegister, ecmaMode,
3351 node->op() == PutByIdDirect ? Direct : NotDirect);
3353 generator->generateFastPath(jit);
3354 CCallHelpers::Label done = jit.label();
// Slow path, emitted out-of-line (the addLatePath call-head line is
// elided from this span).
3357 [=] (CCallHelpers& jit) {
3358 AllowMacroScratchRegisterUsage allowScratch(jit);
3360 generator->slowPathJump().link(&jit);
3361 CCallHelpers::Label slowPathBegin = jit.label();
3362 CCallHelpers::Call slowPathCall = callOperation(
3363 *state, params.unavailableRegisters(), jit, node->origin.semantic,
3364 exceptions.get(), generator->slowPathFunction(), InvalidGPRReg,
3365 CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(),
3366 params[0].gpr(), CCallHelpers::TrustedImmPtr(uid)).call();
3367 jit.jump().linkTo(done, &jit);
3369 generator->reportSlowPathCall(slowPathBegin, slowPathCall);
3372 [=] (LinkBuffer& linkBuffer) {
3373 generator->finalize(linkBuffer);
// Lowers DFG GetButterfly: load the object's butterfly pointer, applying the
// JSValue Gigacage bounds (caging) unless the node explicitly opted out via
// GetButterflyWithoutCaging.
3379 void compileGetButterfly()
3381 LValue butterfly = m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSObject_butterfly);
3382 if (m_node->op() != GetButterflyWithoutCaging)
3383 butterfly = caged(Gigacage::JSValue, butterfly);
3384 setStorage(butterfly);
// Lowers DFG ConstantStoragePointer: the storage address is a compile-time
// constant baked into the node.
3387 void compileConstantStoragePointer()
3389 setStorage(m_out.constIntPtr(m_node->storagePointer()));
// Lowers DFG GetIndexedPropertyStorage. For strings: load the StringImpl
// pointer; if it is null the string is a rope and must be resolved through
// operationResolveRope, then the character data pointer is loaded from
// whichever impl we ended up with. For typed arrays: load the vector pointer
// (caged in the primitive Gigacage).
3392 void compileGetIndexedPropertyStorage()
3394 LValue cell = lowCell(m_node->child1());
3396 if (m_node->arrayMode().type() == Array::String) {
3397 LBasicBlock slowPath = m_out.newBlock();
3398 LBasicBlock continuation = m_out.newBlock();
// Null value pointer means the string is a rope (not yet flattened).
3400 LValue fastResultValue = m_out.loadPtr(cell, m_heaps.JSString_value);
3401 ValueFromBlock fastResult = m_out.anchor(fastResultValue);
3404 m_out.notNull(fastResultValue), usually(continuation), rarely(slowPath));
3406 LBasicBlock lastNext = m_out.appendTo(slowPath, continuation);
3408 ValueFromBlock slowResult = m_out.anchor(
3409 vmCall(pointerType(), m_out.operation(operationResolveRope), m_callFrame, cell));
3411 m_out.jump(continuation);
3413 m_out.appendTo(continuation, lastNext);
3415 setStorage(m_out.loadPtr(m_out.phi(pointerType(), fastResult, slowResult), m_heaps.StringImpl_data));
3419 DFG_ASSERT(m_graph, m_node, isTypedView(m_node->arrayMode().typedArrayType()));
3420 setStorage(caged(Gigacage::Primitive, m_out.loadPtr(cell, m_heaps.JSArrayBufferView_vector)));
// Lowers DFG CheckArray: if the abstract interpreter already proved the array
// mode, this is a no-op (the early return is elided from this span);
// otherwise exit with BadIndexingType when the cell doesn't match the mode.
// NOTE(review): the speculate( call-head line is elided from this span.
3423 void compileCheckArray()
3425 Edge edge = m_node->child1();
3426 LValue cell = lowCell(edge);
3428 if (m_node->arrayMode().alreadyChecked(m_graph, m_node, abstractValue(edge)))
3432 BadIndexingType, jsValueValue(cell), 0,
3433 m_out.logicalNot(isArrayTypeForCheckArray(cell, m_node->arrayMode())));
// Lowers DFG GetTypedArrayByteOffset. Views that are not in WastefulTypedArray
// mode have byte offset 0. Wasteful views compute the offset as
// vector - arrayBuffer->data, where the vector is caged (may be null) and the
// butterfly is caged; the ArrayBuffer data pointer itself is not yet caged
// (see the FIXME). The pointer-width difference is truncated to int32.
3436 void compileGetTypedArrayByteOffset()
3438 LValue basePtr = lowCell(m_node->child1());
3440 LBasicBlock simpleCase = m_out.newBlock();
3441 LBasicBlock wastefulCase = m_out.newBlock();
3442 LBasicBlock continuation = m_out.newBlock();
3444 LValue mode = m_out.load32(basePtr, m_heaps.JSArrayBufferView_mode);
3446 m_out.notEqual(mode, m_out.constInt32(WastefulTypedArray)),
3447 unsure(simpleCase), unsure(wastefulCase));
3449 LBasicBlock lastNext = m_out.appendTo(simpleCase, wastefulCase);
3451 ValueFromBlock simpleOut = m_out.anchor(m_out.constIntPtr(0));
3453 m_out.jump(continuation);
3455 m_out.appendTo(wastefulCase, continuation);
3457 LValue vectorPtr = cagedMayBeNull(
3458 Gigacage::Primitive,
3459 m_out.loadPtr(basePtr, m_heaps.JSArrayBufferView_vector));
3460 LValue butterflyPtr = caged(Gigacage::JSValue, m_out.loadPtr(basePtr, m_heaps.JSObject_butterfly));
3461 LValue arrayBufferPtr = m_out.loadPtr(butterflyPtr, m_heaps.Butterfly_arrayBuffer);
3462 // FIXME: This needs caging.
3463 // https://bugs.webkit.org/show_bug.cgi?id=175515
3464 LValue dataPtr = m_out.loadPtr(arrayBufferPtr, m_heaps.ArrayBuffer_data);
3466 ValueFromBlock wastefulOut = m_out.anchor(m_out.sub(vectorPtr, dataPtr));
3468 m_out.jump(continuation);
3469 m_out.appendTo(continuation, lastNext);
3471 setInt32(m_out.castToInt32(m_out.phi(pointerType(), simpleOut, wastefulOut)));
// Lowers GetPrototypeOf. For Array/Function/FinalObject use kinds it
// speculates on the cell's type and then tries to fold the prototype load
// using abstract-interpreter structure information (mono-proto vs poly-proto);
// otherwise it falls back to a VM call.
// NOTE(review): several structural lines (case labels such as ArrayUse /
// FunctionUse, break/return statements, closing braces) are missing from this
// extraction — confirm the exact switch structure against upstream.
3474 void compileGetPrototypeOf()
3476 switch (m_node->child1().useKind()) {
3479 case FinalObjectUse: {
3480 LValue object = lowCell(m_node->child1());
// Second switch emits the type speculation matching the use kind.
3481 switch (m_node->child1().useKind()) {
3483 speculateArray(m_node->child1(), object);
3486 speculateFunction(m_node->child1(), object);
3488 case FinalObjectUse:
3489 speculateFinalObject(m_node->child1(), object);
3492 RELEASE_ASSERT_NOT_REACHED();
3496 LValue structure = loadStructure(object);
// If the abstract value proves the operand is an object with a finite set
// of known structures, we can classify those structures as mono- or
// poly-proto and possibly fold the lookup.
3498 AbstractValue& value = m_state.forNode(m_node->child1());
3499 if ((value.m_type && !(value.m_type & ~SpecObject)) && value.m_structure.isFinite()) {
3500 bool hasPolyProto = false;
3501 bool hasMonoProto = false;
3502 value.m_structure.forEach([&] (RegisteredStructure structure) {
3503 if (structure->hasPolyProto())
3504 hasPolyProto = true;
3506 hasMonoProto = true;
// All known structures store the prototype on the Structure itself: one
// direct load suffices.
3509 if (hasMonoProto && !hasPolyProto) {
3510 setJSValue(m_out.load64(structure, m_heaps.Structure_prototype));
// All known structures are poly-proto: the prototype lives in the object's
// inline storage at the known poly-proto offset.
3514 if (hasPolyProto && !hasMonoProto) {
3515 setJSValue(m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), object, m_out.constInt64(knownPolyProtoOffset), ScaleEight, JSObject::offsetOfInlineStorage())));
// Mixed case: load Structure::prototype; a zero (empty) value means this
// structure is poly-proto, so fall through to the inline-storage load.
3520 LBasicBlock continuation = m_out.newBlock();
3521 LBasicBlock loadPolyProto = m_out.newBlock();
3523 LValue prototypeBits = m_out.load64(structure, m_heaps.Structure_prototype);
3524 ValueFromBlock directPrototype = m_out.anchor(prototypeBits);
3525 m_out.branch(m_out.isZero64(prototypeBits), unsure(loadPolyProto), unsure(continuation));
3527 LBasicBlock lastNext = m_out.appendTo(loadPolyProto, continuation);
3528 ValueFromBlock polyProto = m_out.anchor(
3529 m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), object, m_out.constInt64(knownPolyProtoOffset), ScaleEight, JSObject::offsetOfInlineStorage())));
3530 m_out.jump(continuation);
3532 m_out.appendTo(continuation, lastNext);
3533 setJSValue(m_out.phi(Int64, directPrototype, polyProto));
// No useful structure info: call the VM helper for known-object operands...
3537 setJSValue(vmCall(Int64, m_out.operation(operationGetPrototypeOfObject), m_callFrame, lowObject(m_node->child1())));
// ...and the fully generic helper for arbitrary JSValues.
3541 setJSValue(vmCall(Int64, m_out.operation(operationGetPrototypeOf), m_callFrame, lowJSValue(m_node->child1())));
// Lowers GetArrayLength: loads the length appropriate to the node's array
// mode — butterfly publicLength for ordinary indexed storage, the string
// length, arguments-object lengths (with OSR-exit guards against exotic
// states), or the typed-array view length.
// NOTE(review): case labels (Array::Int32/Double) and `speculate(` call
// headers are missing from this extraction — confirm against upstream.
3547 void compileGetArrayLength()
3549 switch (m_node->arrayMode().type()) {
3550 case Array::Undecided:
3553 case Array::Contiguous: {
// Ordinary indexed storage: length is the butterfly's publicLength, which
// is known non-negative.
3554 setInt32(m_out.load32NonNegative(lowStorage(m_node->child2()), m_heaps.Butterfly_publicLength));
3558 case Array::String: {
3559 LValue string = lowCell(m_node->child1());
3560 setInt32(m_out.load32NonNegative(string, m_heaps.JSString_length));
3564 case Array::DirectArguments: {
3565 LValue arguments = lowCell(m_node->child1());
// OSR-exit (ExoticObjectMode) if the arguments object has mapped
// arguments; only the unmapped fast shape supports this direct length load.
3567 ExoticObjectMode, noValue(), nullptr,
3568 m_out.notNull(m_out.loadPtr(arguments, m_heaps.DirectArguments_mappedArguments)));
3569 setInt32(m_out.load32NonNegative(arguments, m_heaps.DirectArguments_length));
3573 case Array::ScopedArguments: {
3574 LValue arguments = lowCell(m_node->child1());
// OSR-exit if "overrodeThings" is set — the object no longer has the
// default length behavior.
3576 ExoticObjectMode, noValue(), nullptr,
3577 m_out.notZero32(m_out.load8ZeroExt32(arguments, m_heaps.ScopedArguments_overrodeThings)));
3578 setInt32(m_out.load32NonNegative(arguments, m_heaps.ScopedArguments_totalLength));
// Any typed-array view mode: the element count is stored on the view.
3583 if (m_node->arrayMode().isSomeTypedArrayView()) {
3585 m_out.load32NonNegative(lowCell(m_node->child1()), m_heaps.JSArrayBufferView_length));
3589 DFG_CRASH(m_graph, m_node, "Bad array type");
// Lowers GetVectorLength: for ArrayStorage-shaped butterflies, the vector
// length (allocated capacity, as opposed to publicLength) is loaded from the
// butterfly; it is known non-negative.
3594 void compileGetVectorLength()
3596 switch (m_node->arrayMode().type()) {
3597 case Array::ArrayStorage:
3598 case Array::SlowPutArrayStorage:
3599 setInt32(m_out.load32NonNegative(lowStorage(m_node->child2()), m_heaps.Butterfly_vectorLength));
// Lowers CheckInBounds: OSR-exits with OutOfBounds unless
// 0 <= index < length. The single unsigned aboveOrEqual compare covers both
// the negative-index and too-large cases at once.
// NOTE(review): the `speculate(` call header line (original ~3608) is
// missing from this extraction — the two lines below are its arguments.
3606 void compileCheckInBounds()
3609 OutOfBounds, noValue(), 0,
3610 m_out.aboveOrEqual(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
3613 void compileGetByVal()
3615 switch (m_node->arrayMode().type()) {
3617 case Array::Contiguous: {
3618 LValue index = lowInt32(m_node->child2());
3619 LValue storage = lowStorage(m_node->child3());
3621 IndexedAbstractHeap& heap = m_node->arrayMode().type() == Array::Int32 ?
3622 m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties;
3624 if (m_node->arrayMode().isInBounds()) {
3625 LValue result = m_out.load64(baseIndex(heap, storage, index, m_node->child2()));