2 * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "FTLLowerDFGToB3.h"
32 #include "AirGenerationContext.h"
33 #include "AllowMacroScratchRegisterUsage.h"
34 #include "AtomicsObject.h"
35 #include "B3CheckValue.h"
36 #include "B3FenceValue.h"
37 #include "B3PatchpointValue.h"
38 #include "B3SlotBaseValue.h"
39 #include "B3StackmapGenerationParams.h"
40 #include "B3ValueInlines.h"
41 #include "CallFrameShuffler.h"
42 #include "CodeBlockWithJITType.h"
43 #include "DFGAbstractInterpreterInlines.h"
44 #include "DFGCapabilities.h"
45 #include "DFGDominators.h"
46 #include "DFGInPlaceAbstractState.h"
47 #include "DFGOSRAvailabilityAnalysisPhase.h"
48 #include "DFGOSRExitFuzz.h"
49 #include "DirectArguments.h"
50 #include "FTLAbstractHeapRepository.h"
51 #include "FTLAvailableRecovery.h"
52 #include "FTLExceptionTarget.h"
53 #include "FTLForOSREntryJITCode.h"
54 #include "FTLFormattedValue.h"
55 #include "FTLLazySlowPathCall.h"
56 #include "FTLLoweredNodeValue.h"
57 #include "FTLOperations.h"
58 #include "FTLOutput.h"
59 #include "FTLPatchpointExceptionHandle.h"
60 #include "FTLSnippetParams.h"
61 #include "FTLThunks.h"
62 #include "FTLWeightedTarget.h"
63 #include "JITAddGenerator.h"
64 #include "JITBitAndGenerator.h"
65 #include "JITBitOrGenerator.h"
66 #include "JITBitXorGenerator.h"
67 #include "JITDivGenerator.h"
68 #include "JITInlineCacheGenerator.h"
69 #include "JITLeftShiftGenerator.h"
70 #include "JITMathIC.h"
71 #include "JITMulGenerator.h"
72 #include "JITRightShiftGenerator.h"
73 #include "JITSubGenerator.h"
74 #include "JSAsyncFunction.h"
75 #include "JSAsyncGeneratorFunction.h"
76 #include "JSCInlines.h"
77 #include "JSGeneratorFunction.h"
78 #include "JSLexicalEnvironment.h"
80 #include "OperandsInlines.h"
81 #include "RegExpObject.h"
82 #include "ScopedArguments.h"
83 #include "ScopedArgumentsTable.h"
84 #include "ScratchRegisterAllocator.h"
85 #include "SetupVarargsFrame.h"
86 #include "ShadowChicken.h"
87 #include "StructureStubInfo.h"
88 #include "SuperSampler.h"
89 #include "ThunkGenerators.h"
90 #include "VirtualRegister.h"
93 #include <unordered_set>
95 #include <wtf/Gigacage.h>
96 #include <wtf/RecursableLambda.h>
// Local redefinition of RELEASE_ASSERT for this file: on failure it reports
// the failing expression with full file/line/function context via
// WTFReportAssertionFailure. (The remainder of the macro body — presumably
// the crash — is not visible in this view; TODO confirm against upstream.)
99 #define RELEASE_ASSERT(assertion) do { \
100 if (!(assertion)) { \
101 WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion); \
106 namespace JSC { namespace FTL {
// Monotonic counter used to give each FTL compilation a unique name
// ("jsBody_<n>_...") when verbose compilation logging is enabled.
113 std::atomic<int> compileCounter;
// Called from FTL-generated code that was proven unreachable by the
// compiler; logs the code block, basic block, and (if not UINT_MAX) the
// node index before crashing. NO_RETURN_DUE_TO_CRASH: never returns.
116 NO_RETURN_DUE_TO_CRASH static void ftlUnreachable(
117 CodeBlock* codeBlock, BlockIndex blockIndex, unsigned nodeIndex)
119 dataLog("Crashing in thought-to-be-unreachable FTL-generated code for ", pointerDump(codeBlock), " at basic block #", blockIndex);
120 if (nodeIndex != UINT_MAX)
121 dataLog(", node @", nodeIndex);
127 // Using this instead of typeCheck() helps to reduce the load on B3, by creating
128 // significantly less dead code.
//
// FTL_TYPE_CHECK_WITH_EXIT_KIND: emits a speculative type check only when the
// abstract interpreter reports that the edge can still carry types outside
// `typesPassedThrough`; otherwise the check is elided entirely.
// FTL_TYPE_CHECK (below) is the same with the BadType exit kind.
129 #define FTL_TYPE_CHECK_WITH_EXIT_KIND(exitKind, lowValue, highValue, typesPassedThrough, failCondition) do { \
130 FormattedValue _ftc_lowValue = (lowValue); \
131 Edge _ftc_highValue = (highValue); \
132 SpeculatedType _ftc_typesPassedThrough = (typesPassedThrough); \
133 if (!m_interpreter.needsTypeCheck(_ftc_highValue, _ftc_typesPassedThrough)) \
135 typeCheck(_ftc_lowValue, _ftc_highValue, _ftc_typesPassedThrough, (failCondition), exitKind); \
138 #define FTL_TYPE_CHECK(lowValue, highValue, typesPassedThrough, failCondition) \
139 FTL_TYPE_CHECK_WITH_EXIT_KIND(BadType, lowValue, highValue, typesPassedThrough, failCondition)
// LowerDFGToB3 constructor fragment: binds the lowering object to the
// compilation State's DFG graph and B3 procedure. m_state is the in-place
// abstract state over the graph, and m_interpreter runs the DFG abstract
// interpreter over that state during lowering.
142 WTF_MAKE_NONCOPYABLE(LowerDFGToB3);
144 LowerDFGToB3(State& state)
145 : m_graph(state.graph)
148 , m_proc(*state.proc)
149 , m_availabilityCalculator(m_graph)
150 , m_state(state.graph)
151 , m_interpreter(state.graph, m_state)
// lower() body fragment: builds the whole B3 procedure for the DFG graph.
// Sets up entrypoints and catch prologues, allocates one B3 block per DFG
// block, emits the stack-overflow check and argument speculations, wires the
// shared exception-handler block, lowers every block in pre-order, then
// finalizes heap decoration and block order.
157 State* state = &m_ftlState;
160 if (verboseCompilationEnabled()) {
162 "jsBody_", ++compileCounter, "_", codeBlock()->inferredName(),
163 "_", codeBlock()->hash());
168 m_proc.setNumEntrypoints(m_graph.m_numberOfEntrypoints);
169 CodeBlock* codeBlock = m_graph.m_codeBlock;
// Shared prologue for catch (op_catch OSR-entry) entrypoints: materialize the
// frame, save callee-saves, and store the CodeBlock into the call frame header.
171 Ref<B3::Air::PrologueGenerator> catchPrologueGenerator = createSharedTask<B3::Air::PrologueGeneratorFunction>(
172 [codeBlock] (CCallHelpers& jit, B3::Air::Code& code) {
173 AllowMacroScratchRegisterUsage allowScratch(jit);
174 jit.addPtr(CCallHelpers::TrustedImm32(-code.frameSize()), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
175 jit.emitSave(code.calleeSaveRegisterAtOffsetList());
176 jit.emitPutToCallFrameHeader(codeBlock, CallFrameSlot::codeBlock);
// Entrypoint 0 is the normal entry; only catch entrypoints get the prologue above.
179 for (unsigned catchEntrypointIndex : m_graph.m_entrypointIndexToCatchBytecodeOffset.keys()) {
180 RELEASE_ASSERT(catchEntrypointIndex != 0);
181 m_proc.code().setPrologueForEntrypoint(catchEntrypointIndex, catchPrologueGenerator.copyRef());
184 if (m_graph.m_maxLocalsForCatchOSREntry) {
185 uint32_t numberOfLiveLocals = std::max(*m_graph.m_maxLocalsForCatchOSREntry, 1u); // Make sure we always allocate a non-null catchOSREntryBuffer.
186 m_ftlState.jitCode->common.catchOSREntryBuffer = m_graph.m_vm.scratchBufferForSize(sizeof(JSValue) * numberOfLiveLocals);
190 m_graph.ensureSSADominators();
192 if (verboseCompilationEnabled())
193 dataLog("Function ready, beginning lowering.\n");
195 m_out.initialize(m_heaps);
197 // We use prologue frequency for all of the initialization code.
198 m_out.setFrequency(1);
200 bool hasMultipleEntrypoints = m_graph.m_numberOfEntrypoints > 1;
202 LBasicBlock prologue = m_out.newBlock();
203 LBasicBlock callEntrypointArgumentSpeculations = hasMultipleEntrypoints ? m_out.newBlock() : nullptr;
204 m_handleExceptions = m_out.newBlock();
// Create one low (B3) block per high (DFG) block up front, carrying over the
// DFG block's execution frequency.
206 for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
207 m_highBlock = m_graph.block(blockIndex);
210 m_out.setFrequency(m_highBlock->executionCount);
211 m_blocks.add(m_highBlock, m_out.newBlock());
214 // Back to prologue frequency for any bocks that get sneakily created in the initialization code.
215 m_out.setFrequency(1);
217 m_out.appendTo(prologue, hasMultipleEntrypoints ? callEntrypointArgumentSpeculations : m_handleExceptions);
218 m_out.initializeConstants(m_proc, prologue);
219 createPhiVariables();
// Reserve a locked stack-slot region sized for the machine locals; m_captured
// points one past the end of that region (base + size).
221 size_t sizeOfCaptured = sizeof(JSValue) * m_graph.m_nextMachineLocal;
222 B3::SlotBaseValue* capturedBase = m_out.lockedStackSlot(sizeOfCaptured);
223 m_captured = m_out.add(capturedBase, m_out.constIntPtr(sizeOfCaptured));
224 state->capturedValue = capturedBase->slot();
226 auto preOrder = m_graph.blocksInPreOrder();
228 m_callFrame = m_out.framePointer();
229 m_tagTypeNumber = m_out.constInt64(TagTypeNumber);
230 m_tagMask = m_out.constInt64(TagMask);
232 // Make sure that B3 knows that we really care about the mask registers. This forces the
233 // constants to be materialized in registers.
234 m_proc.addFastConstant(m_tagTypeNumber->key());
235 m_proc.addFastConstant(m_tagMask->key());
237 // We don't want the CodeBlock to have a weak pointer to itself because
238 // that would cause it to always get collected.
239 m_out.storePtr(m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())), addressFor(CallFrameSlot::codeBlock));
241 VM* vm = &this->vm();
243 // Stack Overflow Check.
244 unsigned exitFrameSize = m_graph.requiredRegisterCountForExit() * sizeof(Register);
245 MacroAssembler::AbsoluteAddress addressOfStackLimit(vm->addressOfSoftStackLimit());
246 PatchpointValue* stackOverflowHandler = m_out.patchpoint(Void);
247 CallSiteIndex callSiteIndex = callSiteIndexForCodeOrigin(m_ftlState, CodeOrigin(0));
248 stackOverflowHandler->appendSomeRegister(m_callFrame);
249 stackOverflowHandler->clobber(RegisterSet::macroScratchRegisters());
250 stackOverflowHandler->numGPScratchRegisters = 1;
251 stackOverflowHandler->setGenerator(
252 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
253 AllowMacroScratchRegisterUsage allowScratch(jit);
254 GPRReg fp = params[0].gpr();
255 GPRReg scratch = params.gpScratch(0);
// The check uses whichever is larger: this frame's size or the frame size
// required for OSR exit.
257 unsigned ftlFrameSize = params.proc().frameSize();
258 unsigned maxFrameSize = std::max(exitFrameSize, ftlFrameSize);
260 jit.addPtr(MacroAssembler::TrustedImm32(-maxFrameSize), fp, scratch);
261 MacroAssembler::JumpList stackOverflow;
// scratch = fp - maxFrameSize can wrap for frames larger than the reserved
// zone; Above(scratch, fp) catches that wrap-around as an overflow too.
262 if (UNLIKELY(maxFrameSize > Options::reservedZoneSize()))
263 stackOverflow.append(jit.branchPtr(MacroAssembler::Above, scratch, fp));
264 stackOverflow.append(jit.branchPtr(MacroAssembler::Above, addressOfStackLimit, scratch));
// Slow path (late path): restore callee-saves, record the call-site index in
// the frame, then call out to throw the stack-overflow error and jump to the
// exception handler. The two calls are linked after code generation.
266 params.addLatePath([=] (CCallHelpers& jit) {
267 AllowMacroScratchRegisterUsage allowScratch(jit);
269 stackOverflow.link(&jit);
271 // FIXME: We would not have to do this if the stack check was part of the Air
272 // prologue. Then, we would know that there is no way for the callee-saves to
274 // https://bugs.webkit.org/show_bug.cgi?id=172456
275 jit.emitRestore(params.proc().calleeSaveRegisterAtOffsetList());
278 MacroAssembler::TrustedImm32(callSiteIndex.bits()),
279 CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
280 jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm->topEntryFrame);
282 jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
283 jit.move(CCallHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::argumentGPR1);
284 CCallHelpers::Call throwCall = jit.call();
286 jit.move(CCallHelpers::TrustedImmPtr(vm), GPRInfo::argumentGPR0);
287 jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
288 CCallHelpers::Call lookupExceptionHandlerCall = jit.call();
289 jit.jumpToExceptionHandler(*vm);
292 [=] (LinkBuffer& linkBuffer) {
293 linkBuffer.link(throwCall, FunctionPtr(operationThrowStackOverflowError));
294 linkBuffer.link(lookupExceptionHandlerCall, FunctionPtr(lookupExceptionHandlerFromCallerFrame));
299 LBasicBlock firstDFGBasicBlock = lowBlock(m_graph.block(0));
// With multiple entrypoints, an EntrySwitch routes entry 0 to the argument
// speculation block and every catch entrypoint straight to the first block.
302 if (hasMultipleEntrypoints) {
303 Vector<LBasicBlock> successors(m_graph.m_numberOfEntrypoints);
304 successors[0] = callEntrypointArgumentSpeculations;
305 for (unsigned i = 1; i < m_graph.m_numberOfEntrypoints; ++i) {
306 // Currently, the only other entrypoint is an op_catch entrypoint.
307 // We do OSR entry at op_catch, and we prove argument formats before
308 // jumping to FTL code, so we don't need to check argument types here
309 // for these entrypoints.
310 successors[i] = firstDFGBasicBlock;
313 m_out.entrySwitch(successors);
314 m_out.appendTo(callEntrypointArgumentSpeculations, m_handleExceptions);
318 m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
// Arguments are flushed as JSValues at entry; record their availability for
// OSR exit, then speculate on each argument's proven format (int32, boolean,
// cell, ...), exiting with BadType if the profile is violated.
321 availabilityMap().clear();
322 availabilityMap().m_locals = Operands<Availability>(codeBlock()->numParameters(), 0);
323 for (unsigned i = codeBlock()->numParameters(); i--;) {
324 availabilityMap().m_locals.argument(i) =
325 Availability(FlushedAt(FlushedJSValue, virtualRegisterForArgument(i)));
328 for (unsigned i = codeBlock()->numParameters(); i--;) {
329 MethodOfGettingAValueProfile profile(&m_graph.m_profiledBlock->valueProfileForArgument(i));
330 VirtualRegister operand = virtualRegisterForArgument(i);
331 LValue jsValue = m_out.load64(addressFor(operand));
333 switch (m_graph.m_argumentFormats[0][i]) {
335 speculate(BadType, jsValueValue(jsValue), profile, isNotInt32(jsValue));
338 speculate(BadType, jsValueValue(jsValue), profile, isNotBoolean(jsValue));
341 speculate(BadType, jsValueValue(jsValue), profile, isNotCell(jsValue));
346 DFG_CRASH(m_graph, nullptr, "Bad flush format for argument");
350 m_out.jump(firstDFGBasicBlock);
// Shared exception-handler block: a patchpoint whose jump is linked to the
// exception handler label recorded in the compilation State.
354 m_out.appendTo(m_handleExceptions, firstDFGBasicBlock);
355 Box<CCallHelpers::Label> exceptionHandler = state->exceptionHandler;
356 m_out.patchpoint(Void)->setGenerator(
357 [=] (CCallHelpers& jit, const StackmapGenerationParams&) {
358 CCallHelpers::Jump jump = jit.jump();
360 [=] (LinkBuffer& linkBuffer) {
361 linkBuffer.link(jump, linkBuffer.locationOf(*exceptionHandler));
// Lower every reachable DFG block in pre-order.
366 for (DFG::BasicBlock* block : preOrder)
369 // Make sure everything is decorated. This does a bunch of deferred decorating. This has
370 // to happen last because our abstract heaps are generated lazily. They have to be
371 // generated lazily because we have an infinite number of numbered, indexed, and
372 // absolute heaps. We only become aware of the ones we actually mention while lowering.
373 m_heaps.computeRangesAndDecorateInstructions();
375 // We create all Phi's up front, but we may then decide not to compile the basic block
376 // that would have contained one of them. So this creates orphans, which triggers B3
377 // validation failures. Calling this fixes the issue.
379 // Note that you should avoid the temptation to make this call conditional upon
380 // validation being enabled. B3 makes no guarantees of any kind of correctness when
381 // dealing with IR that would have failed validation. For example, it would be valid to
382 // write a B3 phase that so aggressively assumes the lack of orphans that it would crash
383 // if any orphans were around. We might even have such phases already.
384 m_proc.deleteOrphans();
386 // We put the blocks into the B3 procedure in a super weird order. Now we reorder them.
387 m_out.applyBlockOrder();
// Creates a B3 Phi value for every DFG Phi node up front, choosing the B3
// type from the node's result flags (double/int32/int52/boolean/...), and
// records the mapping in m_phis. Upsilons are attached to these later
// (see compileUpsilon).
392 void createPhiVariables()
394 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
395 DFG::BasicBlock* block = m_graph.block(blockIndex);
398 for (unsigned nodeIndex = block->size(); nodeIndex--;) {
399 Node* node = block->at(nodeIndex);
400 if (node->op() != DFG::Phi)
403 switch (node->flags() & NodeResultMask) {
404 case NodeResultDouble:
407 case NodeResultInt32:
410 case NodeResultInt52:
413 case NodeResultBoolean:
// Any other result type is a compiler bug.
420 DFG_CRASH(m_graph, node, "Bad Phi node result type");
423 m_phis.add(node, m_proc.add<Value>(B3::Phi, type, Origin(node)));
// Lowers one DFG basic block into its corresponding B3 block: finds the next
// high/low block pair (to keep IR in roughly program order), bails if CFA
// never reached this block, then lowers each node in order via compileNode().
428 void compileBlock(DFG::BasicBlock* block)
433 if (verboseCompilationEnabled())
434 dataLog("Compiling block ", *block, "\n");
438 // Make sure that any blocks created while lowering code in the high block have the frequency of
439 // the high block. This is appropriate because B3 doesn't need precise frequencies. It just needs
440 // something roughly approximate for things like register allocation.
441 m_out.setFrequency(m_highBlock->executionCount);
443 LBasicBlock lowBlock = m_blocks.get(m_highBlock);
// Find the next DFG block (if any) so new B3 blocks are inserted before it.
446 for (BlockIndex nextBlockIndex = m_highBlock->index + 1; nextBlockIndex < m_graph.numBlocks(); ++nextBlockIndex) {
447 m_nextHighBlock = m_graph.block(nextBlockIndex);
451 m_nextLowBlock = m_nextHighBlock ? m_blocks.get(m_nextHighBlock) : 0;
453 // All of this effort to find the next block gives us the ability to keep the
454 // generated IR in roughly program order. This ought not affect the performance
455 // of the generated code (since we expect B3 to reorder things) but it will
456 // make IR dumps easier to read.
457 m_out.appendTo(lowBlock, m_nextLowBlock);
459 if (Options::ftlCrashes())
// CFA proved this block unreachable: emit a crash instead of lowering it.
462 if (!m_highBlock->cfaHasVisited) {
463 if (verboseCompilationEnabled())
464 dataLog("Bailing because CFA didn't reach.\n");
465 crash(m_highBlock, nullptr);
469 m_availabilityCalculator.beginBlock(m_highBlock);
472 m_state.beginBasicBlock(m_highBlock);
// compileNode() returns false to stop lowering the rest of this block.
474 for (m_nodeIndex = 0; m_nodeIndex < m_highBlock->size(); ++m_nodeIndex) {
475 if (!compileNode(m_nodeIndex))
// Called when abstract interpretation proves the current position
// unreachable mid-block. Marks every block dominated by m_highBlock as
// not-CFA-visited so compileBlock() bails on them too, instead of lowering
// code that would use undefined values.
480 void safelyInvalidateAfterTermination()
482 if (verboseCompilationEnabled())
483 dataLog("Bailing.\n");
486 // Invalidate dominated blocks. Under normal circumstances we would expect
487 // them to be invalidated already. But you can have the CFA become more
488 // precise over time because the structures of objects change on the main
489 // thread. Failing to do this would result in weird crashes due to a value
490 // being used but not defined. Race conditions FTW!
491 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
492 DFG::BasicBlock* target = m_graph.block(blockIndex);
495 if (m_graph.m_ssaDominators->dominates(m_highBlock, target)) {
496 if (verboseCompilationEnabled())
497 dataLog("Block ", *target, " will bail also.\n");
498 target->cfaHasVisited = false;
// Lowers a single DFG node (m_highBlock->at(nodeIndex)). If the abstract
// state is invalid (execution cannot reach here), invalidates dominated
// blocks and stops lowering this block. Otherwise: sets the current node and
// origin, runs the abstract interpreter's known-edge-type pass, dispatches
// on the opcode to the matching compile* method, and finally advances the
// availability calculator and executes the node's abstract effects.
503 bool compileNode(unsigned nodeIndex)
505 if (!m_state.isValid()) {
506 safelyInvalidateAfterTermination();
510 m_node = m_highBlock->at(nodeIndex);
511 m_origin = m_node->origin;
512 m_out.setOrigin(m_node);
514 if (verboseCompilationEnabled())
515 dataLog("Lowering ", m_node, "\n");
517 m_availableRecoveries.shrink(0);
519 m_interpreter.startExecuting();
520 m_interpreter.executeKnownEdgeTypes(m_node);
// Each opcode is lowered by a dedicated compile* method.
522 switch (m_node->op()) {
532 compileDoubleConstant();
535 compileInt52Constant();
538 compileLazyJSConstant();
544 compileDoubleAsInt32();
553 compileValueToInt32();
555 case BooleanToNumber:
556 compileBooleanToNumber();
558 case ExtractOSREntryLocal:
559 compileExtractOSREntryLocal();
561 case ExtractCatchLocal:
562 compileExtractCatchLocal();
574 case CallObjectConstructor:
575 compileToObjectOrCallObjectConstructor();
588 compileArithAddOrSub();
604 compileArithMinOrMax();
613 compileArithRandom();
631 compileArithFRound();
634 compileArithNegate();
658 compileUInt32ToNumber();
661 compileCheckStructure();
663 case CheckStructureOrEmpty:
664 compileCheckStructureOrEmpty();
670 compileCheckNotEmpty();
673 compileCheckBadCell();
675 case CheckStringIdent:
676 compileCheckStringIdent();
679 compileGetExecutable();
682 case ArrayifyToStructure:
686 compilePutStructure();
689 compileGetById(AccessType::TryGet);
693 compileGetById(AccessType::Get);
695 case GetByIdWithThis:
696 compileGetByIdWithThis();
702 compileHasOwnProperty();
709 case PutByIdWithThis:
710 compilePutByIdWithThis();
714 compilePutAccessorById();
716 case PutGetterSetterById:
717 compilePutGetterSetterById();
721 compilePutAccessorByVal();
727 compileDeleteByVal();
730 compileGetButterfly();
732 case ConstantStoragePointer:
733 compileConstantStoragePointer();
735 case GetIndexedPropertyStorage:
736 compileGetIndexedPropertyStorage();
742 compileGetArrayLength();
744 case GetVectorLength:
745 compileGetVectorLength();
748 compileCheckInBounds();
753 case GetMyArgumentByVal:
754 case GetMyArgumentByValOutOfBounds:
755 compileGetMyArgumentByVal();
757 case GetByValWithThis:
758 compileGetByValWithThis();
765 case PutByValWithThis:
766 compilePutByValWithThis();
770 case AtomicsCompareExchange:
771 case AtomicsExchange:
777 compileAtomicsReadModifyWrite();
779 case AtomicsIsLockFree:
780 compileAtomicsIsLockFree();
782 case DefineDataProperty:
783 compileDefineDataProperty();
785 case DefineAccessorProperty:
786 compileDefineAccessorProperty();
798 compileArrayIndexOf();
800 case CreateActivation:
801 compileCreateActivation();
804 compilePushWithScope();
807 case NewGeneratorFunction:
808 case NewAsyncGeneratorFunction:
809 case NewAsyncFunction:
810 compileNewFunction();
812 case CreateDirectArguments:
813 compileCreateDirectArguments();
815 case CreateScopedArguments:
816 compileCreateScopedArguments();
818 case CreateClonedArguments:
819 compileCreateClonedArguments();
824 case NewStringObject:
825 compileNewStringObject();
830 case NewArrayWithSpread:
831 compileNewArrayWithSpread();
837 compileNewArrayBuffer();
839 case NewArrayWithSize:
840 compileNewArrayWithSize();
843 compileNewTypedArray();
845 case GetTypedArrayByteOffset:
846 compileGetTypedArrayByteOffset();
849 compileGetPrototypeOf();
851 case AllocatePropertyStorage:
852 compileAllocatePropertyStorage();
854 case ReallocatePropertyStorage:
855 compileReallocatePropertyStorage();
857 case NukeStructureAndSetButterfly:
858 compileNukeStructureAndSetButterfly();
864 case CallStringConstructor:
865 compileToStringOrCallStringConstructor();
868 compileToPrimitive();
874 compileStringCharAt();
876 case StringCharCodeAt:
877 compileStringCharCodeAt();
879 case StringFromCharCode:
880 compileStringFromCharCode();
883 case GetGetterSetterByOffset:
884 compileGetByOffset();
892 case MultiGetByOffset:
893 compileMultiGetByOffset();
896 compilePutByOffset();
898 case MultiPutByOffset:
899 compileMultiPutByOffset();
902 case GetGlobalLexicalVariable:
903 compileGetGlobalVariable();
905 case PutGlobalVariable:
906 compilePutGlobalVariable();
909 compileNotifyWrite();
914 case GetArgumentCountIncludingThis:
915 compileGetArgumentCountIncludingThis();
923 case GetGlobalObject:
924 compileGetGlobalObject();
927 compileGetGlobalThis();
930 compileGetClosureVar();
933 compilePutClosureVar();
935 case GetFromArguments:
936 compileGetFromArguments();
939 compilePutToArguments();
942 compileGetArgument();
947 case CompareStrictEq:
948 compileCompareStrictEq();
951 compileCompareLess();
954 compileCompareLessEq();
957 compileCompareGreater();
959 case CompareGreaterEq:
960 compileCompareGreaterEq();
963 compileCompareBelow();
966 compileCompareBelowEq();
969 compileCompareEqPtr();
975 case TailCallInlinedCaller:
977 compileCallOrConstruct();
980 case DirectTailCallInlinedCaller:
981 case DirectConstruct:
983 compileDirectCallOrConstruct();
989 case CallForwardVarargs:
990 case TailCallVarargs:
991 case TailCallVarargsInlinedCaller:
992 case TailCallForwardVarargs:
993 case TailCallForwardVarargsInlinedCaller:
994 case ConstructVarargs:
995 case ConstructForwardVarargs:
996 compileCallOrConstructVarargs();
1002 compileLoadVarargs();
1004 case ForwardVarargs:
1005 compileForwardVarargs();
1016 case DFG::EntrySwitch:
1017 compileEntrySwitch();
1023 compileForceOSRExit();
1027 compileCPUIntrinsic();
1029 RELEASE_ASSERT_NOT_REACHED();
1035 case ThrowStaticError:
1036 compileThrowStaticError();
1038 case InvalidationPoint:
1039 compileInvalidationPoint();
1045 compileIsUndefined();
1053 case IsCellWithType:
1054 compileIsCellWithType();
1059 case NormalizeMapKey:
1060 compileNormalizeMapKey();
1063 compileGetMapBucket();
1065 case GetMapBucketHead:
1066 compileGetMapBucketHead();
1068 case GetMapBucketNext:
1069 compileGetMapBucketNext();
1071 case LoadKeyFromMapBucket:
1072 compileLoadKeyFromMapBucket();
1074 case LoadValueFromMapBucket:
1075 compileLoadValueFromMapBucket();
1077 case ExtractValueFromWeakMapGet:
1078 compileExtractValueFromWeakMapGet();
1087 compileWeakMapGet();
1092 case IsObjectOrNull:
1093 compileIsObjectOrNull();
1096 compileIsFunction();
1098 case IsTypedArrayView:
1099 compileIsTypedArrayView();
1107 case CheckTypeInfoFlags:
1108 compileCheckTypeInfoFlags();
1110 case OverridesHasInstance:
1111 compileOverridesHasInstance();
1114 compileInstanceOf();
1116 case InstanceOfCustom:
1117 compileInstanceOfCustom();
1119 case CountExecution:
1120 compileCountExecution();
1122 case SuperSamplerBegin:
1123 compileSuperSamplerBegin();
1125 case SuperSamplerEnd:
1126 compileSuperSamplerEnd();
1129 case FencedStoreBarrier:
1130 compileStoreBarrier();
1132 case HasIndexedProperty:
1133 compileHasIndexedProperty();
1135 case HasGenericProperty:
1136 compileHasGenericProperty();
1138 case HasStructureProperty:
1139 compileHasStructureProperty();
1141 case GetDirectPname:
1142 compileGetDirectPname();
1144 case GetEnumerableLength:
1145 compileGetEnumerableLength();
1147 case GetPropertyEnumerator:
1148 compileGetPropertyEnumerator();
1150 case GetEnumeratorStructurePname:
1151 compileGetEnumeratorStructurePname();
1153 case GetEnumeratorGenericPname:
1154 compileGetEnumeratorGenericPname();
1157 compileToIndexString();
1159 case CheckStructureImmediate:
1160 compileCheckStructureImmediate();
1162 case MaterializeNewObject:
1163 compileMaterializeNewObject();
1165 case MaterializeCreateActivation:
1166 compileMaterializeCreateActivation();
1169 if (Options::usePollingTraps())
1170 compileCheckTraps();
1173 compileCreateRest();
1176 compileGetRestLength();
1179 compileRegExpExec();
1182 compileRegExpTest();
1187 case SetFunctionName:
1188 compileSetFunctionName();
1191 case StringReplaceRegExp:
1192 compileStringReplace();
1194 case GetRegExpObjectLastIndex:
1195 compileGetRegExpObjectLastIndex();
1197 case SetRegExpObjectLastIndex:
1198 compileSetRegExpObjectLastIndex();
1200 case LogShadowChickenPrologue:
1201 compileLogShadowChickenPrologue();
1203 case LogShadowChickenTail:
1204 compileLogShadowChickenTail();
1206 case RecordRegExpCachedResult:
1207 compileRecordRegExpCachedResult();
1209 case ResolveScopeForHoistingFuncDeclInEval:
1210 compileResolveScopeForHoistingFuncDeclInEval();
1213 compileResolveScope();
1216 compileGetDynamicVar();
1219 compilePutDynamicVar();
1222 compileUnreachable();
1225 compileStringSlice();
1228 compileToLowerCase();
1230 case NumberToStringWithRadix:
1231 compileNumberToStringWithRadix();
1233 case NumberToStringWithValidRadixConstant:
1234 compileNumberToStringWithValidRadixConstant();
1237 compileCheckSubClass();
1243 compileCallDOMGetter();
// Phantom nodes represent values the OSR-exit machinery can rematerialize;
// they generate no code here.
1251 case PhantomNewObject:
1252 case PhantomNewFunction:
1253 case PhantomNewGeneratorFunction:
1254 case PhantomNewAsyncGeneratorFunction:
1255 case PhantomNewAsyncFunction:
1256 case PhantomCreateActivation:
1257 case PhantomDirectArguments:
1258 case PhantomCreateRest:
1260 case PhantomNewArrayWithSpread:
1261 case PhantomNewArrayBuffer:
1262 case PhantomClonedArguments:
1266 case InitializeEntrypointArguments:
1269 DFG_CRASH(m_graph, m_node, "Unrecognized node in FTL backend");
1273 if (m_node->isTerminal())
// Lowering this node may have refined the abstract state to "unreachable";
// if so, bail the same way as at the top of this method.
1276 if (!m_state.isValid()) {
1277 safelyInvalidateAfterTermination();
1281 m_availabilityCalculator.executeNode(m_node);
1282 m_interpreter.executeEffects(nodeIndex);
// Lowers an Upsilon node: lowers child1 according to its use kind
// (double/int32/int52/boolean/cell/untyped JSValue), anchors the value in
// the current block, and wires it as an incoming value of the B3 Phi that
// was pre-created for the target DFG Phi node (see createPhiVariables).
1287 void compileUpsilon()
1289 LValue upsilonValue = nullptr;
1290 switch (m_node->child1().useKind()) {
1292 upsilonValue = lowDouble(m_node->child1());
1296 upsilonValue = lowInt32(m_node->child1());
1299 upsilonValue = lowInt52(m_node->child1());
1302 case KnownBooleanUse:
1303 upsilonValue = lowBoolean(m_node->child1());
1307 upsilonValue = lowCell(m_node->child1());
1310 upsilonValue = lowJSValue(m_node->child1());
1313 DFG_CRASH(m_graph, m_node, "Bad use kind");
1316 ValueFromBlock upsilon = m_out.anchor(upsilonValue);
1317 LValue phiNode = m_phis.get(m_node->phi());
1318 m_out.addIncomingToPhi(phiNode, upsilon);
// Body fragment (the method header is elided in this view — presumably
// compilePhi; TODO confirm against upstream): fetches the pre-created B3 Phi
// for this DFG Phi node, appends it to the current block, then sets the
// node's lowered result according to its result-type flags.
1323 LValue phi = m_phis.get(m_node);
1324 m_out.m_block->append(phi);
1326 switch (m_node->flags() & NodeResultMask) {
1327 case NodeResultDouble:
1330 case NodeResultInt32:
1333 case NodeResultInt52:
1336 case NodeResultBoolean:
1343 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers a double constant node: the result is a B3 double constant holding
// the node's numeric value.
1348 void compileDoubleConstant()
1350 setDouble(m_out.constDouble(m_node->asNumber()));
// Lowers an Int52 constant: records both representations — the shifted
// (boxed Int52) form and the strict (unshifted) form.
1353 void compileInt52Constant()
1355 int64_t value = m_node->asAnyInt();
1357 setInt52(m_out.constInt64(value << JSValue::int52ShiftAmount));
1358 setStrictInt52(m_out.constInt64(value));
// Lowers a LazyJSValue constant: emits an Int64 patchpoint whose generator
// materializes the lazily-computed JSValue directly into the result
// register at code-generation time. Effects are none, so B3 is free to
// hoist/CSE the patchpoint.
1361 void compileLazyJSConstant()
1363 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
1364 LazyJSValue value = m_node->lazyJSValue();
1365 patchpoint->setGenerator(
1366 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
1367 value.emit(jit, JSValueRegs(params[0].gpr()));
1369 patchpoint->effects = Effects::none();
1370 setJSValue(patchpoint);
// Lowers DoubleRep: converts the child value to an unboxed double,
// specializing on the child's use kind (real-number, number-or-other,
// Int52, ...). Non-double inputs (int32, and for NotCellUse also
// undefined/null/booleans) are converted on slow paths and merged with a Phi.
1373 void compileDoubleRep()
1375 switch (m_node->child1().useKind()) {
1376 case RealNumberUse: {
1377 LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);
1379 LValue doubleValue = unboxDouble(value);
1381 LBasicBlock intCase = m_out.newBlock();
1382 LBasicBlock continuation = m_out.newBlock();
// Fast path: if unboxing produced a non-NaN double, the input was a boxed
// double and we are done. NaN here means "not actually a boxed double",
// so fall to the int case.
1384 ValueFromBlock fastResult = m_out.anchor(doubleValue);
1386 m_out.doubleEqual(doubleValue, doubleValue),
1387 usually(continuation), rarely(intCase));
1389 LBasicBlock lastNext = m_out.appendTo(intCase, continuation);
// Speculate the value is an int32 (exiting if not a real number), then
// convert int32 -> double.
1392 jsValueValue(value), m_node->child1(), SpecBytecodeRealNumber,
1393 isNotInt32(value, provenType(m_node->child1()) & ~SpecDoubleReal));
1394 ValueFromBlock slowResult = m_out.anchor(m_out.intToDouble(unboxInt32(value)));
1395 m_out.jump(continuation);
1397 m_out.appendTo(continuation, lastNext);
1399 setDouble(m_out.phi(Double, fastResult, slowResult));
// NotCellUse additionally converts undefined/null/booleans; the plain
// number use kind type-checks instead.
1405 bool shouldConvertNonNumber = m_node->child1().useKind() == NotCellUse;
1407 LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);
1409 LBasicBlock intCase = m_out.newBlock();
1410 LBasicBlock doubleTesting = m_out.newBlock();
1411 LBasicBlock doubleCase = m_out.newBlock();
1412 LBasicBlock nonDoubleCase = m_out.newBlock();
1413 LBasicBlock continuation = m_out.newBlock();
1416 isNotInt32(value, provenType(m_node->child1())),
1417 unsure(doubleTesting), unsure(intCase));
1419 LBasicBlock lastNext = m_out.appendTo(intCase, doubleTesting);
1421 ValueFromBlock intToDouble = m_out.anchor(
1422 m_out.intToDouble(unboxInt32(value)));
1423 m_out.jump(continuation);
1425 m_out.appendTo(doubleTesting, doubleCase);
1426 LValue valueIsNumber = isNumber(value, provenType(m_node->child1()));
1427 m_out.branch(valueIsNumber, usually(doubleCase), rarely(nonDoubleCase));
1429 m_out.appendTo(doubleCase, nonDoubleCase);
1430 ValueFromBlock unboxedDouble = m_out.anchor(unboxDouble(value));
1431 m_out.jump(continuation);
1433 if (shouldConvertNonNumber) {
1434 LBasicBlock undefinedCase = m_out.newBlock();
1435 LBasicBlock testNullCase = m_out.newBlock();
1436 LBasicBlock nullCase = m_out.newBlock();
1437 LBasicBlock testBooleanTrueCase = m_out.newBlock();
1438 LBasicBlock convertBooleanTrueCase = m_out.newBlock();
1439 LBasicBlock convertBooleanFalseCase = m_out.newBlock();
// ToNumber semantics for non-numbers: undefined -> NaN, null -> 0,
// true -> 1, false -> 0. Anything else (a cell) fails the type check.
1441 m_out.appendTo(nonDoubleCase, undefinedCase);
1442 LValue valueIsUndefined = m_out.equal(value, m_out.constInt64(ValueUndefined));
1443 m_out.branch(valueIsUndefined, unsure(undefinedCase), unsure(testNullCase));
1445 m_out.appendTo(undefinedCase, testNullCase);
1446 ValueFromBlock convertedUndefined = m_out.anchor(m_out.constDouble(PNaN));
1447 m_out.jump(continuation);
1449 m_out.appendTo(testNullCase, nullCase);
1450 LValue valueIsNull = m_out.equal(value, m_out.constInt64(ValueNull));
1451 m_out.branch(valueIsNull, unsure(nullCase), unsure(testBooleanTrueCase));
1453 m_out.appendTo(nullCase, testBooleanTrueCase);
1454 ValueFromBlock convertedNull = m_out.anchor(m_out.constDouble(0));
1455 m_out.jump(continuation);
1457 m_out.appendTo(testBooleanTrueCase, convertBooleanTrueCase);
1458 LValue valueIsBooleanTrue = m_out.equal(value, m_out.constInt64(ValueTrue));
1459 m_out.branch(valueIsBooleanTrue, unsure(convertBooleanTrueCase), unsure(convertBooleanFalseCase));
1461 m_out.appendTo(convertBooleanTrueCase, convertBooleanFalseCase);
1462 ValueFromBlock convertedTrue = m_out.anchor(m_out.constDouble(1));
1463 m_out.jump(continuation);
1465 m_out.appendTo(convertBooleanFalseCase, continuation);
1467 LValue valueIsNotBooleanFalse = m_out.notEqual(value, m_out.constInt64(ValueFalse));
1468 FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), ~SpecCellCheck, valueIsNotBooleanFalse);
1469 ValueFromBlock convertedFalse = m_out.anchor(m_out.constDouble(0));
1470 m_out.jump(continuation);
1472 m_out.appendTo(continuation, lastNext);
1473 setDouble(m_out.phi(Double, intToDouble, unboxedDouble, convertedUndefined, convertedNull, convertedTrue, convertedFalse));
// Number use kind: a non-number here is a speculation failure.
1476 m_out.appendTo(nonDoubleCase, continuation);
1477 FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecBytecodeNumber, m_out.booleanTrue);
1478 m_out.unreachable();
1480 m_out.appendTo(continuation, lastNext);
1482 setDouble(m_out.phi(Double, intToDouble, unboxedDouble));
// Int52 input: convert the strict Int52 value to double directly.
1487 setDouble(strictInt52ToDouble(lowStrictInt52(m_node->child1())));
1492 DFG_CRASH(m_graph, m_node, "Bad use kind");
// DoubleAsInt32: convert the child double to an int32, checking for
// negative zero when the node's arith mode requires it.
1496 void compileDoubleAsInt32()
1498 LValue integerValue = convertDoubleToInt32(lowDouble(m_node->child1()), shouldCheckNegativeZero(m_node->arithMode()));
1499 setInt32(integerValue);
// ValueRep: rebox a machine-level representation (double or Int52) as a
// tagged JSValue.
1502 void compileValueRep()
1504 switch (m_node->child1().useKind()) {
1505 case DoubleRepUse: {
1506 LValue value = lowDouble(m_node->child1());
// doubleEqual(value, value) is false only for NaN; if the input could be
// an impure NaN, canonicalize any NaN to PNaN so boxing stays valid.
1508 if (m_interpreter.needsTypeCheck(m_node->child1(), ~SpecDoubleImpureNaN)) {
1509 value = m_out.select(
1510 m_out.doubleEqual(value, value), value, m_out.constDouble(PNaN));
1513 setJSValue(boxDouble(value));
// Int52 path: box the strict Int52 value as a JSValue.
1518 setJSValue(strictInt52ToJSValue(lowStrictInt52(m_node->child1())));
1523 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Int52Rep: convert the child to the strict Int52 representation,
// dispatching on the child's use kind.
1527 void compileInt52Rep()
1529 switch (m_node->child1().useKind()) {
// Int32: a sign extension to 64 bits is exact.
1531 setStrictInt52(m_out.signExt32To64(lowInt32(m_node->child1())))
// JSValue: unbox with a speculation that the value is an AnyInt.
1536 jsValueToStrictInt52(
1537 m_node->child1(), lowJSValue(m_node->child1(), ManualOperandSpeculation)));
1540 case DoubleRepAnyIntUse:
// Double known to hold an integral value: convert with a check.
1542 doubleToStrictInt52(
1543 m_node->child1(), lowDouble(m_node->child1())));
1547 RELEASE_ASSERT_NOT_REACHED();
// ValueToInt32: coerce the child to an int32, choosing the cheapest path
// for the representation we already have.
1551 void compileValueToInt32()
1553 switch (m_node->child1().useKind()) {
1555 setInt32(m_out.castToInt32(lowStrictInt52(m_node->child1())));
1559 setInt32(doubleToInt32(lowDouble(m_node->child1())));
// Untyped: reuse an already-lowered int32 for this node if one exists.
1564 LoweredNodeValue value = m_int32Values.get(m_node->child1().node());
1565 if (isValid(value)) {
1566 setInt32(value.value());
// Otherwise reuse an already-lowered JSValue and convert it.
1570 value = m_jsValueValues.get(m_node->child1().node());
1571 if (isValid(value)) {
1572 setInt32(numberOrNotCellToInt32(m_node->child1(), value.value()));
1576 // We'll basically just get here for constants. But it's good to have this
1577 // catch-all since we often add new representations into the mix.
1579 numberOrNotCellToInt32(
1581 lowJSValue(m_node->child1(), ManualOperandSpeculation)));
1586 DFG_CRASH(m_graph, m_node, "Bad use kind");
// BooleanToNumber: turn a boolean into a number (0 or 1).
1591 void compileBooleanToNumber()
1593 switch (m_node->child1().useKind()) {
// Boolean use: zero-extend the unboxed boolean bit to an int32.
1595 setInt32(m_out.zeroExt(lowBoolean(m_node->child1()), Int32));
1600 LValue value = lowJSValue(m_node->child1());
// If the value is proven to be a boolean or a boolean-like int32, its
// low bit is exactly the numeric result.
1602 if (!m_interpreter.needsTypeCheck(m_node->child1(), SpecBoolInt32 | SpecBoolean)) {
1603 setInt32(m_out.bitAnd(m_out.castToInt32(value), m_out.int32One));
// Otherwise branch: booleans get reboxed as a tagged int; any other
// value passes through unchanged.
1607 LBasicBlock booleanCase = m_out.newBlock();
1608 LBasicBlock continuation = m_out.newBlock();
1610 ValueFromBlock notBooleanResult = m_out.anchor(value);
1612 isBoolean(value, provenType(m_node->child1())),
1613 unsure(booleanCase), unsure(continuation));
1615 LBasicBlock lastNext = m_out.appendTo(booleanCase, continuation);
// Rebox: zero-extend the unboxed boolean and OR in the number tag.
1616 ValueFromBlock booleanResult = m_out.anchor(m_out.bitOr(
1617 m_out.zeroExt(unboxBoolean(value), Int64), m_tagTypeNumber));
1618 m_out.jump(continuation);
1620 m_out.appendTo(continuation, lastNext);
1621 setJSValue(m_out.phi(Int64, booleanResult, notBooleanResult));
1626 RELEASE_ASSERT_NOT_REACHED();
// ExtractOSREntryLocal: load a local's JSValue from the FTL-for-OSR-entry
// scratch buffer, indexed by the node's unlinked local.
1631 void compileExtractOSREntryLocal()
1633 EncodedJSValue* buffer = static_cast<EncodedJSValue*>(
1634 m_ftlState.jitCode->ftlForOSREntry()->entryBuffer()->dataBuffer());
1635 setJSValue(m_out.load64(m_out.absolute(buffer + m_node->unlinkedLocal().toLocal())));
// ExtractCatchLocal: load a value from the catch OSR-entry buffer at the
// node's catch entry index.
1638 void compileExtractCatchLocal()
1640 EncodedJSValue* buffer = static_cast<EncodedJSValue*>(m_ftlState.jitCode->common.catchOSREntryBuffer->dataBuffer());
1641 setJSValue(m_out.load64(m_out.absolute(buffer + m_node->catchOSREntryIndex())));
// GetStack: load a flushed local from the stack. The format must be
// concrete and (currently) not FlushedDouble.
1644 void compileGetStack()
1646 StackAccessData* data = m_node->stackAccessData();
1647 AbstractValue& value = m_state.variables().operand(data->local);
1649 DFG_ASSERT(m_graph, m_node, isConcrete(data->format));
1650 DFG_ASSERT(m_graph, m_node, data->format != FlushedDouble); // This just happens to not arise for GetStacks, right now. It would be trivial to support.
// If the abstract value proves an int32, a 32-bit payload load suffices;
// otherwise load the full 64-bit JSValue.
1652 if (isInt32Speculation(value.m_type))
1653 setInt32(m_out.load32(payloadFor(data->machineLocal)));
1655 setJSValue(m_out.load64(addressFor(data->machineLocal)));
// PutStack: store a value to its stack slot in the representation named
// by the flush format.
1658 void compilePutStack()
1660 StackAccessData* data = m_node->stackAccessData();
1661 switch (data->format) {
1662 case FlushedJSValue: {
1663 LValue value = lowJSValue(m_node->child1());
1664 m_out.store64(value, addressFor(data->machineLocal));
1668 case FlushedDouble: {
1669 LValue value = lowDouble(m_node->child1());
1670 m_out.storeDouble(value, addressFor(data->machineLocal));
1674 case FlushedInt32: {
// Int32 only needs the payload half of the slot.
1675 LValue value = lowInt32(m_node->child1());
1676 m_out.store32(value, payloadFor(data->machineLocal));
1680 case FlushedInt52: {
1681 LValue value = lowInt52(m_node->child1());
1682 m_out.store64(value, addressFor(data->machineLocal));
// Cell: store the cell pointer as a full 64-bit value.
1687 LValue value = lowCell(m_node->child1());
1688 m_out.store64(value, addressFor(data->machineLocal));
1692 case FlushedBoolean: {
// Speculate boolean, then store the (already boxed) JSValue.
1693 speculateBoolean(m_node->child1());
1695 lowJSValue(m_node->child1(), ManualOperandSpeculation),
1696 addressFor(data->machineLocal));
1701 DFG_CRASH(m_graph, m_node, "Bad flush format");
1708 DFG_NODE_DO_TO_CHILDREN(m_graph, m_node, speculate);
// ToObject / CallObjectConstructor: objects pass through on the fast
// path; everything else takes a VM call.
1711 void compileToObjectOrCallObjectConstructor()
1713 LValue value = lowJSValue(m_node->child1());
1715 LBasicBlock isCellCase = m_out.newBlock();
1716 LBasicBlock slowCase = m_out.newBlock();
1717 LBasicBlock continuation = m_out.newBlock();
1719 m_out.branch(isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));
1721 LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
1722 ValueFromBlock fastResult = m_out.anchor(value);
1723 m_out.branch(isObject(value), usually(continuation), rarely(slowCase));
1725 m_out.appendTo(slowCase, continuation);
1727 ValueFromBlock slowResult;
// ToObject also passes the identifier for the error message; the
// CallObjectConstructor path passes the frozen cell operand instead.
1728 if (m_node->op() == ToObject) {
1729 auto* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
1730 slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationToObject), m_callFrame, weakPointer(globalObject), value, m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
1732 slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationCallObjectConstructor), m_callFrame, frozenPointer(m_node->cellOperand()), value));
1733 m_out.jump(continuation);
1735 m_out.appendTo(continuation, lastNext);
1736 setJSValue(m_out.phi(Int64, fastResult, slowResult));
// ToThis: cells that do not override ToThis pass through unchanged;
// everything else calls the strict or sloppy runtime operation.
1739 void compileToThis()
1741 LValue value = lowJSValue(m_node->child1());
1743 LBasicBlock isCellCase = m_out.newBlock();
1744 LBasicBlock slowCase = m_out.newBlock();
1745 LBasicBlock continuation = m_out.newBlock();
1748 isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));
1750 LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
1751 ValueFromBlock fastResult = m_out.anchor(value);
// Fast path only if the cell's typeInfo flags lack OverridesToThis.
1754 m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoFlags),
1755 m_out.constInt32(OverridesToThis)),
1756 usually(continuation), rarely(slowCase));
1758 m_out.appendTo(slowCase, continuation);
// Strict-mode code must not substitute the global this.
1759 J_JITOperation_EJ function;
1760 if (m_graph.isStrictModeFor(m_node->origin.semantic))
1761 function = operationToThisStrict;
1763 function = operationToThis;
1764 ValueFromBlock slowResult = m_out.anchor(
1765 vmCall(Int64, m_out.operation(function), m_callFrame, value));
1766 m_out.jump(continuation);
1768 m_out.appendTo(continuation, lastNext);
1769 setJSValue(m_out.phi(Int64, fastResult, slowResult));
// ValueAdd: untyped '+' lowered through a JITAddIC math inline cache,
// seeded with the baseline arith profile for this bytecode.
1772 void compileValueAdd()
1774 ArithProfile* arithProfile = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic)->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
1775 JITAddIC* addIC = codeBlock()->addJITAddIC(arithProfile);
1776 auto repatchingFunction = operationValueAddOptimize;
1777 auto nonRepatchingFunction = operationValueAdd;
1778 compileMathIC(addIC, repatchingFunction, nonRepatchingFunction);
// Lower a unary math op through a JIT math inline cache. Emits a
// patchpoint whose generator tries to generate the IC's fast path
// inline; slow paths call either the repatching operation (which can
// upgrade the IC) or the plain operation.
1781 template <typename Generator>
1782 void compileMathIC(JITUnaryMathIC<Generator>* mathIC, FunctionPtr repatchingFunction, FunctionPtr nonRepatchingFunction)
1784 Node* node = m_node;
1786 LValue operand = lowJSValue(node->child1());
1788 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
1789 patchpoint->appendSomeRegister(operand);
// The IC's generated code relies on the tag registers being live.
1790 patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
1791 patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
1792 RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
1793 patchpoint->numGPScratchRegisters = 1;
1794 patchpoint->clobber(RegisterSet::macroScratchRegisters());
1795 State* state = &m_ftlState;
1796 patchpoint->setGenerator(
1797 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
1798 AllowMacroScratchRegisterUsage allowScratch(jit);
1800 Box<CCallHelpers::JumpList> exceptions =
1801 exceptionHandle->scheduleExitCreation(params)->jumps(jit);
1803 #if ENABLE(MATH_IC_STATS)
1804 auto inlineStart = jit.label();
// Shared (boxed) generation state so the late path and link tasks see
// the labels and jumps recorded during inline generation.
1807 Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
1808 mathIC->m_generator = Generator(JSValueRegs(params[0].gpr()), JSValueRegs(params[1].gpr()), params.gpScratch(0));
// Profiling is handled by the IC machinery itself here, not emitted.
1810 bool shouldEmitProfiling = false;
1811 bool generatedInline = mathIC->generateInline(jit, *mathICGenerationState, shouldEmitProfiling);
1813 if (generatedInline) {
1814 ASSERT(!mathICGenerationState->slowPathJumps.empty());
1815 auto done = jit.label();
// Slow path is emitted late so it stays out of the main code stream.
1816 params.addLatePath([=] (CCallHelpers& jit) {
1817 AllowMacroScratchRegisterUsage allowScratch(jit);
1818 mathICGenerationState->slowPathJumps.link(&jit);
1819 mathICGenerationState->slowPathStart = jit.label();
1820 #if ENABLE(MATH_IC_STATS)
1821 auto slowPathStart = jit.label();
// Repatching slow path lets the IC replace its inline code later.
1824 if (mathICGenerationState->shouldSlowPathRepatch) {
1825 SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
1826 repatchingFunction, params[0].gpr(), params[1].gpr(), CCallHelpers::TrustedImmPtr(mathIC));
1827 mathICGenerationState->slowPathCall = call.call();
1829 SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic,
1830 exceptions.get(), nonRepatchingFunction, params[0].gpr(), params[1].gpr());
1831 mathICGenerationState->slowPathCall = call.call();
1833 jit.jump().linkTo(done, &jit);
1835 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
1836 mathIC->finalizeInlineCode(*mathICGenerationState, linkBuffer);
1839 #if ENABLE(MATH_IC_STATS)
1840 auto slowPathEnd = jit.label();
1841 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
1842 size_t size = linkBuffer.locationOf(slowPathEnd).executableAddress<char*>() - linkBuffer.locationOf(slowPathStart).executableAddress<char*>();
1843 mathIC->m_generatedCodeSize += size;
// Inline generation failed: fall back to a direct operation call.
1849 *state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
1850 nonRepatchingFunction, params[0].gpr(), params[1].gpr());
1853 #if ENABLE(MATH_IC_STATS)
1854 auto inlineEnd = jit.label();
1855 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
1856 size_t size = linkBuffer.locationOf(inlineEnd).executableAddress<char*>() - linkBuffer.locationOf(inlineStart).executableAddress<char*>();
1857 mathIC->m_generatedCodeSize += size;
1862 setJSValue(patchpoint);
// Binary-operand variant of the math IC lowering: same structure as the
// unary one, but with two operands, snippet operand result types drawn
// from the abstract state, and two FP scratch registers.
1865 template <typename Generator>
1866 void compileMathIC(JITBinaryMathIC<Generator>* mathIC, FunctionPtr repatchingFunction, FunctionPtr nonRepatchingFunction)
1868 Node* node = m_node;
1870 LValue left = lowJSValue(node->child1());
1871 LValue right = lowJSValue(node->child2());
// Give the snippet generator the statically known result types.
1873 SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
1874 SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());
1876 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
1877 patchpoint->appendSomeRegister(left);
1878 patchpoint->appendSomeRegister(right);
// The IC's generated code relies on the tag registers being live.
1879 patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
1880 patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
1881 RefPtr<PatchpointExceptionHandle> exceptionHandle =
1882 preparePatchpointForExceptions(patchpoint);
1883 patchpoint->numGPScratchRegisters = 1;
1884 patchpoint->numFPScratchRegisters = 2;
1885 patchpoint->clobber(RegisterSet::macroScratchRegisters());
1886 State* state = &m_ftlState;
1887 patchpoint->setGenerator(
1888 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
1889 AllowMacroScratchRegisterUsage allowScratch(jit);
1891 Box<CCallHelpers::JumpList> exceptions =
1892 exceptionHandle->scheduleExitCreation(params)->jumps(jit);
1894 #if ENABLE(MATH_IC_STATS)
1895 auto inlineStart = jit.label();
// Shared (boxed) generation state for the late path and link tasks.
1898 Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
1899 mathIC->m_generator = Generator(leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
1900 JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), params.fpScratch(0),
1901 params.fpScratch(1), params.gpScratch(0), InvalidFPRReg);
1903 bool shouldEmitProfiling = false;
1904 bool generatedInline = mathIC->generateInline(jit, *mathICGenerationState, shouldEmitProfiling);
1906 if (generatedInline) {
1907 ASSERT(!mathICGenerationState->slowPathJumps.empty());
1908 auto done = jit.label();
// Slow path is emitted late so it stays out of the main code stream.
1909 params.addLatePath([=] (CCallHelpers& jit) {
1910 AllowMacroScratchRegisterUsage allowScratch(jit);
1911 mathICGenerationState->slowPathJumps.link(&jit);
1912 mathICGenerationState->slowPathStart = jit.label();
1913 #if ENABLE(MATH_IC_STATS)
1914 auto slowPathStart = jit.label();
// Repatching slow path lets the IC replace its inline code later.
1917 if (mathICGenerationState->shouldSlowPathRepatch) {
1918 SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
1919 repatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr(), CCallHelpers::TrustedImmPtr(mathIC));
1920 mathICGenerationState->slowPathCall = call.call();
1922 SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic,
1923 exceptions.get(), nonRepatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr());
1924 mathICGenerationState->slowPathCall = call.call();
1926 jit.jump().linkTo(done, &jit);
1928 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
1929 mathIC->finalizeInlineCode(*mathICGenerationState, linkBuffer);
1932 #if ENABLE(MATH_IC_STATS)
1933 auto slowPathEnd = jit.label();
1934 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
1935 size_t size = linkBuffer.locationOf(slowPathEnd).executableAddress<char*>() - linkBuffer.locationOf(slowPathStart).executableAddress<char*>();
1936 mathIC->m_generatedCodeSize += size;
// Inline generation failed: fall back to a direct operation call.
1942 *state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
1943 nonRepatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr());
1946 #if ENABLE(MATH_IC_STATS)
1947 auto inlineEnd = jit.label();
1948 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
1949 size_t size = linkBuffer.locationOf(inlineEnd).executableAddress<char*>() - linkBuffer.locationOf(inlineStart).executableAddress<char*>();
1950 mathIC->m_generatedCodeSize += size;
1955 setJSValue(patchpoint);
// StrCat: concatenate two or three operands via a VM call; operands use
// manual operand speculation.
1958 void compileStrCat()
1961 if (m_node->child3()) {
1963 Int64, m_out.operation(operationStrCat3), m_callFrame,
1964 lowJSValue(m_node->child1(), ManualOperandSpeculation),
1965 lowJSValue(m_node->child2(), ManualOperandSpeculation),
1966 lowJSValue(m_node->child3(), ManualOperandSpeculation));
// Two-operand form.
1969 Int64, m_out.operation(operationStrCat2), m_callFrame,
1970 lowJSValue(m_node->child1(), ManualOperandSpeculation),
1971 lowJSValue(m_node->child2(), ManualOperandSpeculation));
// ArithAdd / ArithSub: int32, Int52, double, and untyped (math IC) paths.
1976 void compileArithAddOrSub()
1978 bool isSub = m_node->op() == ArithSub;
1979 switch (m_node->binaryUseKind()) {
1981 LValue left = lowInt32(m_node->child1());
1982 LValue right = lowInt32(m_node->child2());
// Unchecked arithmetic when the arith mode says overflow can't matter.
1984 if (!shouldCheckOverflow(m_node->arithMode())) {
1985 setInt32(isSub ? m_out.sub(left, right) : m_out.add(left, right));
// Otherwise use a checked op and OSR exit on overflow.
1989 CheckValue* result =
1990 isSub ? m_out.speculateSub(left, right) : m_out.speculateAdd(left, right);
1991 blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
// Int52: if neither operand can actually need the full 52 bits, the
// add/sub can't overflow and no check is required.
1997 if (!abstractValue(m_node->child1()).couldBeType(SpecInt52Only)
1998 && !abstractValue(m_node->child2()).couldBeType(SpecInt52Only)) {
2000 LValue left = lowWhicheverInt52(m_node->child1(), kind);
2001 LValue right = lowInt52(m_node->child2(), kind);
2002 setInt52(isSub ? m_out.sub(left, right) : m_out.add(left, right), kind);
2006 LValue left = lowInt52(m_node->child1());
2007 LValue right = lowInt52(m_node->child2());
2008 CheckValue* result =
2009 isSub ? m_out.speculateSub(left, right) : m_out.speculateAdd(left, right);
2010 blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
2015 case DoubleRepUse: {
2016 LValue C1 = lowDouble(m_node->child1());
2017 LValue C2 = lowDouble(m_node->child2());
2019 setDouble(isSub ? m_out.doubleSub(C1, C2) : m_out.doubleAdd(C1, C2));
// Untyped subtraction goes through a JITSubIC; ArithAdd has no untyped
// path here (ValueAdd handles that), so anything else is a bad use kind.
2025 DFG_CRASH(m_graph, m_node, "Bad use kind");
2029 ArithProfile* arithProfile = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic)->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
2030 JITSubIC* subIC = codeBlock()->addJITSubIC(arithProfile);
2031 auto repatchingFunction = operationValueSubOptimize;
2032 auto nonRepatchingFunction = operationValueSub;
2033 compileMathIC(subIC, repatchingFunction, nonRepatchingFunction);
2038 DFG_CRASH(m_graph, m_node, "Bad use kind");
// ArithClz32: count leading zeros. Int32 inputs use the ctlz32
// instruction; untyped inputs go through the runtime operation.
2043 void compileArithClz32()
2045 if (m_node->child1().useKind() == Int32Use || m_node->child1().useKind() == KnownInt32Use) {
2046 LValue operand = lowInt32(m_node->child1());
2047 setInt32(m_out.ctlz32(operand));
2050 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2051 LValue argument = lowJSValue(m_node->child1());
2052 LValue result = vmCall(Int32, m_out.operation(operationArithClz32), m_callFrame, argument);
// ArithMul: int32, Int52, double, and untyped (math IC) multiplication,
// with optional overflow and negative-zero speculation checks.
2056 void compileArithMul()
2058 switch (m_node->binaryUseKind()) {
2060 LValue left = lowInt32(m_node->child1());
2061 LValue right = lowInt32(m_node->child2());
2065 if (!shouldCheckOverflow(m_node->arithMode()))
2066 result = m_out.mul(left, right);
2068 CheckValue* speculation = m_out.speculateMul(left, right);
2069 blessSpeculation(speculation, Overflow, noValue(), nullptr, m_origin);
2070 result = speculation;
// A zero product is -0 if either operand was negative; exit on that.
2073 if (shouldCheckNegativeZero(m_node->arithMode())) {
2074 LBasicBlock slowCase = m_out.newBlock();
2075 LBasicBlock continuation = m_out.newBlock();
2078 m_out.notZero32(result), usually(continuation), rarely(slowCase));
2080 LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
2081 speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int32Zero));
2082 speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int32Zero));
2083 m_out.jump(continuation);
2084 m_out.appendTo(continuation, lastNext);
// Int52: operands are lowered in opposite kinds so the product fits the
// Int52 encoding; overflow still gets a checked multiply.
2093 LValue left = lowWhicheverInt52(m_node->child1(), kind);
2094 LValue right = lowInt52(m_node->child2(), opposite(kind));
2096 CheckValue* result = m_out.speculateMul(left, right);
2097 blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
2099 if (shouldCheckNegativeZero(m_node->arithMode())) {
2100 LBasicBlock slowCase = m_out.newBlock();
2101 LBasicBlock continuation = m_out.newBlock();
2104 m_out.notZero64(result), usually(continuation), rarely(slowCase));
2106 LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
2107 speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int64Zero));
2108 speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int64Zero));
2109 m_out.jump(continuation);
2110 m_out.appendTo(continuation, lastNext);
2117 case DoubleRepUse: {
2119 m_out.doubleMul(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
// Untyped: lower through a JITMulIC math inline cache.
2124 ArithProfile* arithProfile = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic)->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
2125 JITMulIC* mulIC = codeBlock()->addJITMulIC(arithProfile);
2126 auto repatchingFunction = operationValueMulOptimize;
2127 auto nonRepatchingFunction = operationValueMul;
2128 compileMathIC(mulIC, repatchingFunction, nonRepatchingFunction);
2133 DFG_CRASH(m_graph, m_node, "Bad use kind");
// ArithDiv: int32 division with negative-zero / overflow / inexactness
// checks, double division, or a binary snippet for untyped operands.
2138 void compileArithDiv()
2140 switch (m_node->binaryUseKind()) {
2142 LValue numerator = lowInt32(m_node->child1());
2143 LValue denominator = lowInt32(m_node->child2());
// 0 / negative produces -0; exit if the numerator is zero and the
// denominator is negative.
2145 if (shouldCheckNegativeZero(m_node->arithMode())) {
2146 LBasicBlock zeroNumerator = m_out.newBlock();
2147 LBasicBlock numeratorContinuation = m_out.newBlock();
2150 m_out.isZero32(numerator),
2151 rarely(zeroNumerator), usually(numeratorContinuation));
2153 LBasicBlock innerLastNext = m_out.appendTo(zeroNumerator, numeratorContinuation);
2156 NegativeZero, noValue(), 0, m_out.lessThan(denominator, m_out.int32Zero));
2158 m_out.jump(numeratorContinuation);
2160 m_out.appendTo(numeratorContinuation, innerLastNext);
2163 if (shouldCheckOverflow(m_node->arithMode())) {
2164 LBasicBlock unsafeDenominator = m_out.newBlock();
2165 LBasicBlock continuation = m_out.newBlock();
// denominator+1 >u 1 (unsigned) filters out both 0 and -1 in one test.
2167 LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
2169 m_out.above(adjustedDenominator, m_out.int32One),
2170 usually(continuation), rarely(unsafeDenominator));
// Unsafe denominator: exit on divide-by-zero or INT_MIN / -1 overflow.
2172 LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
2173 LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
2174 speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator));
2175 speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31));
2176 m_out.jump(continuation);
2178 m_out.appendTo(continuation, lastNext);
2179 LValue result = m_out.div(numerator, denominator);
// Exit if the division was inexact (result * denominator != numerator).
2181 Overflow, noValue(), 0,
2182 m_out.notEqual(m_out.mul(result, denominator), numerator));
// Unchecked mode: chillDiv tolerates 0 and INT_MIN/-1 denominators.
2185 setInt32(m_out.chillDiv(numerator, denominator));
2190 case DoubleRepUse: {
2191 setDouble(m_out.doubleDiv(
2192 lowDouble(m_node->child1()), lowDouble(m_node->child2())));
// Untyped: emit the shared binary snippet with an FP scratch register.
2197 emitBinarySnippet<JITDivGenerator, NeedScratchFPR>(operationValueDiv);
2202 DFG_CRASH(m_graph, m_node, "Bad use kind");
// ArithMod: int32 modulo with overflow / negative-zero checks, or
// double fmod via doubleMod.
2207 void compileArithMod()
2209 switch (m_node->binaryUseKind()) {
2211 LValue numerator = lowInt32(m_node->child1());
2212 LValue denominator = lowInt32(m_node->child2());
2215 if (shouldCheckOverflow(m_node->arithMode())) {
2216 LBasicBlock unsafeDenominator = m_out.newBlock();
2217 LBasicBlock continuation = m_out.newBlock();
// denominator+1 >u 1 (unsigned) filters out both 0 and -1 in one test.
2219 LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
2221 m_out.above(adjustedDenominator, m_out.int32One),
2222 usually(continuation), rarely(unsafeDenominator));
// Unsafe denominator: exit on modulo-by-zero or INT_MIN % -1.
2224 LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
2225 LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
2226 speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator));
2227 speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31));
2228 m_out.jump(continuation);
2230 m_out.appendTo(continuation, lastNext);
2231 LValue result = m_out.mod(numerator, denominator);
// Unchecked mode: chillMod tolerates the unsafe denominators.
2234 remainder = m_out.chillMod(numerator, denominator);
// negative % x yielding 0 is really -0; exit in that case.
2236 if (shouldCheckNegativeZero(m_node->arithMode())) {
2237 LBasicBlock negativeNumerator = m_out.newBlock();
2238 LBasicBlock numeratorContinuation = m_out.newBlock();
2241 m_out.lessThan(numerator, m_out.int32Zero),
2242 unsure(negativeNumerator), unsure(numeratorContinuation));
2244 LBasicBlock innerLastNext = m_out.appendTo(negativeNumerator, numeratorContinuation);
2246 speculate(NegativeZero, noValue(), 0, m_out.isZero32(remainder));
2248 m_out.jump(numeratorContinuation);
2250 m_out.appendTo(numeratorContinuation, innerLastNext);
2253 setInt32(remainder);
2257 case DoubleRepUse: {
2259 m_out.doubleMod(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
2264 DFG_CRASH(m_graph, m_node, "Bad use kind");
// ArithMin / ArithMax: int32 via a compare-and-select; double via
// explicit branches so NaN operands produce PNaN.
2269 void compileArithMinOrMax()
2271 switch (m_node->binaryUseKind()) {
2273 LValue left = lowInt32(m_node->child1());
2274 LValue right = lowInt32(m_node->child2());
2278 m_node->op() == ArithMin
2279 ? m_out.lessThan(left, right)
2280 : m_out.lessThan(right, left),
2285 case DoubleRepUse: {
2286 LValue left = lowDouble(m_node->child1());
2287 LValue right = lowDouble(m_node->child2());
2289 LBasicBlock notLessThan = m_out.newBlock();
2290 LBasicBlock continuation = m_out.newBlock();
2292 Vector<ValueFromBlock, 2> results;
// Fast path: left already is the min (or max); keep it.
2294 results.append(m_out.anchor(left));
2296 m_node->op() == ArithMin
2297 ? m_out.doubleLessThan(left, right)
2298 : m_out.doubleGreaterThan(left, right),
2299 unsure(continuation), unsure(notLessThan));
// Otherwise pick right, unless the ordered comparison fails (meaning a
// NaN operand), in which case the result is PNaN.
2301 LBasicBlock lastNext = m_out.appendTo(notLessThan, continuation);
2302 results.append(m_out.anchor(m_out.select(
2303 m_node->op() == ArithMin
2304 ? m_out.doubleGreaterThanOrEqual(left, right)
2305 : m_out.doubleLessThanOrEqual(left, right),
2306 right, m_out.constDouble(PNaN))));
2307 m_out.jump(continuation);
2309 m_out.appendTo(continuation, lastNext);
2310 setDouble(m_out.phi(Double, results));
2315 DFG_CRASH(m_graph, m_node, "Bad use kind");
// ArithAbs: branchless int32 abs, double abs, or a runtime call for
// untyped operands.
2320 void compileArithAbs()
2322 switch (m_node->child1().useKind()) {
2324 LValue value = lowInt32(m_node->child1());
// Branchless abs: mask is all-ones for negatives, so
// (value + mask) ^ mask == -value; zero mask leaves value unchanged.
2326 LValue mask = m_out.aShr(value, m_out.constInt32(31));
2327 LValue result = m_out.bitXor(mask, m_out.add(mask, value));
// abs(INT_MIN) overflows back to a negative; exit if checking overflow.
2329 if (shouldCheckOverflow(m_node->arithMode()))
2330 speculate(Overflow, noValue(), 0, m_out.lessThan(result, m_out.int32Zero));
2336 case DoubleRepUse: {
2337 setDouble(m_out.doubleAbs(lowDouble(m_node->child1())));
2342 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2343 LValue argument = lowJSValue(m_node->child1());
2344 LValue result = vmCall(Double, m_out.operation(operationArithAbs), m_callFrame, argument);
// ArithUnary: sin/cos/etc. selected by the node's arith unary type;
// double inputs use the inline doubleUnary, untyped inputs a VM call.
2351 void compileArithUnary()
2353 if (m_node->child1().useKind() == DoubleRepUse) {
2354 setDouble(m_out.doubleUnary(m_node->arithUnaryType(), lowDouble(m_node->child1())));
2357 LValue argument = lowJSValue(m_node->child1());
2358 LValue result = vmCall(Double, m_out.operation(DFG::arithUnaryOperation(m_node->arithUnaryType())), m_callFrame, argument);
// ArithPow: x ** y. Int32 exponents use doublePowi directly; otherwise a
// chain of special-case checks implements the ECMAScript pow edge cases
// (NaN exponent, |base| == 1 with infinite exponent, y == ±0.5 handled
// through SQRT) before falling back to doublePow.
2362 void compileArithPow()
2364 if (m_node->child2().useKind() == Int32Use)
2365 setDouble(m_out.doublePowi(lowDouble(m_node->child1()), lowInt32(m_node->child2())));
2367 LValue base = lowDouble(m_node->child1());
2368 LValue exponent = lowDouble(m_node->child2());
2370 LBasicBlock integerExponentIsSmallBlock = m_out.newBlock();
2371 LBasicBlock integerExponentPowBlock = m_out.newBlock();
2372 LBasicBlock doubleExponentPowBlockEntry = m_out.newBlock();
2373 LBasicBlock nanExceptionBaseIsOne = m_out.newBlock();
2374 LBasicBlock nanExceptionExponentIsInfinity = m_out.newBlock();
2375 LBasicBlock testExponentIsOneHalf = m_out.newBlock();
2376 LBasicBlock handleBaseZeroExponentIsOneHalf = m_out.newBlock();
2377 LBasicBlock handleInfinityForExponentIsOneHalf = m_out.newBlock();
2378 LBasicBlock exponentIsOneHalfNormal = m_out.newBlock();
2379 LBasicBlock exponentIsOneHalfInfinity = m_out.newBlock();
2380 LBasicBlock testExponentIsNegativeOneHalf = m_out.newBlock();
2381 LBasicBlock testBaseZeroExponentIsNegativeOneHalf = m_out.newBlock();
2382 LBasicBlock handleBaseZeroExponentIsNegativeOneHalf = m_out.newBlock();
2383 LBasicBlock handleInfinityForExponentIsNegativeOneHalf = m_out.newBlock();
2384 LBasicBlock exponentIsNegativeOneHalfNormal = m_out.newBlock();
2385 LBasicBlock exponentIsNegativeOneHalfInfinity = m_out.newBlock();
2386 LBasicBlock powBlock = m_out.newBlock();
2387 LBasicBlock nanExceptionResultIsNaN = m_out.newBlock();
2388 LBasicBlock continuation = m_out.newBlock();
// If the exponent round-trips through int32 unchanged, it is integral
// and small exponents can use the fast powi loop.
2390 LValue integerExponent = m_out.doubleToInt(exponent);
2391 LValue integerExponentConvertedToDouble = m_out.intToDouble(integerExponent);
2392 LValue exponentIsInteger = m_out.doubleEqual(exponent, integerExponentConvertedToDouble);
2393 m_out.branch(exponentIsInteger, unsure(integerExponentIsSmallBlock), unsure(doubleExponentPowBlockEntry));
2395 LBasicBlock lastNext = m_out.appendTo(integerExponentIsSmallBlock, integerExponentPowBlock);
2396 LValue integerExponentBelowMax = m_out.belowOrEqual(integerExponent, m_out.constInt32(maxExponentForIntegerMathPow));
2397 m_out.branch(integerExponentBelowMax, usually(integerExponentPowBlock), rarely(doubleExponentPowBlockEntry));
2399 m_out.appendTo(integerExponentPowBlock, doubleExponentPowBlockEntry);
2400 ValueFromBlock powDoubleIntResult = m_out.anchor(m_out.doublePowi(base, integerExponent));
2401 m_out.jump(continuation);
2403 // If y is NaN, the result is NaN.
2404 m_out.appendTo(doubleExponentPowBlockEntry, nanExceptionBaseIsOne);
2405 LValue exponentIsNaN;
2406 if (provenType(m_node->child2()) & SpecDoubleNaN)
2407 exponentIsNaN = m_out.doubleNotEqualOrUnordered(exponent, exponent);
2409 exponentIsNaN = m_out.booleanFalse;
2410 m_out.branch(exponentIsNaN, rarely(nanExceptionResultIsNaN), usually(nanExceptionBaseIsOne));
2412 // If abs(x) is 1 and y is +infinity, the result is NaN.
2413 // If abs(x) is 1 and y is -infinity, the result is NaN.
2415 // Test if base == 1.
2416 m_out.appendTo(nanExceptionBaseIsOne, nanExceptionExponentIsInfinity);
2417 LValue absoluteBase = m_out.doubleAbs(base);
2418 LValue absoluteBaseIsOne = m_out.doubleEqual(absoluteBase, m_out.constDouble(1));
2419 m_out.branch(absoluteBaseIsOne, rarely(nanExceptionExponentIsInfinity), usually(testExponentIsOneHalf));
2421 // Test if abs(y) == Infinity.
2422 m_out.appendTo(nanExceptionExponentIsInfinity, testExponentIsOneHalf);
2423 LValue absoluteExponent = m_out.doubleAbs(exponent);
2424 LValue absoluteExponentIsInfinity = m_out.doubleEqual(absoluteExponent, m_out.constDouble(std::numeric_limits<double>::infinity()));
2425 m_out.branch(absoluteExponentIsInfinity, rarely(nanExceptionResultIsNaN), usually(testExponentIsOneHalf));
2427 // If y == 0.5 or y == -0.5, handle it through SQRT.
2428 // We have to be careful with -0 and -Infinity.
2431 m_out.appendTo(testExponentIsOneHalf, handleBaseZeroExponentIsOneHalf);
2432 LValue exponentIsOneHalf = m_out.doubleEqual(exponent, m_out.constDouble(0.5));
2433 m_out.branch(exponentIsOneHalf, rarely(handleBaseZeroExponentIsOneHalf), usually(testExponentIsNegativeOneHalf));
// Test if base == ±0: (±0) ** 0.5 is +0.
2436 m_out.appendTo(handleBaseZeroExponentIsOneHalf, handleInfinityForExponentIsOneHalf);
2437 LValue baseIsZeroExponentIsOneHalf = m_out.doubleEqual(base, m_out.doubleZero);
2438 ValueFromBlock zeroResultExponentIsOneHalf = m_out.anchor(m_out.doubleZero);
2439 m_out.branch(baseIsZeroExponentIsOneHalf, rarely(continuation), usually(handleInfinityForExponentIsOneHalf));
2441 // Test if abs(x) == Infinity.
2442 m_out.appendTo(handleInfinityForExponentIsOneHalf, exponentIsOneHalfNormal);
2443 LValue absoluteBaseIsInfinityOneHalf = m_out.doubleEqual(absoluteBase, m_out.constDouble(std::numeric_limits<double>::infinity()));
2444 m_out.branch(absoluteBaseIsInfinityOneHalf, rarely(exponentIsOneHalfInfinity), usually(exponentIsOneHalfNormal));
2446 // The exponent is 0.5, the base is finite or NaN, we can use SQRT.
2447 m_out.appendTo(exponentIsOneHalfNormal, exponentIsOneHalfInfinity);
2448 ValueFromBlock sqrtResult = m_out.anchor(m_out.doubleSqrt(base));
2449 m_out.jump(continuation);
2451 // The exponent is 0.5, the base is infinite, the result is always infinite.
2452 m_out.appendTo(exponentIsOneHalfInfinity, testExponentIsNegativeOneHalf);
2453 ValueFromBlock sqrtInfinityResult = m_out.anchor(m_out.constDouble(std::numeric_limits<double>::infinity()));
2454 m_out.jump(continuation);
2456 // Test if y == -0.5
2457 m_out.appendTo(testExponentIsNegativeOneHalf, testBaseZeroExponentIsNegativeOneHalf);
2458 LValue exponentIsNegativeOneHalf = m_out.doubleEqual(exponent, m_out.constDouble(-0.5));
2459 m_out.branch(exponentIsNegativeOneHalf, rarely(testBaseZeroExponentIsNegativeOneHalf), usually(powBlock));
// Test if base == ±0: (±0) ** -0.5 is +Infinity.
2462 m_out.appendTo(testBaseZeroExponentIsNegativeOneHalf, handleBaseZeroExponentIsNegativeOneHalf);
2463 LValue baseIsZeroExponentIsNegativeOneHalf = m_out.doubleEqual(base, m_out.doubleZero);
2464 m_out.branch(baseIsZeroExponentIsNegativeOneHalf, rarely(handleBaseZeroExponentIsNegativeOneHalf), usually(handleInfinityForExponentIsNegativeOneHalf));
2466 m_out.appendTo(handleBaseZeroExponentIsNegativeOneHalf, handleInfinityForExponentIsNegativeOneHalf);
2467 ValueFromBlock oneOverSqrtZeroResult = m_out.anchor(m_out.constDouble(std::numeric_limits<double>::infinity()));
2468 m_out.jump(continuation);
2470 // Test if abs(x) == Infinity.
2471 m_out.appendTo(handleInfinityForExponentIsNegativeOneHalf, exponentIsNegativeOneHalfNormal);
2472 LValue absoluteBaseIsInfinityNegativeOneHalf = m_out.doubleEqual(absoluteBase, m_out.constDouble(std::numeric_limits<double>::infinity()));
2473 m_out.branch(absoluteBaseIsInfinityNegativeOneHalf, rarely(exponentIsNegativeOneHalfInfinity), usually(exponentIsNegativeOneHalfNormal));
2475 // The exponent is -0.5, the base is finite or NaN, we can use 1/SQRT.
2476 m_out.appendTo(exponentIsNegativeOneHalfNormal, exponentIsNegativeOneHalfInfinity);
2477 LValue sqrtBase = m_out.doubleSqrt(base);
2478 ValueFromBlock oneOverSqrtResult = m_out.anchor(m_out.div(m_out.constDouble(1.), sqrtBase));
2479 m_out.jump(continuation);
2481 // The exponent is -0.5, the base is infinite, the result is always zero.
2482 m_out.appendTo(exponentIsNegativeOneHalfInfinity, powBlock);
2483 ValueFromBlock oneOverSqrtInfinityResult = m_out.anchor(m_out.doubleZero);
2484 m_out.jump(continuation);
// General case: fall back to the full doublePow.
2486 m_out.appendTo(powBlock, nanExceptionResultIsNaN);
2487 ValueFromBlock powResult = m_out.anchor(m_out.doublePow(base, exponent));
2488 m_out.jump(continuation);
2490 m_out.appendTo(nanExceptionResultIsNaN, continuation);
2491 ValueFromBlock pureNan = m_out.anchor(m_out.constDouble(PNaN));
2492 m_out.jump(continuation);
2494 m_out.appendTo(continuation, lastNext);
2495 setDouble(m_out.phi(Double, powDoubleIntResult, zeroResultExponentIsOneHalf, sqrtResult, sqrtInfinityResult, oneOverSqrtZeroResult, oneOverSqrtResult, oneOverSqrtInfinityResult, powResult, pureNan));
// Lowers ArithRandom (Math.random()). Inlines WeakRandom::advance() — an
// xorshift-style 64-bit generator whose state (m_low/m_high) lives inside the
// JSGlobalObject — then scales the low 53 bits to a double in [0, 1).
// NOTE(review): braces/blank lines appear elided in this extraction.
2499 void compileArithRandom()
2501 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
2503 // Inlined WeakRandom::advance().
2504 // uint64_t x = m_low;
// State is addressed absolutely: globalObject base + offset of the embedded WeakRandom fields.
2505 void* lowAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset();
2506 LValue low = m_out.load64(m_out.absolute(lowAddress));
2507 // uint64_t y = m_high;
2508 void* highAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset();
2509 LValue high = m_out.load64(m_out.absolute(highAddress));
// m_low = m_high (first half of the state rotation).
2511 m_out.store64(high, m_out.absolute(lowAddress));
// x ^= x << 23;
2514 LValue phase1 = m_out.bitXor(m_out.shl(low, m_out.constInt64(23)), low);
// x ^= x >> 17;
2517 LValue phase2 = m_out.bitXor(m_out.lShr(phase1, m_out.constInt64(17)), phase1);
2519 // x ^= y ^ (y >> 26);
2520 LValue phase3 = m_out.bitXor(m_out.bitXor(high, m_out.lShr(high, m_out.constInt64(26))), phase2);
// m_high = x (second half of the state rotation).
2523 m_out.store64(phase3, m_out.absolute(highAddress));
// Result value is x + y, per WeakRandom::advance().
2526 LValue random64 = m_out.add(phase3, high);
2528 // Extract random 53bit. [0, 53] bit is safe integer number ranges in double representation.
2529 LValue random53 = m_out.bitAnd(random64, m_out.constInt64((1ULL << 53) - 1));
2531 LValue double53Integer = m_out.intToDouble(random53);
2533 // Convert `(53bit double integer value) / (1 << 53)` to `(53bit double integer value) * (1.0 / (1 << 53))`.
2534 // In latter case, `1.0 / (1 << 53)` will become a double value represented as (mantissa = 0 & exp = 970, it means 1e-(2**54)).
2535 static const double scale = 1.0 / (1ULL << 53);
2537 // Multiplying 1e-(2**54) with the double integer does not change anything of the mantissa part of the double integer.
2538 // It just reduces the exp part of the given 53bit double integer.
2539 // (Except for 0.0. This is specially handled and in this case, exp just becomes 0.)
2540 // Now we get 53bit precision random double value in [0, 1).
2541 LValue result = m_out.doubleMul(double53Integer, m_out.constDouble(scale));
// Lowers ArithRound. DoubleRepUse fast paths:
//   - When the node produces an integer and need not distinguish -0:
//     round = floor(x + 0.5).
//   - Otherwise: ceil(x), then subtract 1 when (ceil(x) - x) > 0.5, which
//     yields round-half-up while preserving the sign of zero.
// UntypedUse falls back to the operationArithRound slow call.
// NOTE(review): else/brace lines appear elided in this extraction.
2546 void compileArithRound()
2548 if (m_node->child1().useKind() == DoubleRepUse) {
2549 LValue result = nullptr;
2550 if (producesInteger(m_node->arithRoundingMode()) && !shouldCheckNegativeZero(m_node->arithRoundingMode())) {
2551 LValue value = lowDouble(m_node->child1());
// Classic round-half-up: floor(x + 0.5).
2552 result = m_out.doubleFloor(m_out.doubleAdd(value, m_out.constDouble(0.5)));
2554 LBasicBlock realPartIsMoreThanHalf = m_out.newBlock();
2555 LBasicBlock continuation = m_out.newBlock();
2557 LValue value = lowDouble(m_node->child1());
2558 LValue integerValue = m_out.doubleCeil(value);
2559 ValueFromBlock integerValueResult = m_out.anchor(integerValue);
// Fractional distance between ceil(x) and x; > 0.5 means ceil overshot.
2561 LValue realPart = m_out.doubleSub(integerValue, value);
// "OrUnordered" keeps NaN on the adjust path; NaN - 1 is still NaN.
2563 m_out.branch(m_out.doubleGreaterThanOrUnordered(realPart, m_out.constDouble(0.5)), unsure(realPartIsMoreThanHalf), unsure(continuation));
2565 LBasicBlock lastNext = m_out.appendTo(realPartIsMoreThanHalf, continuation);
2566 LValue integerValueRoundedDown = m_out.doubleSub(integerValue, m_out.constDouble(1));
2567 ValueFromBlock integerValueRoundedDownResult = m_out.anchor(integerValueRoundedDown);
2568 m_out.jump(continuation);
2569 m_out.appendTo(continuation, lastNext);
2571 result = m_out.phi(Double, integerValueResult, integerValueRoundedDownResult);
// Convert to int32 (with -0 speculation if required) when an integer result is wanted.
2574 if (producesInteger(m_node->arithRoundingMode())) {
2575 LValue integerValue = convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode()));
2576 setInt32(integerValue);
// Slow path: generic JSValue operand goes through the runtime.
2582 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2583 LValue argument = lowJSValue(m_node->child1());
2584 setJSValue(vmCall(Int64, m_out.operation(operationArithRound), m_callFrame, argument));
// Lowers ArithFloor. DoubleRepUse uses the B3 doubleFloor primitive, then
// either converts to int32 (with optional -0 speculation) or stays double;
// UntypedUse falls back to operationArithFloor.
2587 void compileArithFloor()
2589 if (m_node->child1().useKind() == DoubleRepUse) {
2590 LValue value = lowDouble(m_node->child1());
2591 LValue integerValue = m_out.doubleFloor(value);
2592 if (producesInteger(m_node->arithRoundingMode()))
2593 setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode())));
// (else branch — double result path; `else` keyword elided in this extraction)
2595 setDouble(integerValue);
2598 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2599 LValue argument = lowJSValue(m_node->child1());
2600 setJSValue(vmCall(Int64, m_out.operation(operationArithFloor), m_callFrame, argument));
// Lowers ArithCeil; mirrors compileArithFloor but uses doubleCeil and the
// operationArithCeil slow path.
2603 void compileArithCeil()
2605 if (m_node->child1().useKind() == DoubleRepUse) {
2606 LValue value = lowDouble(m_node->child1());
2607 LValue integerValue = m_out.doubleCeil(value);
2608 if (producesInteger(m_node->arithRoundingMode()))
2609 setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode())));
// (else branch — double result path; `else` keyword elided in this extraction)
2611 setDouble(integerValue);
2614 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2615 LValue argument = lowJSValue(m_node->child1());
2616 setJSValue(vmCall(Int64, m_out.operation(operationArithCeil), m_callFrame, argument));
// Lowers ArithTrunc (round toward zero); mirrors floor/ceil lowering with
// doubleTrunc and the operationArithTrunc slow path.
// NOTE(review): the double-result tail of the fast path appears elided here.
2619 void compileArithTrunc()
2621 if (m_node->child1().useKind() == DoubleRepUse) {
2622 LValue value = lowDouble(m_node->child1());
2623 LValue result = m_out.doubleTrunc(value);
2624 if (producesInteger(m_node->arithRoundingMode()))
2625 setInt32(convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode())));
2630 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2631 LValue argument = lowJSValue(m_node->child1());
2632 setJSValue(vmCall(Int64, m_out.operation(operationArithTrunc), m_callFrame, argument));
// Lowers ArithSqrt: hardware sqrt for DoubleRepUse, runtime call otherwise.
// NOTE(review): the setDouble(result)/return lines of the slow path appear
// elided in this extraction.
2635 void compileArithSqrt()
2637 if (m_node->child1().useKind() == DoubleRepUse) {
2638 setDouble(m_out.doubleSqrt(lowDouble(m_node->child1())));
2641 LValue argument = lowJSValue(m_node->child1());
2642 LValue result = vmCall(Double, m_out.operation(operationArithSqrt), m_callFrame, argument);
// Lowers ArithFRound (Math.fround — round double to float32 precision):
// B3 fround for DoubleRepUse, operationArithFRound otherwise.
// NOTE(review): the setDouble(result)/return lines of the slow path appear
// elided in this extraction.
2646 void compileArithFRound()
2648 if (m_node->child1().useKind() == DoubleRepUse) {
2649 setDouble(m_out.fround(lowDouble(m_node->child1())));
2652 LValue argument = lowJSValue(m_node->child1());
2653 LValue result = vmCall(Double, m_out.operation(operationArithFRound), m_callFrame, argument);
// Lowers ArithNegate, dispatching on the operand's use kind:
//   - Int32: plain neg, or a checked sub(0, x) when overflow matters; when -0
//     must also be caught, first speculate that x is neither 0 nor INT_MIN
//     (both cases have all bits in 0x7fffffff clear).
//   - Int52: plain neg when the value provably fits (no SpecInt52Only),
//     otherwise a checked 64-bit sub; -0 is caught by an isZero64 check on
//     the result in both variants.
//   - DoubleRepUse: IEEE negation, no checks needed.
//   - UntypedUse: profiled JITNegIC math IC with repatching/non-repatching
//     runtime fallbacks.
// NOTE(review): case labels/braces for the Int32/Int52 arms appear elided in
// this extraction.
2657 void compileArithNegate()
2659 switch (m_node->child1().useKind()) {
2661 LValue value = lowInt32(m_node->child1());
2664 if (!shouldCheckOverflow(m_node->arithMode()))
2665 result = m_out.neg(value);
2666 else if (!shouldCheckNegativeZero(m_node->arithMode())) {
// speculateSub(0, value) emits the negate and an overflow check in one node.
2667 CheckValue* check = m_out.speculateSub(m_out.int32Zero, value);
2668 blessSpeculation(check, Overflow, noValue(), nullptr, m_origin);
// value & 0x7fffffff == 0 iff value is 0 or INT32_MIN — the two inputs whose
// negation is not representable as a non-zero int32.
2671 speculate(Overflow, noValue(), 0, m_out.testIsZero32(value, m_out.constInt32(0x7fffffff)));
2672 result = m_out.neg(value);
// Int52 path: unchecked negate when the abstract value can't be a full Int52.
2680 if (!abstractValue(m_node->child1()).couldBeType(SpecInt52Only)) {
2682 LValue value = lowWhicheverInt52(m_node->child1(), kind);
2683 LValue result = m_out.neg(value);
2684 if (shouldCheckNegativeZero(m_node->arithMode()))
2685 speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
2686 setInt52(result, kind);
// Checked Int52 negate.
2690 LValue value = lowInt52(m_node->child1());
2691 CheckValue* result = m_out.speculateSub(m_out.int64Zero, value);
2692 blessSpeculation(result, Int52Overflow, noValue(), nullptr, m_origin);
2693 if (shouldCheckNegativeZero(m_node->arithMode()))
2694 speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
2699 case DoubleRepUse: {
2700 setDouble(m_out.doubleNeg(lowDouble(m_node->child1())));
// Untyped: use the baseline ArithProfile to seed a negation math IC.
2705 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2706 ArithProfile* arithProfile = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic)->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
2707 JITNegIC* negIC = codeBlock()->addJITNegIC(arithProfile);
2708 auto repatchingFunction = operationArithNegateOptimize;
2709 auto nonRepatchingFunction = operationArithNegate;
2710 compileMathIC(negIC, repatchingFunction, nonRepatchingFunction);
// Lowers BitAnd: snippet-based IC for untyped operands, otherwise a direct
// 32-bit bitAnd on the int32-lowered children.
2715 void compileBitAnd()
2717 if (m_node->isBinaryUseKind(UntypedUse)) {
2718 emitBinaryBitOpSnippet<JITBitAndGenerator>(operationValueBitAnd);
2721 setInt32(m_out.bitAnd(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
// Body of compileBitOr (the signature line is elided in this extraction):
// same shape as compileBitAnd, using the BitOr snippet/operation.
2726 if (m_node->isBinaryUseKind(UntypedUse)) {
2727 emitBinaryBitOpSnippet<JITBitOrGenerator>(operationValueBitOr);
2730 setInt32(m_out.bitOr(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
// Lowers BitXor; same shape as compileBitAnd/compileBitOr.
2733 void compileBitXor()
2735 if (m_node->isBinaryUseKind(UntypedUse)) {
2736 emitBinaryBitOpSnippet<JITBitXorGenerator>(operationValueBitXor);
2739 setInt32(m_out.bitXor(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
// Lowers BitRShift (>>): signed/arithmetic shift. The shift amount is masked
// with 31, matching JS semantics where only the low 5 bits count.
2742 void compileBitRShift()
2744 if (m_node->isBinaryUseKind(UntypedUse)) {
2745 emitRightShiftSnippet(JITRightShiftGenerator::SignedShift);
2748 setInt32(m_out.aShr(
2749 lowInt32(m_node->child1()),
2750 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
// Lowers BitLShift (<<); shift count masked to 5 bits like the other shifts.
// NOTE(review): the `setInt32(m_out.shl(` line appears elided in this
// extraction — lines 2760-2761 are its argument list.
2753 void compileBitLShift()
2755 if (m_node->isBinaryUseKind(UntypedUse)) {
2756 emitBinaryBitOpSnippet<JITLeftShiftGenerator>(operationValueBitLShift);
2760 lowInt32(m_node->child1()),
2761 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
// Lowers BitURShift (>>>): logical/unsigned shift, count masked to 5 bits.
2764 void compileBitURShift()
2766 if (m_node->isBinaryUseKind(UntypedUse)) {
2767 emitRightShiftSnippet(JITRightShiftGenerator::UnsignedShift);
2770 setInt32(m_out.lShr(
2771 lowInt32(m_node->child1()),
2772 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
// Lowers UInt32ToNumber: reinterpret an int32 as unsigned. If the arith mode
// tolerates overflow, zero-extend into a strict Int52; otherwise speculate
// that the value is non-negative (so the int32 is already correct) and OSR
// exit with Overflow when it is not.
// NOTE(review): the in-range setInt32 tail appears elided in this extraction.
2775 void compileUInt32ToNumber()
2777 LValue value = lowInt32(m_node->child1());
2779 if (doesOverflow(m_node->arithMode())) {
2780 setStrictInt52(m_out.zeroExtPtr(value));
2784 speculate(Overflow, noValue(), 0, m_out.lessThan(value, m_out.int32Zero));
// Lowers CheckStructure: speculate that child1's cell has one of the
// structures in the node's structure set, exiting with BadCache (or
// BadConstantCache for constant bases). CellOrOtherUse additionally permits
// "other" (null/undefined) values, which skip the structure check but are
// type-checked to be SpecCell | SpecOther.
// NOTE(review): the checkStructure(...) call heads and some braces appear
// elided in this extraction; lines 2802-2805 / 2822-2825 are its arguments.
2788 void compileCheckStructure()
2791 if (m_node->child1()->hasConstant())
2792 exitKind = BadConstantCache;
2794 exitKind = BadCache;
2796 switch (m_node->child1().useKind()) {
2798 case KnownCellUse: {
2799 LValue cell = lowCell(m_node->child1());
// Compare the cell's structure ID against each registered structure.
2802 m_out.load32(cell, m_heaps.JSCell_structureID), jsValueValue(cell),
2803 exitKind, m_node->structureSet(),
2804 [&] (RegisteredStructure structure) {
2805 return weakStructureID(structure);
2810 case CellOrOtherUse: {
2811 LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);
2813 LBasicBlock cellCase = m_out.newBlock();
2814 LBasicBlock notCellCase = m_out.newBlock();
2815 LBasicBlock continuation = m_out.newBlock();
2818 isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase));
2820 LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
2822 m_out.load32(value, m_heaps.JSCell_structureID), jsValueValue(value),
2823 exitKind, m_node->structureSet(),
2824 [&] (RegisteredStructure structure) {
2825 return weakStructureID(structure);
2827 m_out.jump(continuation);
// Non-cell side: only "other" (null/undefined) is acceptable.
2829 m_out.appendTo(notCellCase, continuation);
2830 FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecCell | SpecOther, isNotOther(value));
2831 m_out.jump(continuation);
2833 m_out.appendTo(continuation, lastNext);
2838 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers CheckStructureOrEmpty: like CheckStructure, but when the abstract
// interpreter says the value may be the empty JSValue (all-zero bits, e.g. a
// TDZ slot), an empty value bypasses the structure check entirely.
// NOTE(review): the checkStructure(...) call head appears elided in this
// extraction; lines 2864-2867 are its arguments.
2843 void compileCheckStructureOrEmpty()
2846 if (m_node->child1()->hasConstant())
2847 exitKind = BadConstantCache;
2849 exitKind = BadCache;
2851 LValue cell = lowCell(m_node->child1());
// Only build the empty-value bypass CFG when SpecEmpty is actually possible.
2852 bool maySeeEmptyValue = m_interpreter.forNode(m_node->child1()).m_type & SpecEmpty;
2853 LBasicBlock notEmpty;
2854 LBasicBlock continuation;
2855 LBasicBlock lastNext;
2856 if (maySeeEmptyValue) {
2857 notEmpty = m_out.newBlock();
2858 continuation = m_out.newBlock();
// Empty JSValue is encoded as zero — jump straight to the continuation.
2859 m_out.branch(m_out.isZero64(cell), unsure(continuation), unsure(notEmpty));
2860 lastNext = m_out.appendTo(notEmpty, continuation);
2864 m_out.load32(cell, m_heaps.JSCell_structureID), jsValueValue(cell),
2865 exitKind, m_node->structureSet(),
2866 [&] (RegisteredStructure structure) {
2867 return weakStructureID(structure);
2870 if (maySeeEmptyValue) {
2871 m_out.jump(continuation);
2872 m_out.appendTo(continuation, lastNext);
// Lowers CheckCell: speculate that child1 is exactly the node's cell
// constant (compared via a weak pointer); exit with BadCell otherwise.
// NOTE(review): the speculate( call head appears elided in this extraction;
// lines 2881-2882 are its arguments.
2876 void compileCheckCell()
2878 LValue cell = lowCell(m_node->child1());
2881 BadCell, jsValueValue(cell), m_node->child1().node(),
2882 m_out.notEqual(cell, weakPointer(m_node->cellOperand()->cell()));
// Lowers CheckBadCell (body elided in this extraction — presumably an
// unconditional speculation failure; verify against the full source).
2885 void compileCheckBadCell()
// Lowers CheckNotEmpty: exit with TDZFailure when child1 is the empty
// JSValue (zero bits), i.e. a read of an uninitialized let/const binding.
2890 void compileCheckNotEmpty()
2892 speculate(TDZFailure, noValue(), nullptr, m_out.isZero64(lowJSValue(m_node->child1())));
// Lowers CheckStringIdent: speculate that child1's string has exactly the
// expected uniqued StringImpl (pointer identity); exit with BadIdent if not.
2895 void compileCheckStringIdent()
2897 UniquedStringImpl* uid = m_node->uidOperand();
2898 LValue stringImpl = lowStringIdent(m_node->child1());
2899 speculate(BadIdent, noValue(), nullptr, m_out.notEqual(stringImpl, m_out.constIntPtr(uid)));
// Lowers GetExecutable: speculate child1 is a JSFunction, then load its
// executable pointer.
2902 void compileGetExecutable()
2904 LValue cell = lowCell(m_node->child1());
2905 speculateFunction(m_node->child1(), cell);
2906 setJSValue(m_out.loadPtr(cell, m_heaps.JSFunction_executable));
// Lowers Arrayify / ArrayifyToStructure: if the base cell already has the
// expected indexing shape (or structure, for ArrayifyToStructure), fall
// through; otherwise call the matching operationEnsure* runtime function to
// convert the butterfly, then re-check and OSR exit with BadIndexingType if
// conversion still didn't produce the expected shape.
// NOTE(review): case labels/braces for the slow-path switches appear elided
// in this extraction.
2909 void compileArrayify()
2911 LValue cell = lowCell(m_node->child1());
2912 LValue property = !!m_node->child2() ? lowInt32(m_node->child2()) : 0;
2914 LBasicBlock unexpectedStructure = m_out.newBlock();
2915 LBasicBlock continuation = m_out.newBlock();
2917 auto isUnexpectedArray = [&] (LValue cell) {
2918 if (m_node->op() == Arrayify)
2919 return m_out.logicalNot(isArrayTypeForArrayify(cell, m_node->arrayMode()));
2921 ASSERT(m_node->op() == ArrayifyToStructure);
2922 return m_out.notEqual(m_out.load32(cell, m_heaps.JSCell_structureID), weakStructureID(m_node->structure()));
2925 m_out.branch(isUnexpectedArray(cell), rarely(unexpectedStructure), usually(continuation));
2927 LBasicBlock lastNext = m_out.appendTo(unexpectedStructure, continuation);
// Sparse indices must not be arrayified into a contiguous shape — exit.
2930 switch (m_node->arrayMode().type()) {
2933 case Array::Contiguous:
2935 Uncountable, noValue(), 0,
2936 m_out.aboveOrEqual(property, m_out.constInt32(MIN_SPARSE_ARRAY_INDEX)));
// Convert the storage via the runtime, per target indexing type.
2943 switch (m_node->arrayMode().type()) {
2945 vmCall(Void, m_out.operation(operationEnsureInt32), m_callFrame, cell);
2948 vmCall(Void, m_out.operation(operationEnsureDouble), m_callFrame, cell);
2950 case Array::Contiguous:
2951 vmCall(Void, m_out.operation(operationEnsureContiguous), m_callFrame, cell);
2953 case Array::ArrayStorage:
2954 case Array::SlowPutArrayStorage:
2955 vmCall(Void, m_out.operation(operationEnsureArrayStorage), m_callFrame, cell);
2958 DFG_CRASH(m_graph, m_node, "Bad array type");
// Re-check after conversion; exit if the shape is still wrong.
2962 speculate(BadIndexingType, jsValueValue(cell), 0, isUnexpectedArray(cell));
2963 m_out.jump(continuation);
2965 m_out.appendTo(continuation, lastNext);
// Lowers PutStructure: record the structure transition with the plan (for
// GC/watchpoint bookkeeping), assert the transition preserves indexing type
// and type-info (so only the structure ID needs storing), then store the new
// weak structure ID into the cell's header.
// NOTE(review): the store call head appears elided in this extraction; lines
// 2980-2981 are its arguments.
2968 void compilePutStructure()
2970 m_ftlState.jitCode->common.notifyCompilingStructureTransition(m_graph.m_plan, codeBlock(), m_node);
2972 RegisteredStructure oldStructure = m_node->transition()->previous;
2973 RegisteredStructure newStructure = m_node->transition()->next;
2974 ASSERT_UNUSED(oldStructure, oldStructure->indexingType() == newStructure->indexingType());
2975 ASSERT(oldStructure->typeInfo().inlineTypeFlags() == newStructure->typeInfo().inlineTypeFlags());
2976 ASSERT(oldStructure->typeInfo().type() == newStructure->typeInfo().type());
2978 LValue cell = lowCell(m_node->child1());
2980 weakStructureID(newStructure),
2981 cell, m_heaps.JSCell_structureID);
// Lowers GetById / TryGetById. Cell bases go straight through the inline
// cache (getById); untyped bases branch on is-cell: cells use the IC, while
// non-cells call the generic runtime function directly — acknowledged below
// as duplicating the IC's own slow path.
// NOTE(review): case labels/braces appear elided in this extraction.
2984 void compileGetById(AccessType type)
2986 ASSERT(type == AccessType::Get || type == AccessType::TryGet);
2987 switch (m_node->child1().useKind()) {
2989 setJSValue(getById(lowCell(m_node->child1()), type));
2994 // This is pretty weird, since we duplicate the slow path both here and in the
2995 // code generated by the IC. We should investigate making this less bad.
2996 // https://bugs.webkit.org/show_bug.cgi?id=127830
2997 LValue value = lowJSValue(m_node->child1());
2999 LBasicBlock cellCase = m_out.newBlock();
3000 LBasicBlock notCellCase = m_out.newBlock();
3001 LBasicBlock continuation = m_out.newBlock();
3004 isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase));
3006 LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
3007 ValueFromBlock cellResult = m_out.anchor(getById(value, type));
3008 m_out.jump(continuation);
// Pick the generic runtime entry point matching the access type.
3010 J_JITOperation_EJI getByIdFunction;
3011 if (type == AccessType::Get)
3012 getByIdFunction = operationGetByIdGeneric;
3014 getByIdFunction = operationTryGetByIdGeneric;
3016 m_out.appendTo(notCellCase, continuation);
3017 ValueFromBlock notCellResult = m_out.anchor(vmCall(
3018 Int64, m_out.operation(getByIdFunction),
3020 m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
3021 m_out.jump(continuation);
3023 m_out.appendTo(continuation, lastNext);
3024 setJSValue(m_out.phi(Int64, cellResult, notCellResult));
3029 DFG_CRASH(m_graph, m_node, "Bad use kind");
3034 void compileGetByIdWithThis()
3036 if (m_node->child1().useKind() == CellUse && m_node->child2().useKind() == CellUse)
3037 setJSValue(getByIdWithThis(lowCell(m_node->child1()), lowCell(m_node->child2())));
3039 LValue base = lowJSValue(m_node->child1());
3040 LValue thisValue = lowJSValue(m_node->child2());
3042 LBasicBlock baseCellCase = m_out.newBlock();
3043 LBasicBlock notCellCase = m_out.newBlock();
3044 LBasicBlock thisValueCellCase = m_out.newBlock();
3045 LBasicBlock continuation = m_out.newBlock();
3048 isCell(base, provenType(m_node->child1())), unsure(baseCellCase), unsure(notCellCase));
3050 LBasicBlock lastNext = m_out.appendTo(baseCellCase, thisValueCellCase);
3053 isCell(thisValue, provenType(m_node->child2())), unsure(thisValueCellCase), unsure(notCellCase));
3055 m_out.appendTo(thisValueCellCase, notCellCase);
3056 ValueFromBlock cellResult = m_out.anchor(getByIdWithThis(base, thisValue));
3057 m_out.jump(continuation);
3059 m_out.appendTo(notCellCase, continuation);
3060 ValueFromBlock notCellResult = m_out.anchor(vmCall(
3061 Int64, m_out.operation(operationGetByIdWithThis),
3062 m_callFrame, base, thisValue,
3063 m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
3064 m_out.jump(continuation);
3066 m_out.appendTo(continuation, lastNext);
3067 setJSValue(m_out.phi(Int64, cellResult, notCellResult));
// Lowers GetByValWithThis: no fast path — always calls the runtime with
// (base, thisValue, subscript).
// NOTE(review): the setJSValue(result) tail appears elided in this extraction.
3072 void compileGetByValWithThis()
3074 LValue base = lowJSValue(m_node->child1());
3075 LValue thisValue = lowJSValue(m_node->child2());
3076 LValue subscript = lowJSValue(m_node->child3());
3078 LValue result = vmCall(Int64, m_out.operation(operationGetByValWithThis), m_callFrame, base, thisValue, subscript);
// Lowers PutByIdWithThis: always a runtime call, choosing the strict or
// sloppy variant from the origin's strictness.
3082 void compilePutByIdWithThis()
3084 LValue base = lowJSValue(m_node->child1());
3085 LValue thisValue = lowJSValue(m_node->child2());
3086 LValue value = lowJSValue(m_node->child3());
3088 vmCall(Void, m_out.operation(m_graph.isStrictModeFor(m_node->origin.semantic) ? operationPutByIdWithThisStrict : operationPutByIdWithThis),
3089 m_callFrame, base, thisValue, value, m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()]));
// Lowers PutByValWithThis: var-args children (base, thisValue, property,
// value); always a runtime call, strict/sloppy variant per origin.
3092 void compilePutByValWithThis()
3094 LValue base = lowJSValue(m_graph.varArgChild(m_node, 0));
3095 LValue thisValue = lowJSValue(m_graph.varArgChild(m_node, 1));
3096 LValue property = lowJSValue(m_graph.varArgChild(m_node, 2));
3097 LValue value = lowJSValue(m_graph.varArgChild(m_node, 3));
3099 vmCall(Void, m_out.operation(m_graph.isStrictModeFor(m_node->origin.semantic) ? operationPutByValWithThisStrict : operationPutByValWithThis),
3100 m_callFrame, base, thisValue, property, value);
3103 void compileAtomicsReadModifyWrite()
3105 TypedArrayType type = m_node->arrayMode().typedArrayType();
3106 unsigned numExtraArgs = numExtraAtomicsArgs(m_node->op());
3107 Edge baseEdge = m_graph.child(m_node, 0);
3108 Edge indexEdge = m_graph.child(m_node, 1);
3109 Edge argEdges[maxNumExtraAtomicsArgs];
3110 for (unsigned i = numExtraArgs; i--;)
3111 argEdges[i] = m_graph.child(m_node, 2 + i);
3112 Edge storageEdge = m_graph.child(m_node, 2 + numExtraArgs);
3114 auto operation = [&] () -> LValue {
3115 switch (m_node->op()) {
3117 return m_out.operation(operationAtomicsAdd);
3119 return m_out.operation(operationAtomicsAnd);
3120 case AtomicsCompareExchange:
3121 return m_out.operation(operationAtomicsCompareExchange);
3122 case AtomicsExchange:
3123 return m_out.operation(operationAtomicsExchange);
3125 return m_out.operation(operationAtomicsLoad);
3127 return m_out.operation(operationAtomicsOr);
3129 return m_out.operation(operationAtomicsStore);
3131 return m_out.operation(operationAtomicsSub);
3133 return m_out.operation(operationAtomicsXor);
3135 RELEASE_ASSERT_NOT_REACHED();
3141 Vector<LValue> args;
3142 args.append(m_callFrame);
3143 args.append(lowJSValue(baseEdge));
3144 args.append(lowJSValue(indexEdge));
3145 for (unsigned i = 0; i < numExtraArgs; ++i)
3146 args.append(lowJSValue(argEdges[i]));