2 * Copyright (C) 2013-2016 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "FTLLowerDFGToB3.h"
31 #include "AirGenerationContext.h"
32 #include "AllowMacroScratchRegisterUsage.h"
33 #include "B3StackmapGenerationParams.h"
34 #include "CallFrameShuffler.h"
35 #include "CodeBlockWithJITType.h"
36 #include "DFGAbstractInterpreterInlines.h"
37 #include "DFGDominators.h"
38 #include "DFGInPlaceAbstractState.h"
39 #include "DFGOSRAvailabilityAnalysisPhase.h"
40 #include "DFGOSRExitFuzz.h"
41 #include "DirectArguments.h"
42 #include "FTLAbstractHeapRepository.h"
43 #include "FTLAvailableRecovery.h"
44 #include "FTLExceptionTarget.h"
45 #include "FTLForOSREntryJITCode.h"
46 #include "FTLFormattedValue.h"
47 #include "FTLLazySlowPathCall.h"
48 #include "FTLLoweredNodeValue.h"
49 #include "FTLOperations.h"
50 #include "FTLOutput.h"
51 #include "FTLPatchpointExceptionHandle.h"
52 #include "FTLThunks.h"
53 #include "FTLWeightedTarget.h"
54 #include "JITAddGenerator.h"
55 #include "JITBitAndGenerator.h"
56 #include "JITBitOrGenerator.h"
57 #include "JITBitXorGenerator.h"
58 #include "JITDivGenerator.h"
59 #include "JITInlineCacheGenerator.h"
60 #include "JITLeftShiftGenerator.h"
61 #include "JITMulGenerator.h"
62 #include "JITRightShiftGenerator.h"
63 #include "JITSubGenerator.h"
64 #include "JSCInlines.h"
65 #include "JSGeneratorFunction.h"
66 #include "JSLexicalEnvironment.h"
67 #include "OperandsInlines.h"
68 #include "ScopedArguments.h"
69 #include "ScopedArgumentsTable.h"
70 #include "ScratchRegisterAllocator.h"
71 #include "SetupVarargsFrame.h"
72 #include "ShadowChicken.h"
73 #include "StructureStubInfo.h"
74 #include "VirtualRegister.h"
80 #include <unordered_set>
82 #include <wtf/ProcessID.h>
84 namespace JSC { namespace FTL {
91 std::atomic<int> compileCounter;
94 NO_RETURN_DUE_TO_CRASH static void ftlUnreachable()
99 NO_RETURN_DUE_TO_CRASH static void ftlUnreachable(
100 CodeBlock* codeBlock, BlockIndex blockIndex, unsigned nodeIndex)
102 dataLog("Crashing in thought-to-be-unreachable FTL-generated code for ", pointerDump(codeBlock), " at basic block #", blockIndex);
103 if (nodeIndex != UINT_MAX)
104 dataLog(", node @", nodeIndex);
110 // Using this instead of typeCheck() helps to reduce the load on B3, by creating
111 // significantly less dead code.
112 #define FTL_TYPE_CHECK_WITH_EXIT_KIND(exitKind, lowValue, highValue, typesPassedThrough, failCondition) do { \
113 FormattedValue _ftc_lowValue = (lowValue); \
114 Edge _ftc_highValue = (highValue); \
115 SpeculatedType _ftc_typesPassedThrough = (typesPassedThrough); \
116 if (!m_interpreter.needsTypeCheck(_ftc_highValue, _ftc_typesPassedThrough)) \
118 typeCheck(_ftc_lowValue, _ftc_highValue, _ftc_typesPassedThrough, (failCondition), exitKind); \
121 #define FTL_TYPE_CHECK(lowValue, highValue, typesPassedThrough, failCondition) \
122 FTL_TYPE_CHECK_WITH_EXIT_KIND(BadType, lowValue, highValue, typesPassedThrough, failCondition)
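// Illustrative only: a call site later in this file (compileCheckStructure's CellOrOther
// path) uses it as
//     FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecCell | SpecOther, isNotOther(value));
// which expands to a typeCheck() with a BadType exit, but compiles to nothing at all when
// the abstract interpreter already proves that the edge can only carry the passed-through
// types.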
125 WTF_MAKE_NONCOPYABLE(LowerDFGToB3);
127 LowerDFGToB3(State& state)
128 : m_graph(state.graph)
131 , m_proc(*state.proc)
132 , m_state(state.graph)
133 , m_interpreter(state.graph, m_state)
139 State* state = &m_ftlState;
142 if (verboseCompilationEnabled()) {
144 "jsBody_", ++compileCounter, "_", codeBlock()->inferredName(),
145 "_", codeBlock()->hash());
149 m_graph.ensureDominators();
151 if (verboseCompilationEnabled())
152 dataLog("Function ready, beginning lowering.\n");
154 m_out.initialize(m_heaps);
156 // We use prologue frequency for all of the initialization code.
157 m_out.setFrequency(1);
159 m_prologue = m_out.newBlock();
160 LBasicBlock stackOverflow = m_out.newBlock();
161 m_handleExceptions = m_out.newBlock();
163 LBasicBlock checkArguments = m_out.newBlock();
165 for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
166 m_highBlock = m_graph.block(blockIndex);
169 m_out.setFrequency(m_highBlock->executionCount);
170 m_blocks.add(m_highBlock, m_out.newBlock());
173 // Back to prologue frequency for any blocks that get sneakily created in the initialization code.
174 m_out.setFrequency(1);
176 m_out.appendTo(m_prologue, stackOverflow);
177 m_out.initializeConstants(m_proc, m_prologue);
178 createPhiVariables();
180 size_t sizeOfCaptured = sizeof(JSValue) * m_graph.m_nextMachineLocal;
181 B3::SlotBaseValue* capturedBase = m_out.lockedStackSlot(sizeOfCaptured);
182 m_captured = m_out.add(capturedBase, m_out.constIntPtr(sizeOfCaptured));
183 state->capturedValue = capturedBase->slot();
185 auto preOrder = m_graph.blocksInPreOrder();
187 // We should not create any allocas after this point, since they will cease to
188 // be mem2reg candidates.
190 m_callFrame = m_out.framePointer();
191 m_tagTypeNumber = m_out.constInt64(TagTypeNumber);
192 m_tagMask = m_out.constInt64(TagMask);
194 // Make sure that B3 knows that we really care about the mask registers. This forces the
195 // constants to be materialized in registers.
196 m_proc.addFastConstant(m_tagTypeNumber->key());
197 m_proc.addFastConstant(m_tagMask->key());
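// A sketch of the 64-bit value encoding these tag constants implement (the authoritative
// definitions live in JSValue, not here): boxed int32s have the TagTypeNumber bits set in
// their high half, doubles are stored with an offset that also leaves them nonzero under
// TagTypeNumber, and cell pointers have none of the TagMask bits set. That is why later
// lowering boxes an int by OR-ing in m_tagTypeNumber and recognizes cells by testing the
// m_tagMask bits.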
199 m_out.storePtr(m_out.constIntPtr(codeBlock()), addressFor(JSStack::CodeBlock));
202 didOverflowStack(), rarely(stackOverflow), usually(checkArguments));
204 m_out.appendTo(stackOverflow, m_handleExceptions);
205 m_out.call(m_out.voidType, m_out.operation(operationThrowStackOverflowError), m_callFrame, m_out.constIntPtr(codeBlock()));
206 m_out.patchpoint(Void)->setGenerator(
207 [=] (CCallHelpers& jit, const StackmapGenerationParams&) {
208 // We are terminal, so we can clobber everything. That's why we don't claim to
210 AllowMacroScratchRegisterUsage allowScratch(jit);
212 jit.copyCalleeSavesToVMCalleeSavesBuffer();
213 jit.move(CCallHelpers::TrustedImmPtr(jit.vm()), GPRInfo::argumentGPR0);
214 jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
215 CCallHelpers::Call call = jit.call();
216 jit.jumpToExceptionHandler();
219 [=] (LinkBuffer& linkBuffer) {
220 linkBuffer.link(call, FunctionPtr(lookupExceptionHandlerFromCallerFrame));
225 m_out.appendTo(m_handleExceptions, checkArguments);
226 Box<CCallHelpers::Label> exceptionHandler = state->exceptionHandler;
227 m_out.patchpoint(Void)->setGenerator(
228 [=] (CCallHelpers& jit, const StackmapGenerationParams&) {
229 CCallHelpers::Jump jump = jit.jump();
231 [=] (LinkBuffer& linkBuffer) {
232 linkBuffer.link(jump, linkBuffer.locationOf(*exceptionHandler));
237 m_out.appendTo(checkArguments, lowBlock(m_graph.block(0)));
238 availabilityMap().clear();
239 availabilityMap().m_locals = Operands<Availability>(codeBlock()->numParameters(), 0);
240 for (unsigned i = codeBlock()->numParameters(); i--;) {
241 availabilityMap().m_locals.argument(i) =
242 Availability(FlushedAt(FlushedJSValue, virtualRegisterForArgument(i)));
245 m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
246 for (unsigned i = codeBlock()->numParameters(); i--;) {
247 Node* node = m_graph.m_arguments[i];
248 VirtualRegister operand = virtualRegisterForArgument(i);
250 LValue jsValue = m_out.load64(addressFor(operand));
253 DFG_ASSERT(m_graph, node, operand == node->stackAccessData()->machineLocal);
255 // This is a hack, but it's an effective one. It allows us to do CSE on the
256 // primordial load of arguments. This assumes that the GetLocal that got put in
257 // place of the original SetArgument doesn't have any effects before it. This
259 m_loadedArgumentValues.add(node, jsValue);
262 switch (m_graph.m_argumentFormats[i]) {
264 speculate(BadType, jsValueValue(jsValue), node, isNotInt32(jsValue));
267 speculate(BadType, jsValueValue(jsValue), node, isNotBoolean(jsValue));
270 speculate(BadType, jsValueValue(jsValue), node, isNotCell(jsValue));
275 DFG_CRASH(m_graph, node, "Bad flush format for argument");
279 m_out.jump(lowBlock(m_graph.block(0)));
281 for (DFG::BasicBlock* block : preOrder)
284 // Make sure everything is decorated. This does a bunch of deferred decorating. This has
285 // to happen last because our abstract heaps are generated lazily. They have to be
286 // generated lazily because we have an infinite number of numbered, indexed, and
287 // absolute heaps. We only become aware of the ones we actually mention while lowering.
288 m_heaps.computeRangesAndDecorateInstructions();
290 // We create all Phis up front, but we may then decide not to compile the basic block
291 // that would have contained one of them. So this creates orphans, which triggers B3
292 // validation failures. Calling this fixes the issue.
294 // Note that you should avoid the temptation to make this call conditional upon
295 // validation being enabled. B3 makes no guarantees of any kind of correctness when
296 // dealing with IR that would have failed validation. For example, it would be valid to
297 // write a B3 phase that so aggressively assumes the lack of orphans that it would crash
298 // if any orphans were around. We might even have such phases already.
299 m_proc.deleteOrphans();
301 // We put the blocks into the B3 procedure in a super weird order. Now we reorder them.
302 m_out.applyBlockOrder();
307 void createPhiVariables()
309 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
310 DFG::BasicBlock* block = m_graph.block(blockIndex);
313 for (unsigned nodeIndex = block->size(); nodeIndex--;) {
314 Node* node = block->at(nodeIndex);
315 if (node->op() != DFG::Phi)
318 switch (node->flags() & NodeResultMask) {
319 case NodeResultDouble:
320 type = m_out.doubleType;
322 case NodeResultInt32:
325 case NodeResultInt52:
328 case NodeResultBoolean:
329 type = m_out.boolean;
335 DFG_CRASH(m_graph, node, "Bad Phi node result type");
338 m_phis.add(node, m_proc.add<Value>(B3::Phi, type, Origin(node)));
343 void compileBlock(DFG::BasicBlock* block)
348 if (verboseCompilationEnabled())
349 dataLog("Compiling block ", *block, "\n");
353 // Make sure that any blocks created while lowering code in the high block have the frequency of
354 // the high block. This is appropriate because B3 doesn't need precise frequencies. It just needs
355 // something roughly approximate for things like register allocation.
356 m_out.setFrequency(m_highBlock->executionCount);
358 LBasicBlock lowBlock = m_blocks.get(m_highBlock);
361 for (BlockIndex nextBlockIndex = m_highBlock->index + 1; nextBlockIndex < m_graph.numBlocks(); ++nextBlockIndex) {
362 m_nextHighBlock = m_graph.block(nextBlockIndex);
366 m_nextLowBlock = m_nextHighBlock ? m_blocks.get(m_nextHighBlock) : 0;
368 // All of this effort to find the next block gives us the ability to keep the
369 // generated IR in roughly program order. This ought not affect the performance
370 // of the generated code (since we expect B3 to reorder things) but it will
371 // make IR dumps easier to read.
372 m_out.appendTo(lowBlock, m_nextLowBlock);
374 if (Options::ftlCrashes())
377 if (!m_highBlock->cfaHasVisited) {
378 if (verboseCompilationEnabled())
379 dataLog("Bailing because CFA didn't reach.\n");
380 crash(m_highBlock->index, UINT_MAX);
384 m_availabilityCalculator.beginBlock(m_highBlock);
387 m_state.beginBasicBlock(m_highBlock);
389 for (m_nodeIndex = 0; m_nodeIndex < m_highBlock->size(); ++m_nodeIndex) {
390 if (!compileNode(m_nodeIndex))
395 void safelyInvalidateAfterTermination()
397 if (verboseCompilationEnabled())
398 dataLog("Bailing.\n");
401 // Invalidate dominated blocks. Under normal circumstances we would expect
402 // them to be invalidated already. But you can have the CFA become more
403 // precise over time because the structures of objects change on the main
404 // thread. Failing to do this would result in weird crashes due to a value
405 // being used but not defined. Race conditions FTW!
406 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
407 DFG::BasicBlock* target = m_graph.block(blockIndex);
410 if (m_graph.m_dominators->dominates(m_highBlock, target)) {
411 if (verboseCompilationEnabled())
412 dataLog("Block ", *target, " will bail also.\n");
413 target->cfaHasVisited = false;
418 bool compileNode(unsigned nodeIndex)
420 if (!m_state.isValid()) {
421 safelyInvalidateAfterTermination();
425 m_node = m_highBlock->at(nodeIndex);
426 m_origin = m_node->origin;
427 m_out.setOrigin(m_node);
429 if (verboseCompilationEnabled())
430 dataLog("Lowering ", m_node, "\n");
432 m_availableRecoveries.resize(0);
434 m_interpreter.startExecuting();
436 switch (m_node->op()) {
446 compileDoubleConstant();
449 compileInt52Constant();
452 compileLazyJSConstant();
458 compileDoubleAsInt32();
467 compileValueToInt32();
469 case BooleanToNumber:
470 compileBooleanToNumber();
472 case ExtractOSREntryLocal:
473 compileExtractOSREntryLocal();
484 case CallObjectConstructor:
485 compileCallObjectConstructor();
498 compileArithAddOrSub();
514 compileArithMinOrMax();
529 compileArithRandom();
550 compileArithFRound();
553 compileArithNegate();
574 compileUInt32ToNumber();
577 compileCheckStructure();
583 compileCheckNotEmpty();
586 compileCheckBadCell();
592 compileGetExecutable();
594 case ArrayifyToStructure:
595 compileArrayifyToStructure();
598 compilePutStructure();
601 compileGetById(AccessType::GetPure);
605 compileGetById(AccessType::Get);
617 compilePutAccessorById();
619 case PutGetterSetterById:
620 compilePutGetterSetterById();
624 compilePutAccessorByVal();
627 compileGetButterfly();
629 case ConstantStoragePointer:
630 compileConstantStoragePointer();
632 case GetIndexedPropertyStorage:
633 compileGetIndexedPropertyStorage();
639 compileGetArrayLength();
642 compileCheckInBounds();
647 case GetMyArgumentByVal:
648 compileGetMyArgumentByVal();
661 case CreateActivation:
662 compileCreateActivation();
665 case NewGeneratorFunction:
666 compileNewFunction();
668 case CreateDirectArguments:
669 compileCreateDirectArguments();
671 case CreateScopedArguments:
672 compileCreateScopedArguments();
674 case CreateClonedArguments:
675 compileCreateClonedArguments();
684 compileNewArrayBuffer();
686 case NewArrayWithSize:
687 compileNewArrayWithSize();
690 compileNewTypedArray();
692 case GetTypedArrayByteOffset:
693 compileGetTypedArrayByteOffset();
695 case AllocatePropertyStorage:
696 compileAllocatePropertyStorage();
698 case ReallocatePropertyStorage:
699 compileReallocatePropertyStorage();
702 case CallStringConstructor:
703 compileToStringOrCallStringConstructor();
706 compileToPrimitive();
712 compileStringCharAt();
714 case StringCharCodeAt:
715 compileStringCharCodeAt();
717 case StringFromCharCode:
718 compileStringFromCharCode();
721 case GetGetterSetterByOffset:
722 compileGetByOffset();
730 case MultiGetByOffset:
731 compileMultiGetByOffset();
734 compilePutByOffset();
736 case MultiPutByOffset:
737 compileMultiPutByOffset();
740 case GetGlobalLexicalVariable:
741 compileGetGlobalVariable();
743 case PutGlobalVariable:
744 compilePutGlobalVariable();
747 compileNotifyWrite();
752 case GetArgumentCount:
753 compileGetArgumentCount();
761 case GetGlobalObject:
762 compileGetGlobalObject();
765 compileGetClosureVar();
768 compilePutClosureVar();
770 case GetFromArguments:
771 compileGetFromArguments();
774 compilePutToArguments();
779 case CompareStrictEq:
780 compileCompareStrictEq();
783 compileCompareLess();
786 compileCompareLessEq();
789 compileCompareGreater();
791 case CompareGreaterEq:
792 compileCompareGreaterEq();
798 case TailCallInlinedCaller:
800 compileCallOrConstruct();
806 case CallForwardVarargs:
807 case TailCallVarargs:
808 case TailCallVarargsInlinedCaller:
809 case TailCallForwardVarargs:
810 case TailCallForwardVarargsInlinedCaller:
811 case ConstructVarargs:
812 case ConstructForwardVarargs:
813 compileCallOrConstructVarargs();
816 compileLoadVarargs();
819 compileForwardVarargs();
834 compileForceOSRExit();
837 case ThrowReferenceError:
840 case InvalidationPoint:
841 compileInvalidationPoint();
844 compileIsUndefined();
856 compileIsArrayObject();
861 case IsArrayConstructor:
862 compileIsArrayConstructor();
868 compileIsObjectOrNull();
876 case CheckTypeInfoFlags:
877 compileCheckTypeInfoFlags();
879 case OverridesHasInstance:
880 compileOverridesHasInstance();
885 case InstanceOfCustom:
886 compileInstanceOfCustom();
889 compileCountExecution();
892 compileStoreBarrier();
894 case HasIndexedProperty:
895 compileHasIndexedProperty();
897 case HasGenericProperty:
898 compileHasGenericProperty();
900 case HasStructureProperty:
901 compileHasStructureProperty();
904 compileGetDirectPname();
906 case GetEnumerableLength:
907 compileGetEnumerableLength();
909 case GetPropertyEnumerator:
910 compileGetPropertyEnumerator();
912 case GetEnumeratorStructurePname:
913 compileGetEnumeratorStructurePname();
915 case GetEnumeratorGenericPname:
916 compileGetEnumeratorGenericPname();
919 compileToIndexString();
921 case CheckStructureImmediate:
922 compileCheckStructureImmediate();
924 case MaterializeNewObject:
925 compileMaterializeNewObject();
927 case MaterializeCreateActivation:
928 compileMaterializeCreateActivation();
930 case CheckWatchdogTimer:
931 compileCheckWatchdogTimer();
937 compileGetRestLength();
948 case SetFunctionName:
949 compileSetFunctionName();
952 compileStringReplace();
954 case GetRegExpObjectLastIndex:
955 compileGetRegExpObjectLastIndex();
957 case SetRegExpObjectLastIndex:
958 compileSetRegExpObjectLastIndex();
960 case LogShadowChickenPrologue:
961 compileLogShadowChickenPrologue();
963 case LogShadowChickenTail:
964 compileLogShadowChickenTail();
966 case RecordRegExpCachedResult:
967 compileRecordRegExpCachedResult();
975 case PhantomNewObject:
976 case PhantomNewFunction:
977 case PhantomNewGeneratorFunction:
978 case PhantomCreateActivation:
979 case PhantomDirectArguments:
980 case PhantomClonedArguments:
986 DFG_CRASH(m_graph, m_node, "Unrecognized node in FTL backend");
990 if (m_node->isTerminal())
993 if (!m_state.isValid()) {
994 safelyInvalidateAfterTermination();
998 m_availabilityCalculator.executeNode(m_node);
999 m_interpreter.executeEffects(nodeIndex);
1004 void compileUpsilon()
1006 LValue upsilonValue = nullptr;
1007 switch (m_node->child1().useKind()) {
1009 upsilonValue = lowDouble(m_node->child1());
1013 upsilonValue = lowInt32(m_node->child1());
1016 upsilonValue = lowInt52(m_node->child1());
1019 case KnownBooleanUse:
1020 upsilonValue = lowBoolean(m_node->child1());
1024 upsilonValue = lowCell(m_node->child1());
1027 upsilonValue = lowJSValue(m_node->child1());
1030 DFG_CRASH(m_graph, m_node, "Bad use kind");
1033 ValueFromBlock upsilon = m_out.anchor(upsilonValue);
1034 LValue phiNode = m_phis.get(m_node->phi());
1035 m_out.addIncomingToPhi(phiNode, upsilon);
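// For readers new to B3's SSA form: lowering a DFG Upsilon just records its child's value
// as an incoming value of the B3 Phi that m_phis associates with the corresponding DFG Phi,
// the same role a phi operand plays in textbook SSA. compilePhi() below then simply appends
// that pre-created B3 Phi to the current block.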
1040 LValue phi = m_phis.get(m_node);
1041 m_out.m_block->append(phi);
1043 switch (m_node->flags() & NodeResultMask) {
1044 case NodeResultDouble:
1047 case NodeResultInt32:
1050 case NodeResultInt52:
1053 case NodeResultBoolean:
1060 DFG_CRASH(m_graph, m_node, "Bad use kind");
1065 void compileDoubleConstant()
1067 setDouble(m_out.constDouble(m_node->asNumber()));
1070 void compileInt52Constant()
1072 int64_t value = m_node->asMachineInt();
1074 setInt52(m_out.constInt64(value << JSValue::int52ShiftAmount));
1075 setStrictInt52(m_out.constInt64(value));
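// A note on the two Int52 forms used throughout this file: the "shifted" Int52 keeps the
// value left-shifted by JSValue::int52ShiftAmount so that Int52 overflow manifests as plain
// 64-bit overflow (which B3's checked arithmetic can trap on), while "strict" Int52 is the
// unshifted two's-complement value. A constant is cheap either way, so both forms are
// recorded here.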
1078 void compileLazyJSConstant()
1080 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
1081 LazyJSValue value = m_node->lazyJSValue();
1082 patchpoint->setGenerator(
1083 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
1084 value.emit(jit, JSValueRegs(params[0].gpr()));
1086 patchpoint->effects = Effects::none();
1087 setJSValue(patchpoint);
1090 void compileDoubleRep()
1092 switch (m_node->child1().useKind()) {
1093 case RealNumberUse: {
1094 LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);
1096 LValue doubleValue = unboxDouble(value);
1098 LBasicBlock intCase = m_out.newBlock();
1099 LBasicBlock continuation = m_out.newBlock();
1101 ValueFromBlock fastResult = m_out.anchor(doubleValue);
1103 m_out.doubleEqual(doubleValue, doubleValue),
1104 usually(continuation), rarely(intCase));
1106 LBasicBlock lastNext = m_out.appendTo(intCase, continuation);
1109 jsValueValue(value), m_node->child1(), SpecBytecodeRealNumber,
1110 isNotInt32(value, provenType(m_node->child1()) & ~SpecFullDouble));
1111 ValueFromBlock slowResult = m_out.anchor(m_out.intToDouble(unboxInt32(value)));
1112 m_out.jump(continuation);
1114 m_out.appendTo(continuation, lastNext);
1116 setDouble(m_out.phi(m_out.doubleType, fastResult, slowResult));
1122 bool shouldConvertNonNumber = m_node->child1().useKind() == NotCellUse;
1124 LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);
1126 LBasicBlock intCase = m_out.newBlock();
1127 LBasicBlock doubleTesting = m_out.newBlock();
1128 LBasicBlock doubleCase = m_out.newBlock();
1129 LBasicBlock nonDoubleCase = m_out.newBlock();
1130 LBasicBlock continuation = m_out.newBlock();
1133 isNotInt32(value, provenType(m_node->child1())),
1134 unsure(doubleTesting), unsure(intCase));
1136 LBasicBlock lastNext = m_out.appendTo(intCase, doubleTesting);
1138 ValueFromBlock intToDouble = m_out.anchor(
1139 m_out.intToDouble(unboxInt32(value)));
1140 m_out.jump(continuation);
1142 m_out.appendTo(doubleTesting, doubleCase);
1143 LValue valueIsNumber = isNumber(value, provenType(m_node->child1()));
1144 m_out.branch(valueIsNumber, usually(doubleCase), rarely(nonDoubleCase));
1146 m_out.appendTo(doubleCase, nonDoubleCase);
1147 ValueFromBlock unboxedDouble = m_out.anchor(unboxDouble(value));
1148 m_out.jump(continuation);
1150 if (shouldConvertNonNumber) {
1151 LBasicBlock undefinedCase = m_out.newBlock();
1152 LBasicBlock testNullCase = m_out.newBlock();
1153 LBasicBlock nullCase = m_out.newBlock();
1154 LBasicBlock testBooleanTrueCase = m_out.newBlock();
1155 LBasicBlock convertBooleanTrueCase = m_out.newBlock();
1156 LBasicBlock convertBooleanFalseCase = m_out.newBlock();
1158 m_out.appendTo(nonDoubleCase, undefinedCase);
1159 LValue valueIsUndefined = m_out.equal(value, m_out.constInt64(ValueUndefined));
1160 m_out.branch(valueIsUndefined, unsure(undefinedCase), unsure(testNullCase));
1162 m_out.appendTo(undefinedCase, testNullCase);
1163 ValueFromBlock convertedUndefined = m_out.anchor(m_out.constDouble(PNaN));
1164 m_out.jump(continuation);
1166 m_out.appendTo(testNullCase, nullCase);
1167 LValue valueIsNull = m_out.equal(value, m_out.constInt64(ValueNull));
1168 m_out.branch(valueIsNull, unsure(nullCase), unsure(testBooleanTrueCase));
1170 m_out.appendTo(nullCase, testBooleanTrueCase);
1171 ValueFromBlock convertedNull = m_out.anchor(m_out.constDouble(0));
1172 m_out.jump(continuation);
1174 m_out.appendTo(testBooleanTrueCase, convertBooleanTrueCase);
1175 LValue valueIsBooleanTrue = m_out.equal(value, m_out.constInt64(ValueTrue));
1176 m_out.branch(valueIsBooleanTrue, unsure(convertBooleanTrueCase), unsure(convertBooleanFalseCase));
1178 m_out.appendTo(convertBooleanTrueCase, convertBooleanFalseCase);
1179 ValueFromBlock convertedTrue = m_out.anchor(m_out.constDouble(1));
1180 m_out.jump(continuation);
1182 m_out.appendTo(convertBooleanFalseCase, continuation);
1184 LValue valueIsNotBooleanFalse = m_out.notEqual(value, m_out.constInt64(ValueFalse));
1185 FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), ~SpecCell, valueIsNotBooleanFalse);
1186 ValueFromBlock convertedFalse = m_out.anchor(m_out.constDouble(0));
1187 m_out.jump(continuation);
1189 m_out.appendTo(continuation, lastNext);
1190 setDouble(m_out.phi(m_out.doubleType, intToDouble, unboxedDouble, convertedUndefined, convertedNull, convertedTrue, convertedFalse));
1193 m_out.appendTo(nonDoubleCase, continuation);
1194 FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecBytecodeNumber, m_out.booleanTrue);
1195 m_out.unreachable();
1197 m_out.appendTo(continuation, lastNext);
1199 setDouble(m_out.phi(m_out.doubleType, intToDouble, unboxedDouble));
1204 setDouble(strictInt52ToDouble(lowStrictInt52(m_node->child1())));
1209 DFG_CRASH(m_graph, m_node, "Bad use kind");
1213 void compileDoubleAsInt32()
1215 LValue integerValue = convertDoubleToInt32(lowDouble(m_node->child1()), shouldCheckNegativeZero(m_node->arithMode()));
1216 setInt32(integerValue);
1219 void compileValueRep()
1221 switch (m_node->child1().useKind()) {
1222 case DoubleRepUse: {
1223 LValue value = lowDouble(m_node->child1());
1225 if (m_interpreter.needsTypeCheck(m_node->child1(), ~SpecDoubleImpureNaN)) {
1226 value = m_out.select(
1227 m_out.doubleEqual(value, value), value, m_out.constDouble(PNaN));
1230 setJSValue(boxDouble(value));
1235 setJSValue(strictInt52ToJSValue(lowStrictInt52(m_node->child1())));
1240 DFG_CRASH(m_graph, m_node, "Bad use kind");
1244 void compileInt52Rep()
1246 switch (m_node->child1().useKind()) {
1248 setStrictInt52(m_out.signExt32To64(lowInt32(m_node->child1())));
1253 jsValueToStrictInt52(
1254 m_node->child1(), lowJSValue(m_node->child1(), ManualOperandSpeculation)));
1257 case DoubleRepMachineIntUse:
1259 doubleToStrictInt52(
1260 m_node->child1(), lowDouble(m_node->child1())));
1264 RELEASE_ASSERT_NOT_REACHED();
1268 void compileValueToInt32()
1270 switch (m_node->child1().useKind()) {
1272 setInt32(m_out.castToInt32(lowStrictInt52(m_node->child1())));
1276 setInt32(doubleToInt32(lowDouble(m_node->child1())));
1281 LoweredNodeValue value = m_int32Values.get(m_node->child1().node());
1282 if (isValid(value)) {
1283 setInt32(value.value());
1287 value = m_jsValueValues.get(m_node->child1().node());
1288 if (isValid(value)) {
1289 setInt32(numberOrNotCellToInt32(m_node->child1(), value.value()));
1293 // We'll basically just get here for constants. But it's good to have this
1294 // catch-all since we often add new representations into the mix.
1296 numberOrNotCellToInt32(
1298 lowJSValue(m_node->child1(), ManualOperandSpeculation)));
1303 DFG_CRASH(m_graph, m_node, "Bad use kind");
1308 void compileBooleanToNumber()
1310 switch (m_node->child1().useKind()) {
1312 setInt32(m_out.zeroExt(lowBoolean(m_node->child1()), m_out.int32));
1317 LValue value = lowJSValue(m_node->child1());
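// Why the fast path below can just mask with 1 (assuming JSC's usual 64-bit encoding):
// boxed true and false differ only in their low bit, and a boxed int32 that is already
// 0 or 1 has that same low bit as its value, so bitAnd covers everything that
// SpecBoolInt32 | SpecBoolean admits.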
1319 if (!m_interpreter.needsTypeCheck(m_node->child1(), SpecBoolInt32 | SpecBoolean)) {
1320 setInt32(m_out.bitAnd(m_out.castToInt32(value), m_out.int32One));
1324 LBasicBlock booleanCase = m_out.newBlock();
1325 LBasicBlock continuation = m_out.newBlock();
1327 ValueFromBlock notBooleanResult = m_out.anchor(value);
1329 isBoolean(value, provenType(m_node->child1())),
1330 unsure(booleanCase), unsure(continuation));
1332 LBasicBlock lastNext = m_out.appendTo(booleanCase, continuation);
1333 ValueFromBlock booleanResult = m_out.anchor(m_out.bitOr(
1334 m_out.zeroExt(unboxBoolean(value), m_out.int64), m_tagTypeNumber));
1335 m_out.jump(continuation);
1337 m_out.appendTo(continuation, lastNext);
1338 setJSValue(m_out.phi(m_out.int64, booleanResult, notBooleanResult));
1343 RELEASE_ASSERT_NOT_REACHED();
1348 void compileExtractOSREntryLocal()
1350 EncodedJSValue* buffer = static_cast<EncodedJSValue*>(
1351 m_ftlState.jitCode->ftlForOSREntry()->entryBuffer()->dataBuffer());
1352 setJSValue(m_out.load64(m_out.absolute(buffer + m_node->unlinkedLocal().toLocal())));
1355 void compileGetStack()
1357 // GetLocals arise only for captured variables and arguments. For arguments, we might have
1358 // already loaded it.
1359 if (LValue value = m_loadedArgumentValues.get(m_node)) {
1364 StackAccessData* data = m_node->stackAccessData();
1365 AbstractValue& value = m_state.variables().operand(data->local);
1367 DFG_ASSERT(m_graph, m_node, isConcrete(data->format));
1368 DFG_ASSERT(m_graph, m_node, data->format != FlushedDouble); // This just happens to not arise for GetStacks, right now. It would be trivial to support.
1370 if (isInt32Speculation(value.m_type))
1371 setInt32(m_out.load32(payloadFor(data->machineLocal)));
1373 setJSValue(m_out.load64(addressFor(data->machineLocal)));
1376 void compilePutStack()
1378 StackAccessData* data = m_node->stackAccessData();
1379 switch (data->format) {
1380 case FlushedJSValue: {
1381 LValue value = lowJSValue(m_node->child1());
1382 m_out.store64(value, addressFor(data->machineLocal));
1386 case FlushedDouble: {
1387 LValue value = lowDouble(m_node->child1());
1388 m_out.storeDouble(value, addressFor(data->machineLocal));
1392 case FlushedInt32: {
1393 LValue value = lowInt32(m_node->child1());
1394 m_out.store32(value, payloadFor(data->machineLocal));
1398 case FlushedInt52: {
1399 LValue value = lowInt52(m_node->child1());
1400 m_out.store64(value, addressFor(data->machineLocal));
1405 LValue value = lowCell(m_node->child1());
1406 m_out.store64(value, addressFor(data->machineLocal));
1410 case FlushedBoolean: {
1411 speculateBoolean(m_node->child1());
1413 lowJSValue(m_node->child1(), ManualOperandSpeculation),
1414 addressFor(data->machineLocal));
1419 DFG_CRASH(m_graph, m_node, "Bad flush format");
1426 DFG_NODE_DO_TO_CHILDREN(m_graph, m_node, speculate);
1429 void compileCallObjectConstructor()
1431 LValue value = lowJSValue(m_node->child1());
1433 LBasicBlock isCellCase = m_out.newBlock();
1434 LBasicBlock slowCase = m_out.newBlock();
1435 LBasicBlock continuation = m_out.newBlock();
1437 m_out.branch(isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));
1439 LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
1440 ValueFromBlock fastResult = m_out.anchor(value);
1441 m_out.branch(isObject(value), usually(continuation), rarely(slowCase));
1443 m_out.appendTo(slowCase, continuation);
1444 ValueFromBlock slowResult = m_out.anchor(vmCall(m_out.int64, m_out.operation(operationToObject), m_callFrame, value));
1445 m_out.jump(continuation);
1447 m_out.appendTo(continuation, lastNext);
1448 setJSValue(m_out.phi(m_out.int64, fastResult, slowResult));
1451 void compileToThis()
1453 LValue value = lowJSValue(m_node->child1());
1455 LBasicBlock isCellCase = m_out.newBlock();
1456 LBasicBlock slowCase = m_out.newBlock();
1457 LBasicBlock continuation = m_out.newBlock();
1460 isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));
1462 LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
1463 ValueFromBlock fastResult = m_out.anchor(value);
1464 m_out.branch(isType(value, FinalObjectType), usually(continuation), rarely(slowCase));
1466 m_out.appendTo(slowCase, continuation);
1467 J_JITOperation_EJ function;
1468 if (m_graph.isStrictModeFor(m_node->origin.semantic))
1469 function = operationToThisStrict;
1471 function = operationToThis;
1472 ValueFromBlock slowResult = m_out.anchor(
1473 vmCall(m_out.int64, m_out.operation(function), m_callFrame, value));
1474 m_out.jump(continuation);
1476 m_out.appendTo(continuation, lastNext);
1477 setJSValue(m_out.phi(m_out.int64, fastResult, slowResult));
1480 void compileValueAdd()
1482 emitBinarySnippet<JITAddGenerator>(operationValueAdd);
1485 void compileStrCat()
1488 if (m_node->child3()) {
1490 m_out.int64, m_out.operation(operationStrCat3), m_callFrame,
1491 lowJSValue(m_node->child1(), ManualOperandSpeculation),
1492 lowJSValue(m_node->child2(), ManualOperandSpeculation),
1493 lowJSValue(m_node->child3(), ManualOperandSpeculation));
1496 m_out.int64, m_out.operation(operationStrCat2), m_callFrame,
1497 lowJSValue(m_node->child1(), ManualOperandSpeculation),
1498 lowJSValue(m_node->child2(), ManualOperandSpeculation));
1503 void compileArithAddOrSub()
1505 bool isSub = m_node->op() == ArithSub;
1506 switch (m_node->binaryUseKind()) {
1508 LValue left = lowInt32(m_node->child1());
1509 LValue right = lowInt32(m_node->child2());
1511 if (!shouldCheckOverflow(m_node->arithMode())) {
1512 setInt32(isSub ? m_out.sub(left, right) : m_out.add(left, right));
1516 CheckValue* result =
1517 isSub ? m_out.speculateSub(left, right) : m_out.speculateAdd(left, right);
1518 blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
1524 if (!abstractValue(m_node->child1()).couldBeType(SpecInt52)
1525 && !abstractValue(m_node->child2()).couldBeType(SpecInt52)) {
1527 LValue left = lowWhicheverInt52(m_node->child1(), kind);
1528 LValue right = lowInt52(m_node->child2(), kind);
1529 setInt52(isSub ? m_out.sub(left, right) : m_out.add(left, right), kind);
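// No overflow check is needed on this path: SpecInt52 here covers machine ints that do not
// fit in int32, so if neither child can be one, both operands are int32-sized and their sum
// or difference fits comfortably within 52 bits.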
1533 LValue left = lowInt52(m_node->child1());
1534 LValue right = lowInt52(m_node->child2());
1535 CheckValue* result =
1536 isSub ? m_out.speculateSub(left, right) : m_out.speculateAdd(left, right);
1537 blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
1542 case DoubleRepUse: {
1543 LValue C1 = lowDouble(m_node->child1());
1544 LValue C2 = lowDouble(m_node->child2());
1546 setDouble(isSub ? m_out.doubleSub(C1, C2) : m_out.doubleAdd(C1, C2));
1552 DFG_CRASH(m_graph, m_node, "Bad use kind");
1556 emitBinarySnippet<JITSubGenerator>(operationValueSub);
1561 DFG_CRASH(m_graph, m_node, "Bad use kind");
1566 void compileArithClz32()
1568 LValue operand = lowInt32(m_node->child1());
1569 setInt32(m_out.ctlz32(operand));
1572 void compileArithMul()
1574 switch (m_node->binaryUseKind()) {
1576 LValue left = lowInt32(m_node->child1());
1577 LValue right = lowInt32(m_node->child2());
1581 if (!shouldCheckOverflow(m_node->arithMode()))
1582 result = m_out.mul(left, right);
1584 CheckValue* speculation = m_out.speculateMul(left, right);
1585 blessSpeculation(speculation, Overflow, noValue(), nullptr, m_origin);
1586 result = speculation;
1589 if (shouldCheckNegativeZero(m_node->arithMode())) {
1590 LBasicBlock slowCase = m_out.newBlock();
1591 LBasicBlock continuation = m_out.newBlock();
1594 m_out.notZero32(result), usually(continuation), rarely(slowCase));
1596 LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
1597 speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int32Zero));
1598 speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int32Zero));
1599 m_out.jump(continuation);
1600 m_out.appendTo(continuation, lastNext);
1609 LValue left = lowWhicheverInt52(m_node->child1(), kind);
1610 LValue right = lowInt52(m_node->child2(), opposite(kind));
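// The operands are deliberately taken in opposite forms: since (a << shift) * b equals
// (a * b) << shift, multiplying one shifted Int52 by one strict Int52 produces the product
// already in shifted form, and the checked multiply still traps if the 64-bit result
// overflows.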
1612 CheckValue* result = m_out.speculateMul(left, right);
1613 blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
1615 if (shouldCheckNegativeZero(m_node->arithMode())) {
1616 LBasicBlock slowCase = m_out.newBlock();
1617 LBasicBlock continuation = m_out.newBlock();
1620 m_out.notZero64(result), usually(continuation), rarely(slowCase));
1622 LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
1623 speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int64Zero));
1624 speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int64Zero));
1625 m_out.jump(continuation);
1626 m_out.appendTo(continuation, lastNext);
1633 case DoubleRepUse: {
1635 m_out.doubleMul(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
1640 emitBinarySnippet<JITMulGenerator>(operationValueMul);
1645 DFG_CRASH(m_graph, m_node, "Bad use kind");
1650 void compileArithDiv()
1652 switch (m_node->binaryUseKind()) {
1654 LValue numerator = lowInt32(m_node->child1());
1655 LValue denominator = lowInt32(m_node->child2());
1657 if (shouldCheckNegativeZero(m_node->arithMode())) {
1658 LBasicBlock zeroNumerator = m_out.newBlock();
1659 LBasicBlock numeratorContinuation = m_out.newBlock();
1662 m_out.isZero32(numerator),
1663 rarely(zeroNumerator), usually(numeratorContinuation));
1665 LBasicBlock innerLastNext = m_out.appendTo(zeroNumerator, numeratorContinuation);
1668 NegativeZero, noValue(), 0, m_out.lessThan(denominator, m_out.int32Zero));
1670 m_out.jump(numeratorContinuation);
1672 m_out.appendTo(numeratorContinuation, innerLastNext);
1675 if (shouldCheckOverflow(m_node->arithMode())) {
1676 LBasicBlock unsafeDenominator = m_out.newBlock();
1677 LBasicBlock continuation = m_out.newBlock();
1679 LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
1681 m_out.above(adjustedDenominator, m_out.int32One),
1682 usually(continuation), rarely(unsafeDenominator));
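// The +1 maps the two unsafe denominators onto small unsigned values (0 becomes 1, -1
// becomes 0), so one unsigned "above 1" comparison sends every safe denominator down the
// fast path and only 0 and -1 into the slow block where the precise overflow speculations
// are emitted. compileArithMod() below uses the same trick.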
1684 LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
1685 LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
1686 speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator));
1687 speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31));
1688 m_out.jump(continuation);
1690 m_out.appendTo(continuation, lastNext);
1691 LValue result = m_out.div(numerator, denominator);
1693 Overflow, noValue(), 0,
1694 m_out.notEqual(m_out.mul(result, denominator), numerator));
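// The quotient is only acceptable if the division was exact: if result * denominator does
// not reproduce the numerator there was a remainder, the true result is not an int32, and
// we exit rather than return a silently truncated value.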
1697 setInt32(m_out.chillDiv(numerator, denominator));
1702 case DoubleRepUse: {
1703 setDouble(m_out.doubleDiv(
1704 lowDouble(m_node->child1()), lowDouble(m_node->child2())));
1709 emitBinarySnippet<JITDivGenerator, NeedScratchFPR>(operationValueDiv);
1714 DFG_CRASH(m_graph, m_node, "Bad use kind");
1719 void compileArithMod()
1721 switch (m_node->binaryUseKind()) {
1723 LValue numerator = lowInt32(m_node->child1());
1724 LValue denominator = lowInt32(m_node->child2());
1727 if (shouldCheckOverflow(m_node->arithMode())) {
1728 LBasicBlock unsafeDenominator = m_out.newBlock();
1729 LBasicBlock continuation = m_out.newBlock();
1731 LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
1733 m_out.above(adjustedDenominator, m_out.int32One),
1734 usually(continuation), rarely(unsafeDenominator));
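// Same unsigned "denominator + 1 above 1" filter as in compileArithDiv() above: only
// denominators of 0 and -1 take the slow block with the explicit overflow speculations.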
1736 LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
1737 LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
1738 speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator));
1739 speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31));
1740 m_out.jump(continuation);
1742 m_out.appendTo(continuation, lastNext);
1743 LValue result = m_out.mod(numerator, denominator);
1746 remainder = m_out.chillMod(numerator, denominator);
1748 if (shouldCheckNegativeZero(m_node->arithMode())) {
1749 LBasicBlock negativeNumerator = m_out.newBlock();
1750 LBasicBlock numeratorContinuation = m_out.newBlock();
1753 m_out.lessThan(numerator, m_out.int32Zero),
1754 unsure(negativeNumerator), unsure(numeratorContinuation));
1756 LBasicBlock innerLastNext = m_out.appendTo(negativeNumerator, numeratorContinuation);
1758 speculate(NegativeZero, noValue(), 0, m_out.isZero32(remainder));
1760 m_out.jump(numeratorContinuation);
1762 m_out.appendTo(numeratorContinuation, innerLastNext);
1765 setInt32(remainder);
1769 case DoubleRepUse: {
1771 m_out.doubleMod(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
1776 DFG_CRASH(m_graph, m_node, "Bad use kind");
1781 void compileArithMinOrMax()
1783 switch (m_node->binaryUseKind()) {
1785 LValue left = lowInt32(m_node->child1());
1786 LValue right = lowInt32(m_node->child2());
1790 m_node->op() == ArithMin
1791 ? m_out.lessThan(left, right)
1792 : m_out.lessThan(right, left),
1797 case DoubleRepUse: {
1798 LValue left = lowDouble(m_node->child1());
1799 LValue right = lowDouble(m_node->child2());
1801 LBasicBlock notLessThan = m_out.newBlock();
1802 LBasicBlock continuation = m_out.newBlock();
1804 Vector<ValueFromBlock, 2> results;
1806 results.append(m_out.anchor(left));
1808 m_node->op() == ArithMin
1809 ? m_out.doubleLessThan(left, right)
1810 : m_out.doubleGreaterThan(left, right),
1811 unsure(continuation), unsure(notLessThan));
1813 LBasicBlock lastNext = m_out.appendTo(notLessThan, continuation);
1814 results.append(m_out.anchor(m_out.select(
1815 m_node->op() == ArithMin
1816 ? m_out.doubleGreaterThanOrEqual(left, right)
1817 : m_out.doubleLessThanOrEqual(left, right),
1818 right, m_out.constDouble(PNaN))));
1819 m_out.jump(continuation);
1821 m_out.appendTo(continuation, lastNext);
1822 setDouble(m_out.phi(m_out.doubleType, results));
1827 DFG_CRASH(m_graph, m_node, "Bad use kind");
1832 void compileArithAbs()
1834 switch (m_node->child1().useKind()) {
1836 LValue value = lowInt32(m_node->child1());
1838 LValue mask = m_out.aShr(value, m_out.constInt32(31));
1839 LValue result = m_out.bitXor(mask, m_out.add(mask, value));
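// Branchless abs: the arithmetic shift makes mask all ones for a negative value and zero
// otherwise, so the expression is 0 ^ (0 + value) == value for non-negative inputs and
// ~(value - 1) == -value for negative ones. The lone exception is INT32_MIN, whose negation
// is still negative, which is exactly what the overflow speculation below catches.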
1841 if (shouldCheckOverflow(m_node->arithMode()))
1842 speculate(Overflow, noValue(), 0, m_out.lessThan(result, m_out.int32Zero));
1848 case DoubleRepUse: {
1849 setDouble(m_out.doubleAbs(lowDouble(m_node->child1())));
1854 DFG_CRASH(m_graph, m_node, "Bad use kind");
1859 void compileArithSin() { setDouble(m_out.doubleSin(lowDouble(m_node->child1()))); }
1861 void compileArithCos() { setDouble(m_out.doubleCos(lowDouble(m_node->child1()))); }
1863 void compileArithPow()
1865 if (m_node->child2().useKind() == Int32Use)
1866 setDouble(m_out.doublePowi(lowDouble(m_node->child1()), lowInt32(m_node->child2())));
1868 LValue base = lowDouble(m_node->child1());
1869 LValue exponent = lowDouble(m_node->child2());
1871 LBasicBlock integerExponentIsSmallBlock = m_out.newBlock();
1872 LBasicBlock integerExponentPowBlock = m_out.newBlock();
1873 LBasicBlock doubleExponentPowBlockEntry = m_out.newBlock();
1874 LBasicBlock nanExceptionExponentIsInfinity = m_out.newBlock();
1875 LBasicBlock nanExceptionBaseIsOne = m_out.newBlock();
1876 LBasicBlock powBlock = m_out.newBlock();
1877 LBasicBlock nanExceptionResultIsNaN = m_out.newBlock();
1878 LBasicBlock continuation = m_out.newBlock();
1880 LValue integerExponent = m_out.doubleToInt(exponent);
1881 LValue integerExponentConvertedToDouble = m_out.intToDouble(integerExponent);
1882 LValue exponentIsInteger = m_out.doubleEqual(exponent, integerExponentConvertedToDouble);
1883 m_out.branch(exponentIsInteger, unsure(integerExponentIsSmallBlock), unsure(doubleExponentPowBlockEntry));
1885 LBasicBlock lastNext = m_out.appendTo(integerExponentIsSmallBlock, integerExponentPowBlock);
1886 LValue integerExponentBelow1000 = m_out.below(integerExponent, m_out.constInt32(1000));
1887 m_out.branch(integerExponentBelow1000, usually(integerExponentPowBlock), rarely(doubleExponentPowBlockEntry));
1889 m_out.appendTo(integerExponentPowBlock, doubleExponentPowBlockEntry);
1890 ValueFromBlock powDoubleIntResult = m_out.anchor(m_out.doublePowi(base, integerExponent));
1891 m_out.jump(continuation);
1893 // If y is NaN, the result is NaN.
1894 m_out.appendTo(doubleExponentPowBlockEntry, nanExceptionExponentIsInfinity);
1895 LValue exponentIsNaN;
1896 if (provenType(m_node->child2()) & SpecDoubleNaN)
1897 exponentIsNaN = m_out.doubleNotEqualOrUnordered(exponent, exponent);
1899 exponentIsNaN = m_out.booleanFalse;
1900 m_out.branch(exponentIsNaN, rarely(nanExceptionResultIsNaN), usually(nanExceptionExponentIsInfinity));
1902 // If abs(x) is 1 and y is +infinity, the result is NaN.
1903 // If abs(x) is 1 and y is -infinity, the result is NaN.
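// (This filtering exists because the C library and JavaScript disagree here: C's pow()
// returns 1.0 for pow(+/-1, +/-infinity), while the ES spec requires NaN, so the case must
// be handled before falling through to the doublePow call below.)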
1904 m_out.appendTo(nanExceptionExponentIsInfinity, nanExceptionBaseIsOne);
1905 LValue absoluteExponent = m_out.doubleAbs(exponent);
1906 LValue absoluteExponentIsInfinity = m_out.doubleEqual(absoluteExponent, m_out.constDouble(std::numeric_limits<double>::infinity()));
1907 m_out.branch(absoluteExponentIsInfinity, rarely(nanExceptionBaseIsOne), usually(powBlock));
1909 m_out.appendTo(nanExceptionBaseIsOne, powBlock);
1910 LValue absoluteBase = m_out.doubleAbs(base);
1911 LValue absoluteBaseIsOne = m_out.doubleEqual(absoluteBase, m_out.constDouble(1));
1912 m_out.branch(absoluteBaseIsOne, unsure(nanExceptionResultIsNaN), unsure(powBlock));
1914 m_out.appendTo(powBlock, nanExceptionResultIsNaN);
1915 ValueFromBlock powResult = m_out.anchor(m_out.doublePow(base, exponent));
1916 m_out.jump(continuation);
1918 m_out.appendTo(nanExceptionResultIsNaN, continuation);
1919 ValueFromBlock pureNan = m_out.anchor(m_out.constDouble(PNaN));
1920 m_out.jump(continuation);
1922 m_out.appendTo(continuation, lastNext);
1923 setDouble(m_out.phi(m_out.doubleType, powDoubleIntResult, powResult, pureNan));
1927 void compileArithRandom()
1929 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
1931 // Inlined WeakRandom::advance().
1932 // uint64_t x = m_low;
1933 void* lowAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset();
1934 LValue low = m_out.load64(m_out.absolute(lowAddress));
1935 // uint64_t y = m_high;
1936 void* highAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset();
1937 LValue high = m_out.load64(m_out.absolute(highAddress));
1939 m_out.store64(high, m_out.absolute(lowAddress));
1942 LValue phase1 = m_out.bitXor(m_out.shl(low, m_out.constInt64(23)), low);
1945 LValue phase2 = m_out.bitXor(m_out.lShr(phase1, m_out.constInt64(17)), phase1);
1947 // x ^= y ^ (y >> 26);
1948 LValue phase3 = m_out.bitXor(m_out.bitXor(high, m_out.lShr(high, m_out.constInt64(26))), phase2);
1951 m_out.store64(phase3, m_out.absolute(highAddress));
1954 LValue random64 = m_out.add(phase3, high);
1956 // Extract a random 53-bit value; integers of up to 53 bits are exactly representable in a double.
1957 LValue random53 = m_out.bitAnd(random64, m_out.constInt64((1ULL << 53) - 1));
1959 LValue double53Integer = m_out.intToDouble(random53);
1961 // Compute `(53-bit double integer value) / (1 << 53)` as `(53-bit double integer value) * (1.0 / (1 << 53))`.
1962 // In the latter form, `1.0 / (1 << 53)` is the double 2^-53 (mantissa = 0, biased exponent = 970).
1963 static const double scale = 1.0 / (1ULL << 53);
1965 // Multiplying the 53-bit double integer by 2^-53 does not change its mantissa;
1966 // it only reduces the exponent.
1967 // (Except for 0.0, which is handled specially: its exponent simply stays 0.)
1968 // The result is a random double with 53 bits of precision in [0, 1).
1969 LValue result = m_out.doubleMul(double53Integer, m_out.constDouble(scale));
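// For reference, the scalar computation the IR above encodes is roughly the xorshift128+
// step (a sketch, not the actual WeakRandom source; variable names are illustrative):
//
//     uint64_t x = low, y = high;
//     low = y;                       // store64(high, lowAddress)
//     x ^= x << 23;                  // phase1
//     x ^= x >> 17;                  // phase2
//     x ^= y ^ (y >> 26);            // phase3
//     high = x;                      // store64(phase3, highAddress)
//     uint64_t bits = (x + y) & ((1ULL << 53) - 1);
//     double result = bits * (1.0 / (1ULL << 53));   // uniform in [0, 1)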
1974 void compileArithRound()
1976 LValue result = nullptr;
1978 if (producesInteger(m_node->arithRoundingMode()) && !shouldCheckNegativeZero(m_node->arithRoundingMode())) {
1979 LValue value = lowDouble(m_node->child1());
1980 result = m_out.doubleFloor(m_out.doubleAdd(value, m_out.constDouble(0.5)));
1982 LBasicBlock realPartIsMoreThanHalf = m_out.newBlock();
1983 LBasicBlock continuation = m_out.newBlock();
1985 LValue value = lowDouble(m_node->child1());
1986 LValue integerValue = m_out.doubleCeil(value);
1987 ValueFromBlock integerValueResult = m_out.anchor(integerValue);
1989 LValue realPart = m_out.doubleSub(integerValue, value);
1991 m_out.branch(m_out.doubleGreaterThanOrUnordered(realPart, m_out.constDouble(0.5)), unsure(realPartIsMoreThanHalf), unsure(continuation));
1993 LBasicBlock lastNext = m_out.appendTo(realPartIsMoreThanHalf, continuation);
1994 LValue integerValueRoundedDown = m_out.doubleSub(integerValue, m_out.constDouble(1));
1995 ValueFromBlock integerValueRoundedDownResult = m_out.anchor(integerValueRoundedDown);
1996 m_out.jump(continuation);
1997 m_out.appendTo(continuation, lastNext);
1999 result = m_out.phi(m_out.doubleType, integerValueResult, integerValueRoundedDownResult);
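// Presumably why this path rounds via ceil() instead of floor(value + 0.5): ceil() keeps
// the sign of zero for inputs in (-0.5, 0], so a negative-zero result remains observable
// for the negative-zero check below, and subtracting 1 whenever the distance to the
// ceiling exceeds 0.5 still yields round-half-toward-positive-infinity semantics.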
2002 if (producesInteger(m_node->arithRoundingMode())) {
2003 LValue integerValue = convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode()));
2004 setInt32(integerValue);
2009 void compileArithFloor()
2011 LValue value = lowDouble(m_node->child1());
2012 LValue integerValue = m_out.doubleFloor(value);
2013 if (producesInteger(m_node->arithRoundingMode()))
2014 setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode())));
2016 setDouble(integerValue);
2019 void compileArithCeil()
2021 LValue value = lowDouble(m_node->child1());
2022 LValue integerValue = m_out.doubleCeil(value);
2023 if (producesInteger(m_node->arithRoundingMode()))
2024 setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode())));
2026 setDouble(integerValue);
2029 void compileArithTrunc()
2031 LValue value = lowDouble(m_node->child1());
2032 LValue result = m_out.doubleTrunc(value);
2033 if (producesInteger(m_node->arithRoundingMode()))
2034 setInt32(convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode())));
2039 void compileArithSqrt() { setDouble(m_out.doubleSqrt(lowDouble(m_node->child1()))); }
2041 void compileArithLog() { setDouble(m_out.doubleLog(lowDouble(m_node->child1()))); }
2043 void compileArithFRound()
2045 setDouble(m_out.fround(lowDouble(m_node->child1())));
2048 void compileArithNegate()
2050 switch (m_node->child1().useKind()) {
2052 LValue value = lowInt32(m_node->child1());
2055 if (!shouldCheckOverflow(m_node->arithMode()))
2056 result = m_out.neg(value);
2057 else if (!shouldCheckNegativeZero(m_node->arithMode())) {
2058 CheckValue* check = m_out.speculateSub(m_out.int32Zero, value);
2059 blessSpeculation(check, Overflow, noValue(), nullptr, m_origin);
2062 speculate(Overflow, noValue(), 0, m_out.testIsZero32(value, m_out.constInt32(0x7fffffff)));
2063 result = m_out.neg(value);
2071 if (!abstractValue(m_node->child1()).couldBeType(SpecInt52)) {
2073 LValue value = lowWhicheverInt52(m_node->child1(), kind);
2074 LValue result = m_out.neg(value);
2075 if (shouldCheckNegativeZero(m_node->arithMode()))
2076 speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
2077 setInt52(result, kind);
2081 LValue value = lowInt52(m_node->child1());
2082 CheckValue* result = m_out.speculateSub(m_out.int64Zero, value);
2083 blessSpeculation(result, Int52Overflow, noValue(), nullptr, m_origin);
2084 speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
2089 case DoubleRepUse: {
2090 setDouble(m_out.doubleNeg(lowDouble(m_node->child1())));
2095 DFG_CRASH(m_graph, m_node, "Bad use kind");
2100 void compileBitAnd()
2102 if (m_node->isBinaryUseKind(UntypedUse)) {
2103 emitBinaryBitOpSnippet<JITBitAndGenerator>(operationValueBitAnd);
2106 setInt32(m_out.bitAnd(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
2111 if (m_node->isBinaryUseKind(UntypedUse)) {
2112 emitBinaryBitOpSnippet<JITBitOrGenerator>(operationValueBitOr);
2115 setInt32(m_out.bitOr(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
2118 void compileBitXor()
2120 if (m_node->isBinaryUseKind(UntypedUse)) {
2121 emitBinaryBitOpSnippet<JITBitXorGenerator>(operationValueBitXor);
2124 setInt32(m_out.bitXor(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
2127 void compileBitRShift()
2129 if (m_node->isBinaryUseKind(UntypedUse)) {
2130 emitRightShiftSnippet(JITRightShiftGenerator::SignedShift);
2133 setInt32(m_out.aShr(
2134 lowInt32(m_node->child1()),
2135 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
2138 void compileBitLShift()
2140 if (m_node->isBinaryUseKind(UntypedUse)) {
2141 emitBinaryBitOpSnippet<JITLeftShiftGenerator>(operationValueBitLShift);
2145 lowInt32(m_node->child1()),
2146 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
2149 void compileBitURShift()
2151 if (m_node->isBinaryUseKind(UntypedUse)) {
2152 emitRightShiftSnippet(JITRightShiftGenerator::UnsignedShift);
2155 setInt32(m_out.lShr(
2156 lowInt32(m_node->child1()),
2157 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
2160 void compileUInt32ToNumber()
2162 LValue value = lowInt32(m_node->child1());
2164 if (doesOverflow(m_node->arithMode())) {
2165 setStrictInt52(m_out.zeroExtPtr(value));
2169 speculate(Overflow, noValue(), 0, m_out.lessThan(value, m_out.int32Zero));
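// The incoming int32 is really an unsigned value. If it may exceed INT32_MAX we zero-extend
// it and hand it over as a strict Int52 above; otherwise we speculate that the sign bit is
// clear and keep treating it as an ordinary int32.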
2173 void compileCheckStructure()
2176 if (m_node->child1()->hasConstant())
2177 exitKind = BadConstantCache;
2179 exitKind = BadCache;
2181 switch (m_node->child1().useKind()) {
2183 case KnownCellUse: {
2184 LValue cell = lowCell(m_node->child1());
2187 m_out.load32(cell, m_heaps.JSCell_structureID), jsValueValue(cell),
2188 exitKind, m_node->structureSet(),
2189 [&] (Structure* structure) {
2190 return weakStructureID(structure);
2195 case CellOrOtherUse: {
2196 LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);
2198 LBasicBlock cellCase = m_out.newBlock();
2199 LBasicBlock notCellCase = m_out.newBlock();
2200 LBasicBlock continuation = m_out.newBlock();
2203 isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase));
2205 LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
2207 m_out.load32(value, m_heaps.JSCell_structureID), jsValueValue(value),
2208 exitKind, m_node->structureSet(),
2209 [&] (Structure* structure) {
2210 return weakStructureID(structure);
2212 m_out.jump(continuation);
2214 m_out.appendTo(notCellCase, continuation);
2215 FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecCell | SpecOther, isNotOther(value));
2216 m_out.jump(continuation);
2218 m_out.appendTo(continuation, lastNext);
2223 DFG_CRASH(m_graph, m_node, "Bad use kind");
2228 void compileCheckCell()
2230 LValue cell = lowCell(m_node->child1());
2233 BadCell, jsValueValue(cell), m_node->child1().node(),
2234 m_out.notEqual(cell, weakPointer(m_node->cellOperand()->cell())));
2237 void compileCheckBadCell()
2242 void compileCheckNotEmpty()
2244 speculate(TDZFailure, noValue(), nullptr, m_out.isZero64(lowJSValue(m_node->child1())));
2247 void compileCheckIdent()
2249 UniquedStringImpl* uid = m_node->uidOperand();
2250 if (uid->isSymbol()) {
2251 LValue symbol = lowSymbol(m_node->child1());
2252 LValue stringImpl = m_out.loadPtr(symbol, m_heaps.Symbol_privateName);
2253 speculate(BadIdent, noValue(), nullptr, m_out.notEqual(stringImpl, m_out.constIntPtr(uid)));
2255 LValue string = lowStringIdent(m_node->child1());
2256 LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
2257 speculate(BadIdent, noValue(), nullptr, m_out.notEqual(stringImpl, m_out.constIntPtr(uid)));
2261 void compileGetExecutable()
2263 LValue cell = lowCell(m_node->child1());
2264 speculateFunction(m_node->child1(), cell);
2265 setJSValue(m_out.loadPtr(cell, m_heaps.JSFunction_executable));
2268 void compileArrayifyToStructure()
2270 LValue cell = lowCell(m_node->child1());
2271 LValue property = !!m_node->child2() ? lowInt32(m_node->child2()) : 0;
2273 LBasicBlock unexpectedStructure = m_out.newBlock();
2274 LBasicBlock continuation = m_out.newBlock();
2276 LValue structureID = m_out.load32(cell, m_heaps.JSCell_structureID);
2279 m_out.notEqual(structureID, weakStructureID(m_node->structure())),
2280 rarely(unexpectedStructure), usually(continuation));
2282 LBasicBlock lastNext = m_out.appendTo(unexpectedStructure, continuation);
2285 switch (m_node->arrayMode().type()) {
2288 case Array::Contiguous:
2290 Uncountable, noValue(), 0,
2291 m_out.aboveOrEqual(property, m_out.constInt32(MIN_SPARSE_ARRAY_INDEX)));
2298 switch (m_node->arrayMode().type()) {
2300 vmCall(m_out.voidType, m_out.operation(operationEnsureInt32), m_callFrame, cell);
2303 vmCall(m_out.voidType, m_out.operation(operationEnsureDouble), m_callFrame, cell);
2305 case Array::Contiguous:
2306 vmCall(m_out.voidType, m_out.operation(operationEnsureContiguous), m_callFrame, cell);
2308 case Array::ArrayStorage:
2309 case Array::SlowPutArrayStorage:
2310 vmCall(m_out.voidType, m_out.operation(operationEnsureArrayStorage), m_callFrame, cell);
2313 DFG_CRASH(m_graph, m_node, "Bad array type");
2317 structureID = m_out.load32(cell, m_heaps.JSCell_structureID);
2319 BadIndexingType, jsValueValue(cell), 0,
2320 m_out.notEqual(structureID, weakStructureID(m_node->structure())));
2321 m_out.jump(continuation);
2323 m_out.appendTo(continuation, lastNext);
2326 void compilePutStructure()
2328 m_ftlState.jitCode->common.notifyCompilingStructureTransition(m_graph.m_plan, codeBlock(), m_node);
2330 Structure* oldStructure = m_node->transition()->previous;
2331 Structure* newStructure = m_node->transition()->next;
2332 ASSERT_UNUSED(oldStructure, oldStructure->indexingType() == newStructure->indexingType());
2333 ASSERT(oldStructure->typeInfo().inlineTypeFlags() == newStructure->typeInfo().inlineTypeFlags());
2334 ASSERT(oldStructure->typeInfo().type() == newStructure->typeInfo().type());
2336 LValue cell = lowCell(m_node->child1());
2338 weakStructureID(newStructure),
2339 cell, m_heaps.JSCell_structureID);
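// Because the asserted invariants guarantee that the transition changes neither the
// indexing type nor the inline type flags, nothing about the butterfly has to change;
// committing the transition is a single 32-bit store of the new structure ID. (Write
// barriers, where needed, are represented as separate DFG nodes rather than emitted here.)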
2342 void compileGetById(AccessType type)
2344 ASSERT(type == AccessType::Get || type == AccessType::GetPure);
2345 switch (m_node->child1().useKind()) {
2347 setJSValue(getById(lowCell(m_node->child1()), type));
2352 // This is unfortunate: we duplicate the slow path both here and in the code
2353 // generated by the IC. We should investigate eliminating that duplication.
2354 // https://bugs.webkit.org/show_bug.cgi?id=127830
2355 LValue value = lowJSValue(m_node->child1());
2357 LBasicBlock cellCase = m_out.newBlock();
2358 LBasicBlock notCellCase = m_out.newBlock();
2359 LBasicBlock continuation = m_out.newBlock();
2362 isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase));
2364 LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
2365 ValueFromBlock cellResult = m_out.anchor(getById(value, type));
2366 m_out.jump(continuation);
2368 J_JITOperation_EJI getByIdFunction;
2369 if (type == AccessType::Get)
2370 getByIdFunction = operationGetByIdGeneric;
2372 getByIdFunction = operationTryGetByIdGeneric;
2374 m_out.appendTo(notCellCase, continuation);
2375 ValueFromBlock notCellResult = m_out.anchor(vmCall(
2376 m_out.int64, m_out.operation(getByIdFunction),
2378 m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
2379 m_out.jump(continuation);
2381 m_out.appendTo(continuation, lastNext);
2382 setJSValue(m_out.phi(m_out.int64, cellResult, notCellResult));
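// Untyped case: only cells can go through the inline cache, so the value is first
// split on isCell. The cell branch reuses the same getById() IC helper as the cell
// case above; the not-cell branch calls the generic C++ getter (operationGetByIdGeneric,
// or operationTryGetByIdGeneric for the pure/try flavor) and the two results merge in a phi.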
2387 DFG_CRASH(m_graph, m_node, "Bad use kind");
2392 void compilePutById()
2394 Node* node = m_node;
2396 // See above; CellUse is easier so we do only that for now.
2397 ASSERT(node->child1().useKind() == CellUse);
2399 LValue base = lowCell(node->child1());
2400 LValue value = lowJSValue(node->child2());
2401 auto uid = m_graph.identifiers()[node->identifierNumber()];
2403 B3::PatchpointValue* patchpoint = m_out.patchpoint(Void);
2404 patchpoint->appendSomeRegister(base);
2405 patchpoint->appendSomeRegister(value);
2406 patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
2407 patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
2408 patchpoint->clobber(RegisterSet::macroScratchRegisters());
2410 // FIXME: If this is a PutByIdFlush, we might want to late-clobber volatile registers.
2411 // https://bugs.webkit.org/show_bug.cgi?id=152848
2413 RefPtr<PatchpointExceptionHandle> exceptionHandle =
2414 preparePatchpointForExceptions(patchpoint);
2416 State* state = &m_ftlState;
2417 ECMAMode ecmaMode = m_graph.executableFor(node->origin.semantic)->ecmaMode();
2419 patchpoint->setGenerator(
2420 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
2421 AllowMacroScratchRegisterUsage allowScratch(jit);
2423 CallSiteIndex callSiteIndex =
2424 state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic);
2426 Box<CCallHelpers::JumpList> exceptions =
2427 exceptionHandle->scheduleExitCreation(params)->jumps(jit);
2429 // JS setter call ICs generated by the PutById IC will need this.
2430 exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);
2432 auto generator = Box<JITPutByIdGenerator>::create(
2433 jit.codeBlock(), node->origin.semantic, callSiteIndex,
2434 params.unavailableRegisters(), JSValueRegs(params[0].gpr()),
2435 JSValueRegs(params[1].gpr()), GPRInfo::patchpointScratchRegister, ecmaMode,
2436 node->op() == PutByIdDirect ? Direct : NotDirect);
2438 generator->generateFastPath(jit);
2439 CCallHelpers::Label done = jit.label();
2442 [=] (CCallHelpers& jit) {
2443 AllowMacroScratchRegisterUsage allowScratch(jit);
2445 generator->slowPathJump().link(&jit);
2446 CCallHelpers::Label slowPathBegin = jit.label();
2447 CCallHelpers::Call slowPathCall = callOperation(
2448 *state, params.unavailableRegisters(), jit, node->origin.semantic,
2449 exceptions.get(), generator->slowPathFunction(), InvalidGPRReg,
2450 CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(),
2451 params[0].gpr(), CCallHelpers::TrustedImmPtr(uid)).call();
2452 jit.jump().linkTo(done, &jit);
2454 generator->reportSlowPathCall(slowPathBegin, slowPathCall);
2457 [=] (LinkBuffer& linkBuffer) {
2458 generator->finalize(linkBuffer);
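// Shape of the PutById IC: the patchpoint pins base and value in registers, a
// JITPutByIdGenerator emits the inline-cache fast path at the patchpoint site, and the
// slow-path lambda links the IC's slow-path jump to a callOperation of the generator's
// slowPathFunction before jumping back to `done` (how that lambda is registered with
// the generation params falls in lines not shown here). finalize() then wires the
// StructureStubInfo up once the LinkBuffer is materialized.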
2464 void compileGetButterfly()
2466 setStorage(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSObject_butterfly));
2469 void compileConstantStoragePointer()
2471 setStorage(m_out.constIntPtr(m_node->storagePointer()));
2474 void compileGetIndexedPropertyStorage()
2476 LValue cell = lowCell(m_node->child1());
2478 if (m_node->arrayMode().type() == Array::String) {
2479 LBasicBlock slowPath = m_out.newBlock();
2480 LBasicBlock continuation = m_out.newBlock();
2482 LValue fastResultValue = m_out.loadPtr(cell, m_heaps.JSString_value);
2483 ValueFromBlock fastResult = m_out.anchor(fastResultValue);
2486 m_out.notNull(fastResultValue), usually(continuation), rarely(slowPath));
2488 LBasicBlock lastNext = m_out.appendTo(slowPath, continuation);
2490 ValueFromBlock slowResult = m_out.anchor(
2491 vmCall(m_out.intPtr, m_out.operation(operationResolveRope), m_callFrame, cell));
2493 m_out.jump(continuation);
2495 m_out.appendTo(continuation, lastNext);
2497 setStorage(m_out.loadPtr(m_out.phi(m_out.intPtr, fastResult, slowResult), m_heaps.StringImpl_data));
2501 setStorage(m_out.loadPtr(cell, m_heaps.JSArrayBufferView_vector));
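// For Array::String the storage is the StringImpl's character data: JSString's value
// pointer is null while the string is still a rope, so the rarely-taken slow path calls
// operationResolveRope first and both paths then load StringImpl_data off the phi. For
// typed-array modes the storage is simply the view's vector pointer, loaded above.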
2504 void compileCheckArray()
2506 Edge edge = m_node->child1();
2507 LValue cell = lowCell(edge);
2509 if (m_node->arrayMode().alreadyChecked(m_graph, m_node, abstractValue(edge)))
2513 BadIndexingType, jsValueValue(cell), 0,
2514 m_out.logicalNot(isArrayType(cell, m_node->arrayMode())));
2517 void compileGetTypedArrayByteOffset()
2519 LValue basePtr = lowCell(m_node->child1());
2521 LBasicBlock simpleCase = m_out.newBlock();
2522 LBasicBlock wastefulCase = m_out.newBlock();
2523 LBasicBlock continuation = m_out.newBlock();
2525 LValue mode = m_out.load32(basePtr, m_heaps.JSArrayBufferView_mode);
2527 m_out.notEqual(mode, m_out.constInt32(WastefulTypedArray)),
2528 unsure(simpleCase), unsure(wastefulCase));
2530 LBasicBlock lastNext = m_out.appendTo(simpleCase, wastefulCase);
2532 ValueFromBlock simpleOut = m_out.anchor(m_out.constIntPtr(0));
2534 m_out.jump(continuation);
2536 m_out.appendTo(wastefulCase, continuation);
2538 LValue vectorPtr = m_out.loadPtr(basePtr, m_heaps.JSArrayBufferView_vector);
2539 LValue butterflyPtr = m_out.loadPtr(basePtr, m_heaps.JSObject_butterfly);
2540 LValue arrayBufferPtr = m_out.loadPtr(butterflyPtr, m_heaps.Butterfly_arrayBuffer);
2541 LValue dataPtr = m_out.loadPtr(arrayBufferPtr, m_heaps.ArrayBuffer_data);
2543 ValueFromBlock wastefulOut = m_out.anchor(m_out.sub(vectorPtr, dataPtr));
2545 m_out.jump(continuation);
2546 m_out.appendTo(continuation, lastNext);
2548 setInt32(m_out.castToInt32(m_out.phi(m_out.intPtr, simpleOut, wastefulOut)));
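// A non-wasteful view reports a byte offset of 0 (its data is not carved out of the
// middle of a shared ArrayBuffer). For a WastefulTypedArray the offset is recomputed as
// vector - ArrayBuffer::data, reached through butterfly -> arrayBuffer -> data.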
2551 void compileGetArrayLength()
2553 switch (m_node->arrayMode().type()) {
2556 case Array::Contiguous: {
2557 setInt32(m_out.load32NonNegative(lowStorage(m_node->child2()), m_heaps.Butterfly_publicLength));
2561 case Array::String: {
2562 LValue string = lowCell(m_node->child1());
2563 setInt32(m_out.load32NonNegative(string, m_heaps.JSString_length));
2567 case Array::DirectArguments: {
2568 LValue arguments = lowCell(m_node->child1());
2570 ExoticObjectMode, noValue(), nullptr,
2571 m_out.notNull(m_out.loadPtr(arguments, m_heaps.DirectArguments_overrides)));
2572 setInt32(m_out.load32NonNegative(arguments, m_heaps.DirectArguments_length));
2576 case Array::ScopedArguments: {
2577 LValue arguments = lowCell(m_node->child1());
2579 ExoticObjectMode, noValue(), nullptr,
2580 m_out.notZero32(m_out.load8ZeroExt32(arguments, m_heaps.ScopedArguments_overrodeThings)));
2581 setInt32(m_out.load32NonNegative(arguments, m_heaps.ScopedArguments_totalLength));
2586 if (m_node->arrayMode().isSomeTypedArrayView()) {
2588 m_out.load32NonNegative(lowCell(m_node->child1()), m_heaps.JSArrayBufferView_length));
2592 DFG_CRASH(m_graph, m_node, "Bad array type");
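// Each array mode has its own notion of length: contiguous-style arrays read the
// butterfly's publicLength, strings read JSString's length, DirectArguments and
// ScopedArguments read their own length fields (after speculating that nothing exotic
// has overridden them), and typed-array views read JSArrayBufferView's length. All of
// the loads use load32NonNegative so later phases know the result cannot be negative.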
2597 void compileCheckInBounds()
2600 OutOfBounds, noValue(), 0,
2601 m_out.aboveOrEqual(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
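// A single unsigned aboveOrEqual covers both failure modes: a negative index
// reinterprets as a value >= 2^31 and so compares as out of bounds against any valid
// length, and an index >= length fails directly.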
2604 void compileGetByVal()
2606 switch (m_node->arrayMode().type()) {
2608 case Array::Contiguous: {
2609 LValue index = lowInt32(m_node->child2());
2610 LValue storage = lowStorage(m_node->child3());
2612 IndexedAbstractHeap& heap = m_node->arrayMode().type() == Array::Int32 ?
2613 m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties;
2615 if (m_node->arrayMode().isInBounds()) {
2616 LValue result = m_out.load64(baseIndex(heap, storage, index, m_node->child2()));
2617 LValue isHole = m_out.isZero64(result);
2618 if (m_node->arrayMode().isSaneChain()) {
2620 m_graph, m_node, m_node->arrayMode().type() == Array::Contiguous);
2621 result = m_out.select(
2622 isHole, m_out.constInt64(JSValue::encode(jsUndefined())), result);
2624 speculate(LoadFromHole, noValue(), 0, isHole);
2629 LValue base = lowCell(m_node->child1());
2631 LBasicBlock fastCase = m_out.newBlock();
2632 LBasicBlock slowCase = m_out.newBlock();
2633 LBasicBlock continuation = m_out.newBlock();
2637 index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
2638 rarely(slowCase), usually(fastCase));
2640 LBasicBlock lastNext = m_out.appendTo(fastCase, slowCase);
2642 LValue fastResultValue = m_out.load64(baseIndex(heap, storage, index, m_node->child2()));
2643 ValueFromBlock fastResult = m_out.anchor(fastResultValue);
2645 m_out.isZero64(fastResultValue), rarely(slowCase), usually(continuation));
2647 m_out.appendTo(slowCase, continuation);
2648 ValueFromBlock slowResult = m_out.anchor(
2649 vmCall(m_out.int64, m_out.operation(operationGetByValArrayInt), m_callFrame, base, index));
2650 m_out.jump(continuation);
2652 m_out.appendTo(continuation, lastNext);
2653 setJSValue(m_out.phi(m_out.int64, fastResult, slowResult));
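// In-bounds loads only have to deal with holes: the zero bit pattern marks a hole in
// Int32/Contiguous storage, so a sane-chain access may fold a hole to undefined with a
// select, while other in-bounds accesses speculate LoadFromHole. When the access is not
// known to be in bounds, the index is checked against publicLength and both
// out-of-bounds and hole loads take the slow call to operationGetByValArrayInt.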
2657 case Array::Double: {
2658 LValue index = lowInt32(m_node->child2());
2659 LValue storage = lowStorage(m_node->child3());
2661 IndexedAbstractHeap& heap = m_heaps.indexedDoubleProperties;
2663 if (m_node->arrayMode().isInBounds()) {
2664 LValue result = m_out.loadDouble(
2665 baseIndex(heap, storage, index, m_node->child2()));
2667 if (!m_node->arrayMode().isSaneChain()) {
2669 LoadFromHole, noValue(), 0,
2670 m_out.doubleNotEqualOrUnordered(result, result));
2676 LValue base = lowCell(m_node->child1());
2678 LBasicBlock inBounds = m_out.newBlock();
2679 LBasicBlock boxPath = m_out.newBlock();
2680 LBasicBlock slowCase = m_out.newBlock();
2681 LBasicBlock continuation = m_out.newBlock();
2685 index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
2686 rarely(slowCase), usually(inBounds));
2688 LBasicBlock lastNext = m_out.appendTo(inBounds, boxPath);
2689 LValue doubleValue = m_out.loadDouble(
2690 baseIndex(heap, storage, index, m_node->child2()));
2692 m_out.doubleNotEqualOrUnordered(doubleValue, doubleValue),
2693 rarely(slowCase), usually(boxPath));
2695 m_out.appendTo(boxPath, slowCase);
2696 ValueFromBlock fastResult = m_out.anchor(boxDouble(doubleValue));
2697 m_out.jump(continuation);
2699 m_out.appendTo(slowCase, continuation);
2700 ValueFromBlock slowResult = m_out.anchor(
2701 vmCall(m_out.int64, m_out.operation(operationGetByValArrayInt), m_callFrame, base, index));
2702 m_out.jump(continuation);
2704 m_out.appendTo(continuation, lastNext);
2705 setJSValue(m_out.phi(m_out.int64, fastResult, slowResult));
2709 case Array::Undecided: {
2710 LValue index = lowInt32(m_node->child2());
2712 speculate(OutOfBounds, noValue(), m_node, m_out.lessThan(index, m_out.int32Zero));
2713 setJSValue(m_out.constInt64(ValueUndefined));
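// An Array::Undecided array has no indexed storage yet, so any non-negative index would
// read a hole; it suffices to exit on a negative index and otherwise produce the
// undefined constant directly.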
2717 case Array::DirectArguments: {
2718 LValue base = lowCell(m_node->child1());
2719 LValue index = lowInt32(m_node->child2());
2722 ExoticObjectMode, noValue(), nullptr,
2723 m_out.notNull(m_out.loadPtr(base, m_heaps.DirectArguments_overrides)));
2725 ExoticObjectMode, noValue(), nullptr,
2728 m_out.load32NonNegative(base, m_heaps.DirectArguments_length)));
2730 TypedPointer address = m_out.baseIndex(
2731 m_heaps.DirectArguments_storage, base, m_out.zeroExtPtr(index));
2732 setJSValue(m_out.load64(address));
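// DirectArguments is the simple, unmapped arguments object: as long as nothing has
// overridden its contents (the overrides pointer is null) and the index is below its
// length, the value can be read straight out of the object's inline storage.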
2736 case Array::ScopedArguments: {
2737 LValue base = lowCell(m_node->child1());
2738 LValue index = lowInt32(m_node->child2());
2741 ExoticObjectMode, noValue(), nullptr,
2744 m_out.load32NonNegative(base, m_heaps.ScopedArguments_totalLength)));
2746 LValue table = m_out.loadPtr(base, m_heaps.ScopedArguments_table);
2747 LValue namedLength = m_out.load32(table, m_heaps.ScopedArgumentsTable_length);
2749 LBasicBlock namedCase = m_out.newBlock();
2750 LBasicBlock overflowCase = m_out.newBlock();
2751 LBasicBlock continuation = m_out.newBlock();
2754 m_out.aboveOrEqual(index, namedLength), unsure(overflowCase), unsure(namedCase));
2756 LBasicBlock lastNext = m_out.appendTo(namedCase, overflowCase);
2758 LValue scope = m_out.loadPtr(base, m_heaps.ScopedArguments_scope);
2759 LValue arguments = m_out.loadPtr(table, m_heaps.ScopedArgumentsTable_arguments);
2761 TypedPointer address = m_out.baseIndex(
2762 m_heaps.scopedArgumentsTableArguments, arguments, m_out.zeroExtPtr(index));
2763 LValue scopeOffset = m_out.load32(address);
2766 ExoticObjectMode, noValue(), nullptr,
2767 m_out.equal(scopeOffset, m_out.constInt32(ScopeOffset::invalidOffset)));
2769 address = m_out.baseIndex(
2770 m_heaps.JSEnvironmentRecord_variables, scope, m_out.zeroExtPtr(scopeOffset));
2771 ValueFromBlock namedResult = m_out.anchor(m_out.load64(address));
2772 m_out.jump(continuation);
2774 m_out.appendTo(overflowCase, continuation);
2776 address = m_out.baseIndex(
2777 m_heaps.ScopedArguments_overflowStorage, base,
2778 m_out.zeroExtPtr(m_out.sub(index, namedLength)));
2779 LValue overflowValue = m_out.load64(address);
2780 speculate(ExoticObjectMode, noValue(), nullptr, m_out.isZero64(overflowValue));
2781 ValueFromBlock overflowResult = m_out.anchor(overflowValue);
2782 m_out.jump(continuation);
2784 m_out.appendTo(continuation, lastNext);
2785 setJSValue(m_out.phi(m_out.int64, namedResult, overflowResult));
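// ScopedArguments splits arguments into two regions: indices below the table length are
// "named" arguments that live in the scope at the ScopeOffset recorded in the
// ScopedArgumentsTable (an invalid offset means the argument is not simply mapped, so we
// exit with ExoticObjectMode), while indices at or above it live in overflowStorage,
// where an empty (zero) value likewise forces an exit.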
2789 case Array::Generic: {
2791 m_out.int64, m_out.operation(operationGetByVal), m_callFrame,
2792 lowJSValue(m_node->child1()), lowJSValue(m_node->child2())));
2796 case Array::String: {
2797 compileStringCharAt();
2802 LValue index = lowInt32(m_node->child2());
2803 LValue storage = lowStorage(m_node->child3());
2805 TypedArrayType type = m_node->arrayMode().typedArrayType();
2807 if (isTypedView(type)) {
2808 TypedPointer pointer = TypedPointer(
2809 m_heaps.typedArrayProperties,
2813 m_out.zeroExtPtr(index),
2814 m_out.constIntPtr(logElementSize(type)))));
2818 switch (elementSize(type)) {
2820 result = isSigned(type) ? m_out.load8SignExt32(pointer) : m_out.load8ZeroExt32(pointer);
2823 result = isSigned(type) ? m_out.load16SignExt32(pointer) : m_out.load16ZeroExt32(pointer);
2826 result = m_out.load32(pointer);
2829 DFG_CRASH(m_graph, m_node, "Bad element size");
2832 if (elementSize(type) < 4 || isSigned(type)) {
2837 if (m_node->shouldSpeculateInt32()) {
2839 Overflow, noValue(), 0, m_out.lessThan(result, m_out.int32Zero));
2844 if (m_node->shouldSpeculateMachineInt()) {
2845 setStrictInt52(m_out.zeroExt(result, m_out.int64));
2849 setDouble(m_out.unsignedToDouble(result));
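// Only an unsigned 32-bit load can produce a value that does not fit in int32. Depending
// on the node's prediction we either speculate that the result is non-negative and keep
// it as int32, widen it to a strict Int52 by zero-extension, or fall back to converting
// it to a double with unsignedToDouble.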
2853 ASSERT(isFloat(type));
2858 result = m_out.floatToDouble(m_out.loadFloat(pointer));
2861 result = m_out.loadDouble(pointer);
2864 DFG_CRASH(m_graph, m_node, "Bad typed array type");
2871 DFG_CRASH(m_graph, m_node, "Bad array type");
2876 void compileGetMyArgumentByVal()
2878 InlineCallFrame* inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame;
2880 LValue index = lowInt32(m_node->child2());
2883 if (inlineCallFrame && !inlineCallFrame->isVarargs())
2884 limit = m_out.constInt32(inlineCallFrame->arguments.size() - 1);
2886 VirtualRegister argumentCountRegister;
2887 if (!inlineCallFrame)
2888 argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
2890 argumentCountRegister = inlineCallFrame->argumentCountRegister;
2891 limit = m_out.sub(m_out.load32(payloadFor(argumentCountRegister)), m_out.int32One);
2894 speculate(ExoticObjectMode, noValue(), 0, m_out.aboveOrEqual(index, limit));
2897 if (inlineCallFrame) {
2898 if (inlineCallFrame->arguments.size() <= 1) {
2899 // We should have already exited due to the bounds check, above. Just tell the
2900 // compiler that anything dominated by this instruction is not reachable, so
2901 // that we don't waste time generating such code. This will also plant some
2902 // kind of crashing instruction so that if by some fluke the bounds check didn't
2903 // work, we'll crash in an easy-to-see way.
2904 didAlreadyTerminate();
2907 base = addressFor(inlineCallFrame->arguments[1].virtualRegister());
2909 base = addressFor(virtualRegisterForArgument(1));
2911 LValue pointer = m_out.baseIndex(
2912 base.value(), m_out.zeroExt(index, m_out.intPtr), ScaleEight);
2913 setJSValue(m_out.load64(TypedPointer(m_heaps.variables.atAnyIndex(), pointer)));
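// The bound used for the ExoticObjectMode check is the caller's argument count minus one
// (dropping `this`): a compile-time constant for a non-varargs inline frame, otherwise
// loaded from the frame's ArgumentCount slot. The argument itself is then read at an
// 8-byte stride starting at the frame's first argument slot.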
2916 void compilePutByVal()
2918 Edge child1 = m_graph.varArgChild(m_node, 0);
2919 Edge child2 = m_graph.varArgChild(m_node, 1);
2920 Edge child3 = m_graph.varArgChild(m_node, 2);
2921 Edge child4 = m_graph.varArgChild(m_node, 3);
2922 Edge child5 = m_graph.varArgChild(m_node, 4);
2924 switch (m_node->arrayMode().type()) {
2925 case Array::Generic: {
2926 V_JITOperation_EJJJ operation;
2927 if (m_node->op() == PutByValDirect) {
2928 if (m_graph.isStrictModeFor(m_node->origin.semantic))
2929 operation = operationPutByValDirectStrict;
2931 operation = operationPutByValDirectNonStrict;
2933 if (m_graph.isStrictModeFor(m_node->origin.semantic))
2934 operation = operationPutByValStrict;
2936 operation = operationPutByValNonStrict;
2940 m_out.voidType, m_out.operation(operation), m_callFrame,
2941 lowJSValue(child1), lowJSValue(child2), lowJSValue(child3));
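// The generic path does no array speculation at all: it just picks one of four C++
// operations according to whether this is a direct put (PutByValDirect defines the
// property on the base itself, without consulting the prototype chain) and whether the
// code is in strict mode, then passes base, property, and value as JSValues.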
2949 LValue base = lowCell(child1);
2950 LValue index = lowInt32(child2);
2951 LValue storage = lowStorage(child4);
2953 switch (m_node->arrayMode().type()) {
2956 case Array::Contiguous: {
2957 LBasicBlock continuation = m_out.newBlock();
2958 LBasicBlock outerLastNext = m_out.appendTo(m_out.m_block, continuation);
2960 switch (m_node->arrayMode().type()) {
2962 case Array::Contiguous: {
2963 LValue value = lowJSValue(child3, ManualOperandSpeculation);
2965 if (m_node->arrayMode().type() == Array::Int32)
2966 FTL_TYPE_CHECK(jsValueValue(value), child3, SpecInt32, isNotInt32(value));
2968 TypedPointer elementPointer = m_out.baseIndex(
2969 m_node->arrayMode().type() == Array::Int32 ?
2970 m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties,
2971 storage, m_out.zeroExtPtr(index), provenValue(child2));
2973 if (m_node->op() == PutByValAlias) {
2974 m_out.store64(value, elementPointer);
2978 contiguousPutByValOutOfBounds(
2979 codeBlock()->isStrictMode()
2980 ? operationPutByValBeyondArrayBoundsStrict
2981 : operationPutByValBeyondArrayBoundsNonStrict,
2982 base, storage, index, value, continuation);
2984 m_out.store64(value, elementPointer);
2988 case Array::Double: {
2989 LValue value = lowDouble(child3);
2992 doubleValue(value), child3, SpecDoubleReal,
2993 m_out.doubleNotEqualOrUnordered(value, value));
2995 TypedPointer elementPointer = m_out.baseIndex(
2996 m_heaps.indexedDoubleProperties, storage, m_out.zeroExtPtr(index),
2997 provenValue(child2));
2999 if (m_node->op() == PutByValAlias) {
3000 m_out.storeDouble(value, elementPointer);
3004 contiguousPutByValOutOfBounds(
3005 codeBlock()->isStrictMode()
3006 ? operationPutDoubleByValBeyondArrayBoundsStrict
3007 : operationPutDoubleByValBeyondArrayBoundsNonStrict,
3008 base, storage, index, value, continuation);
3010 m_out.storeDouble(value, elementPointer);
3015 DFG_CRASH(m_graph, m_node, "Bad array type");
3018 m_out.jump(continuation);
3019 m_out.appendTo(continuation, outerLastNext);
3024 TypedArrayType type = m_node->arrayMode().typedArrayType();
3026 if (isTypedView(type)) {
3027 TypedPointer pointer = TypedPointer(
3028 m_heaps.typedArrayProperties,
3032 m_out.zeroExt(index, m_out.intPtr),
3033 m_out.constIntPtr(logElementSize(type)))));
3035 Output::StoreType storeType;
3036 LValue valueToStore;
3040 switch (child3.useKind()) {
3043 if (child3.useKind() == Int32Use)
3044 intValue = lowInt32(child3);
3046 intValue = m_out.castToInt32(lowStrictInt52(child3));
3048 if (isClamped(type)) {
3049 ASSERT(elementSize(type) == 1);
3051 LBasicBlock atLeastZero = m_out.newBlock();
3052 LBasicBlock continuation = m_out.newBlock();
3054 Vector<ValueFromBlock, 2> intValues;
3055 intValues.append(m_out.anchor(m_out.int32Zero));
3057 m_out.lessThan(intValue, m_out.int32Zero),
3058 unsure(continuation), unsure(atLeastZero));
3060 LBasicBlock lastNext = m_out.appendTo(atLeastZero, continuation);
3062 intValues.append(m_out.anchor(m_out.select(
3063 m_out.greaterThan(intValue, m_out.constInt32(255)),
3064 m_out.constInt32(255),
3066 m_out.jump(continuation);
3068 m_out.appendTo(continuation, lastNext);
3069 intValue = m_out.phi(m_out.int32, intValues);
3074 case DoubleRepUse: {
3075 LValue doubleValue = lowDouble(child3);
3077 if (isClamped(type)) {
3078 ASSERT(elementSize(type) == 1);
3080 LBasicBlock atLeastZero = m_out.newBlock();
3081 LBasicBlock withinRange = m_out.newBlock();
3082 LBasicBlock continuation = m_out.newBlock();
3084 Vector<ValueFromBlock, 3> intValues;
3085 intValues.append(m_out.anchor(m_out.int32Zero));
3087 m_out.doubleLessThanOrUnordered(doubleValue, m_out.doubleZero),
3088 unsure(continuation), unsure(atLeastZero));
3090 LBasicBlock lastNext = m_out.appendTo(atLeastZero, withinRange);
3091 intValues.append(m_out.anchor(m_out.constInt32(255)));
3093 m_out.doubleGreaterThan(doubleValue, m_out.constDouble(255)),
3094 unsure(continuation), unsure(withinRange));
3096 m_out.appendTo(withinRange, continuation);
3097 intValues.append(m_out.anchor(m_out.doubleToInt(doubleValue)));
3098 m_out.jump(continuation);
3100 m_out.appendTo(continuation, lastNext);
3101 intValue = m_out.phi(m_out.int32, intValues);
3103 intValue = doubleToInt32(doubleValue);
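// Uint8ClampedArray stores clamp rather than wrap: an int32 source is clamped to
// [0, 255] with a branch plus a select, and a double source maps anything unordered or
// below zero to 0, anything above 255 to 255, and everything else through doubleToInt.
// Non-clamped integer views instead use the ordinary doubleToInt32 conversion.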
3108 DFG_CRASH(m_graph, m_node, "Bad use kind");
3111 valueToStore = intValue;
3112 switch (elementSize(type)) {
3114 storeType = Output::Store32As8;
3117 storeType = Output::Store32As16;
3120 storeType = Output::Store32;
3123 DFG_CRASH(m_graph, m_node, "Bad element size");
3125 } else /* !isInt(type) */ {
3126 LValue value = lowDouble(child3);