* Copyright (C) 2013-2015 Apple Inc. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "FTLLowerDFGToLLVM.h"
#include "CodeBlockWithJITType.h"
#include "DFGAbstractInterpreterInlines.h"
#include "DFGInPlaceAbstractState.h"
#include "DFGOSRAvailabilityAnalysisPhase.h"
#include "DirectArguments.h"
#include "FTLAbstractHeapRepository.h"
#include "FTLAvailableRecovery.h"
#include "FTLForOSREntryJITCode.h"
#include "FTLFormattedValue.h"
#include "FTLInlineCacheSize.h"
#include "FTLLoweredNodeValue.h"
#include "FTLOperations.h"
#include "FTLOutput.h"
#include "FTLThunks.h"
#include "FTLWeightedTarget.h"
#include "JSCInlines.h"
#include "JSLexicalEnvironment.h"
#include "OperandsInlines.h"
#include "ScopedArguments.h"
#include "ScopedArgumentsTable.h"
#include "VirtualRegister.h"
#include <llvm/InitializeLLVM.h>
#include <unordered_set>
#include <wtf/ProcessID.h>
#if ENABLE(FTL_NATIVE_CALL_INLINING)
#include "BundlePath.h"
namespace JSC { namespace FTL {
static std::atomic<int> compileCounter;
NO_RETURN_DUE_TO_CRASH static void ftlUnreachable()
NO_RETURN_DUE_TO_CRASH static void ftlUnreachable(
CodeBlock* codeBlock, BlockIndex blockIndex, unsigned nodeIndex)
dataLog("Crashing in thought-to-be-unreachable FTL-generated code for ", pointerDump(codeBlock), " at basic block #", blockIndex);
if (nodeIndex != UINT_MAX)
dataLog(", node @", nodeIndex);
// Using this instead of typeCheck() helps to reduce the load on LLVM, by creating
// significantly less dead code.
#define FTL_TYPE_CHECK(lowValue, highValue, typesPassedThrough, failCondition) do { \
FormattedValue _ftc_lowValue = (lowValue); \
Edge _ftc_highValue = (highValue); \
SpeculatedType _ftc_typesPassedThrough = (typesPassedThrough); \
if (!m_interpreter.needsTypeCheck(_ftc_highValue, _ftc_typesPassedThrough)) \
typeCheck(_ftc_lowValue, _ftc_highValue, _ftc_typesPassedThrough, (failCondition)); \
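// LowerDFGToLLVM walks the DFG graph (in SSA form) and emits equivalent LLVM IR
// through the FTL::Output abstraction (m_out). Each DFG node is handled by a
// compileXxx() method below; speculation failures become OSR exit stackmaps.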
class LowerDFGToLLVM {
LowerDFGToLLVM(State& state)
: m_graph(state.graph)
, m_heaps(state.context)
, m_out(state.context)
, m_state(state.graph)
, m_interpreter(state.graph, m_state)
, m_tbaaKind(mdKindID(state.context, "tbaa"))
, m_tbaaStructKind(mdKindID(state.context, "tbaa.struct"))
if (verboseCompilationEnabled()) {
"jsBody_", ++compileCounter, "_", codeBlock()->inferredName(),
"_", codeBlock()->hash());
m_graph.m_dominators.computeIfNecessary(m_graph);
moduleCreateWithNameInContext(name.data(), m_ftlState.context);
m_ftlState.function = addFunction(
m_ftlState.module, name.data(), functionType(m_out.int64));
setFunctionCallingConv(m_ftlState.function, LLVMCCallConv);
if (isX86() && Options::llvmDisallowAVX()) {
// AVX makes V8/raytrace 80% slower. It makes Kraken/audio-oscillator 4.5x
// slower. It should be disabled.
addTargetDependentFunctionAttr(m_ftlState.function, "target-features", "-avx");
if (verboseCompilationEnabled())
dataLog("Function ready, beginning lowering.\n");
m_out.initialize(m_ftlState.module, m_ftlState.function, m_heaps);
m_prologue = FTL_NEW_BLOCK(m_out, ("Prologue"));
LBasicBlock stackOverflow = FTL_NEW_BLOCK(m_out, ("Stack overflow"));
m_handleExceptions = FTL_NEW_BLOCK(m_out, ("Handle Exceptions"));
LBasicBlock checkArguments = FTL_NEW_BLOCK(m_out, ("Check arguments"));
for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
m_highBlock = m_graph.block(blockIndex);
m_blocks.add(m_highBlock, FTL_NEW_BLOCK(m_out, ("Block ", *m_highBlock)));
m_out.appendTo(m_prologue, stackOverflow);
createPhiVariables();
auto preOrder = m_graph.blocksInPreOrder();
int maxNumberOfArguments = -1;
for (BasicBlock* block : preOrder) {
for (unsigned nodeIndex = block->size(); nodeIndex--; ) {
Node* node = block->at(nodeIndex);
switch (node->op()) {
case NativeConstruct: {
int numArgs = node->numChildren();
if (numArgs > maxNumberOfArguments)
maxNumberOfArguments = numArgs;
if (maxNumberOfArguments >= 0) {
m_execState = m_out.alloca(arrayType(m_out.int64, JSStack::CallFrameHeaderSize + maxNumberOfArguments));
m_execStorage = m_out.ptrToInt(m_execState, m_out.intPtr);
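// Reserve stack space for the captured (flushed) locals and report the alloca
// to the runtime through a stackmap, so the OSR exit machinery can locate
// those slots within the LLVM-compiled frame.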
LValue capturedAlloca = m_out.alloca(arrayType(m_out.int64, m_graph.m_nextMachineLocal));
m_captured = m_out.add(
m_out.ptrToInt(capturedAlloca, m_out.intPtr),
m_out.constIntPtr(m_graph.m_nextMachineLocal * sizeof(Register)));
m_ftlState.capturedStackmapID = m_stackmapIDs++;
m_out.stackmapIntrinsic(), m_out.constInt64(m_ftlState.capturedStackmapID),
m_out.int32Zero, capturedAlloca);
// If we have any CallVarargs then we need to have a spill slot for it.
bool hasVarargs = false;
for (BasicBlock* block : preOrder) {
for (Node* node : *block) {
switch (node->op()) {
case CallForwardVarargs:
case ConstructVarargs:
case ConstructForwardVarargs:
LValue varargsSpillSlots = m_out.alloca(
arrayType(m_out.int64, JSCallVarargs::numSpillSlotsNeeded()));
m_ftlState.varargsSpillSlotsStackmapID = m_stackmapIDs++;
m_out.stackmapIntrinsic(), m_out.constInt64(m_ftlState.varargsSpillSlotsStackmapID),
m_out.int32Zero, varargsSpillSlots);
// We should not create any alloca's after this point, since they will cease to
// be mem2reg candidates.
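// Materialize the call frame pointer and the NaN-boxing tag constants
// (TagTypeNumber, TagMask) up front; the boxing/unboxing helpers below use them.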
m_callFrame = m_out.ptrToInt(
m_out.call(m_out.frameAddressIntrinsic(), m_out.int32Zero), m_out.intPtr);
m_tagTypeNumber = m_out.constInt64(TagTypeNumber);
m_tagMask = m_out.constInt64(TagMask);
m_out.storePtr(m_out.constIntPtr(codeBlock()), addressFor(JSStack::CodeBlock));
didOverflowStack(), rarely(stackOverflow), usually(checkArguments));
m_out.appendTo(stackOverflow, m_handleExceptions);
m_out.call(m_out.operation(operationThrowStackOverflowError), m_callFrame, m_out.constIntPtr(codeBlock()));
m_ftlState.handleStackOverflowExceptionStackmapID = m_stackmapIDs++;
m_out.stackmapIntrinsic(), m_out.constInt64(m_ftlState.handleStackOverflowExceptionStackmapID),
m_out.constInt32(MacroAssembler::maxJumpReplacementSize()));
m_out.appendTo(m_handleExceptions, checkArguments);
m_ftlState.handleExceptionStackmapID = m_stackmapIDs++;
m_out.stackmapIntrinsic(), m_out.constInt64(m_ftlState.handleExceptionStackmapID),
m_out.constInt32(MacroAssembler::maxJumpReplacementSize()));
m_out.appendTo(checkArguments, lowBlock(m_graph.block(0)));
availabilityMap().clear();
availabilityMap().m_locals = Operands<Availability>(codeBlock()->numParameters(), 0);
for (unsigned i = codeBlock()->numParameters(); i--;) {
availabilityMap().m_locals.argument(i) =
Availability(FlushedAt(FlushedJSValue, virtualRegisterForArgument(i)));
m_codeOriginForExitTarget = CodeOrigin(0);
m_codeOriginForExitProfile = CodeOrigin(0);
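// Check each incoming argument against the format the rest of the compilation
// assumed for it. If an argument does not match (e.g. it is not an int32 where
// FlushedInt32 was expected), we OSR exit before entering block 0.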
for (unsigned i = codeBlock()->numParameters(); i--;) {
Node* node = m_graph.m_arguments[i];
VirtualRegister operand = virtualRegisterForArgument(i);
LValue jsValue = m_out.load64(addressFor(operand));
DFG_ASSERT(m_graph, node, operand == node->stackAccessData()->machineLocal);
// This is a hack, but it's an effective one. It allows us to do CSE on the
// primordial load of arguments. This assumes that the GetLocal that got put in
// place of the original SetArgument doesn't have any effects before it. This
m_loadedArgumentValues.add(node, jsValue);
switch (m_graph.m_argumentFormats[i]) {
speculate(BadType, jsValueValue(jsValue), node, isNotInt32(jsValue));
speculate(BadType, jsValueValue(jsValue), node, isNotBoolean(jsValue));
speculate(BadType, jsValueValue(jsValue), node, isNotCell(jsValue));
DFG_CRASH(m_graph, node, "Bad flush format for argument");
m_out.jump(lowBlock(m_graph.block(0)));
for (BasicBlock* block : preOrder)
if (Options::dumpLLVMIR())
dumpModule(m_ftlState.module);
if (verboseCompilationEnabled())
m_ftlState.dumpState("after lowering");
if (validationEnabled())
verifyModule(m_ftlState.module);
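// Phi nodes are lowered indirectly: each DFG Phi gets a stack slot (alloca)
// here, Upsilons store into that slot, and the Phi itself loads from it.
// LLVM's mem2reg pass is expected to turn the slots back into SSA values.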
void createPhiVariables()
for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
BasicBlock* block = m_graph.block(blockIndex);
for (unsigned nodeIndex = block->size(); nodeIndex--;) {
Node* node = block->at(nodeIndex);
if (node->op() != Phi)
switch (node->flags() & NodeResultMask) {
case NodeResultDouble:
type = m_out.doubleType;
case NodeResultInt32:
case NodeResultInt52:
case NodeResultBoolean:
type = m_out.boolean;
DFG_CRASH(m_graph, node, "Bad Phi node result type");
m_phis.add(node, buildAlloca(m_out.m_builder, type));
void compileBlock(BasicBlock* block)
if (verboseCompilationEnabled())
dataLog("Compiling block ", *block, "\n");
LBasicBlock lowBlock = m_blocks.get(m_highBlock);
for (BlockIndex nextBlockIndex = m_highBlock->index + 1; nextBlockIndex < m_graph.numBlocks(); ++nextBlockIndex) {
m_nextHighBlock = m_graph.block(nextBlockIndex);
m_nextLowBlock = m_nextHighBlock ? m_blocks.get(m_nextHighBlock) : 0;
// All of this effort to find the next block gives us the ability to keep the
// generated IR in roughly program order. This ought not affect the performance
// of the generated code (since we expect LLVM to reorder things) but it will
// make IR dumps easier to read.
m_out.appendTo(lowBlock, m_nextLowBlock);
if (Options::ftlCrashes())
if (!m_highBlock->cfaHasVisited) {
if (verboseCompilationEnabled())
dataLog("Bailing because CFA didn't reach.\n");
crash(m_highBlock->index, UINT_MAX);
m_availabilityCalculator.beginBlock(m_highBlock);
m_state.beginBasicBlock(m_highBlock);
for (m_nodeIndex = 0; m_nodeIndex < m_highBlock->size(); ++m_nodeIndex) {
if (!compileNode(m_nodeIndex))
void safelyInvalidateAfterTermination()
if (verboseCompilationEnabled())
dataLog("Bailing.\n");
// Invalidate dominated blocks. Under normal circumstances we would expect
// them to be invalidated already. But you can have the CFA become more
// precise over time because the structures of objects change on the main
// thread. Failing to do this would result in weird crashes due to a value
// being used but not defined. Race conditions FTW!
for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
BasicBlock* target = m_graph.block(blockIndex);
if (m_graph.m_dominators.dominates(m_highBlock, target)) {
if (verboseCompilationEnabled())
dataLog("Block ", *target, " will bail also.\n");
target->cfaHasVisited = false;
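// Lowers a single DFG node. Returns false if the node terminated the block
// (or proved it unreachable), in which case the caller stops iterating.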
bool compileNode(unsigned nodeIndex)
if (!m_state.isValid()) {
safelyInvalidateAfterTermination();
m_node = m_highBlock->at(nodeIndex);
m_codeOriginForExitProfile = m_node->origin.semantic;
m_codeOriginForExitTarget = m_node->origin.forExit;
if (verboseCompilationEnabled())
dataLog("Lowering ", m_node, "\n");
m_availableRecoveries.resize(0);
m_interpreter.startExecuting();
switch (m_node->op()) {
compileDoubleConstant();
compileInt52Constant();
compileDoubleAsInt32();
compileValueToInt32();
case BooleanToNumber:
compileBooleanToNumber();
case ExtractOSREntryLocal:
compileExtractOSREntryLocal();
compileArithAddOrSub();
compileArithMinOrMax();
compileArithFRound();
compileArithNegate();
compileUInt32ToNumber();
compileCheckStructure();
compileCheckNotEmpty();
compileCheckBadCell();
compileGetExecutable();
case ArrayifyToStructure:
compileArrayifyToStructure();
compilePutStructure();
compileGetButterfly();
case ConstantStoragePointer:
compileConstantStoragePointer();
case GetIndexedPropertyStorage:
compileGetIndexedPropertyStorage();
compileGetArrayLength();
compileCheckInBounds();
case GetMyArgumentByVal:
compileGetMyArgumentByVal();
case CreateActivation:
compileCreateActivation();
compileNewFunction();
case CreateDirectArguments:
compileCreateDirectArguments();
case CreateScopedArguments:
compileCreateScopedArguments();
case CreateClonedArguments:
compileCreateClonedArguments();
compileNewArrayBuffer();
case NewArrayWithSize:
compileNewArrayWithSize();
case GetTypedArrayByteOffset:
compileGetTypedArrayByteOffset();
case AllocatePropertyStorage:
compileAllocatePropertyStorage();
case ReallocatePropertyStorage:
compileReallocatePropertyStorage();
case CallStringConstructor:
compileToStringOrCallStringConstructor();
compileToPrimitive();
compileStringCharAt();
case StringCharCodeAt:
compileStringCharCodeAt();
case GetGetterSetterByOffset:
compileGetByOffset();
case MultiGetByOffset:
compileMultiGetByOffset();
compilePutByOffset();
case MultiPutByOffset:
compileMultiPutByOffset();
compileGetGlobalVar();
compilePutGlobalVar();
compileNotifyWrite();
case GetArgumentCount:
compileGetArgumentCount();
compileGetClosureVar();
compilePutClosureVar();
case GetFromArguments:
compileGetFromArguments();
compilePutToArguments();
case CompareEqConstant:
compileCompareEqConstant();
case CompareStrictEq:
compileCompareStrictEq();
compileCompareLess();
compileCompareLessEq();
compileCompareGreater();
case CompareGreaterEq:
compileCompareGreaterEq();
compileCallOrConstruct();
case CallForwardVarargs:
case ConstructVarargs:
case ConstructForwardVarargs:
compileCallOrConstructVarargs();
compileLoadVarargs();
compileForwardVarargs();
#if ENABLE(FTL_NATIVE_CALL_INLINING)
case NativeConstruct:
compileNativeCallOrConstruct();
compileForceOSRExit();
case ThrowReferenceError:
case InvalidationPoint:
compileInvalidationPoint();
compileIsUndefined();
compileIsObjectOrNull();
case CheckHasInstance:
compileCheckHasInstance();
compileCountExecution();
compileStoreBarrier();
case HasIndexedProperty:
compileHasIndexedProperty();
case HasGenericProperty:
compileHasGenericProperty();
case HasStructureProperty:
compileHasStructureProperty();
compileGetDirectPname();
case GetEnumerableLength:
compileGetEnumerableLength();
case GetPropertyEnumerator:
compileGetPropertyEnumerator();
case GetEnumeratorStructurePname:
compileGetEnumeratorStructurePname();
case GetEnumeratorGenericPname:
compileGetEnumeratorGenericPname();
compileToIndexString();
case CheckStructureImmediate:
compileCheckStructureImmediate();
case MaterializeNewObject:
compileMaterializeNewObject();
case MaterializeCreateActivation:
compileMaterializeCreateActivation();
case PhantomNewObject:
case PhantomNewFunction:
case PhantomCreateActivation:
case PhantomDirectArguments:
case PhantomClonedArguments:
DFG_CRASH(m_graph, m_node, "Unrecognized node in FTL backend");
if (m_node->isTerminal())
if (!m_state.isValid()) {
safelyInvalidateAfterTermination();
m_availabilityCalculator.executeNode(m_node);
m_interpreter.executeEffects(nodeIndex);
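// In DFG SSA, Upsilon nodes represent the inputs to a Phi: an Upsilon stores
// its child into the shadow variable of the Phi it feeds, and the Phi loads
// from it. The shadow variable is the alloca made in createPhiVariables().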
void compileUpsilon()
LValue destination = m_phis.get(m_node->phi());
switch (m_node->child1().useKind()) {
m_out.set(lowDouble(m_node->child1()), destination);
m_out.set(lowInt32(m_node->child1()), destination);
m_out.set(lowInt52(m_node->child1()), destination);
m_out.set(lowBoolean(m_node->child1()), destination);
m_out.set(lowCell(m_node->child1()), destination);
m_out.set(lowJSValue(m_node->child1()), destination);
DFG_CRASH(m_graph, m_node, "Bad use kind");
LValue source = m_phis.get(m_node);
switch (m_node->flags() & NodeResultMask) {
case NodeResultDouble:
setDouble(m_out.get(source));
case NodeResultInt32:
setInt32(m_out.get(source));
case NodeResultInt52:
setInt52(m_out.get(source));
case NodeResultBoolean:
setBoolean(m_out.get(source));
setJSValue(m_out.get(source));
DFG_CRASH(m_graph, m_node, "Bad use kind");
void compileDoubleConstant()
setDouble(m_out.constDouble(m_node->asNumber()));
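// Int52 values have two lowerings: the "shifted" form (shifted left by
// JSValue::int52ShiftAmount) used for overflow-checked arithmetic, and the
// "strict" form holding the plain integer. A constant can cheaply provide both.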
void compileInt52Constant()
int64_t value = m_node->asMachineInt();
setInt52(m_out.constInt64(value << JSValue::int52ShiftAmount));
setStrictInt52(m_out.constInt64(value));
void compileDoubleRep()
switch (m_node->child1().useKind()) {
LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);
setDouble(jsValueToDouble(m_node->child1(), value));
setDouble(strictInt52ToDouble(lowStrictInt52(m_node->child1())));
DFG_CRASH(m_graph, m_node, "Bad use kind");
void compileDoubleAsInt32()
LValue integerValue = convertDoubleToInt32(lowDouble(m_node->child1()), shouldCheckNegativeZero(m_node->arithMode()));
setInt32(integerValue);
void compileValueRep()
switch (m_node->child1().useKind()) {
LValue value = lowDouble(m_node->child1());
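// Purify any impure NaN before boxing: a double whose bit pattern collides
// with the NaN-boxing tag space must be replaced with the canonical PNaN, or
// the boxed result would be misinterpreted as a non-double JSValue.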
if (m_interpreter.needsTypeCheck(m_node->child1(), ~SpecDoubleImpureNaN)) {
value = m_out.select(
m_out.doubleEqual(value, value), value, m_out.constDouble(PNaN));
setJSValue(boxDouble(value));
setJSValue(strictInt52ToJSValue(lowStrictInt52(m_node->child1())));
DFG_CRASH(m_graph, m_node, "Bad use kind");
void compileInt52Rep()
switch (m_node->child1().useKind()) {
setStrictInt52(m_out.signExt(lowInt32(m_node->child1()), m_out.int64));
jsValueToStrictInt52(
m_node->child1(), lowJSValue(m_node->child1(), ManualOperandSpeculation)));
case DoubleRepMachineIntUse:
doubleToStrictInt52(
m_node->child1(), lowDouble(m_node->child1())));
RELEASE_ASSERT_NOT_REACHED();
void compileValueToInt32()
switch (m_node->child1().useKind()) {
setInt32(m_out.castToInt32(lowStrictInt52(m_node->child1())));
setInt32(doubleToInt32(lowDouble(m_node->child1())));
LoweredNodeValue value = m_int32Values.get(m_node->child1().node());
if (isValid(value)) {
setInt32(value.value());
value = m_jsValueValues.get(m_node->child1().node());
if (isValid(value)) {
setInt32(numberOrNotCellToInt32(m_node->child1(), value.value()));
// We'll basically just get here for constants. But it's good to have this
// catch-all since we often add new representations into the mix.
numberOrNotCellToInt32(
lowJSValue(m_node->child1(), ManualOperandSpeculation)));
DFG_CRASH(m_graph, m_node, "Bad use kind");
void compileBooleanToNumber()
switch (m_node->child1().useKind()) {
setInt32(m_out.zeroExt(lowBoolean(m_node->child1()), m_out.int32));
LValue value = lowJSValue(m_node->child1());
if (!m_interpreter.needsTypeCheck(m_node->child1(), SpecBoolInt32 | SpecBoolean)) {
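// Boolean JSValues (ValueFalse/ValueTrue) differ only in their low bit, and a
// boxed int32 0/1 also carries its value in the low bit, so masking the low
// bit of the encoded value yields the numeric result without a branch.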
setInt32(m_out.bitAnd(m_out.castToInt32(value), m_out.int32One));
LBasicBlock booleanCase = FTL_NEW_BLOCK(m_out, ("BooleanToNumber boolean case"));
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("BooleanToNumber continuation"));
ValueFromBlock notBooleanResult = m_out.anchor(value);
isBoolean(value, provenType(m_node->child1())),
unsure(booleanCase), unsure(continuation));
LBasicBlock lastNext = m_out.appendTo(booleanCase, continuation);
ValueFromBlock booleanResult = m_out.anchor(m_out.bitOr(
m_out.zeroExt(unboxBoolean(value), m_out.int64), m_tagTypeNumber));
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
setJSValue(m_out.phi(m_out.int64, booleanResult, notBooleanResult));
RELEASE_ASSERT_NOT_REACHED();
void compileExtractOSREntryLocal()
EncodedJSValue* buffer = static_cast<EncodedJSValue*>(
m_ftlState.jitCode->ftlForOSREntry()->entryBuffer()->dataBuffer());
setJSValue(m_out.load64(m_out.absolute(buffer + m_node->unlinkedLocal().toLocal())));
void compileGetStack()
// GetLocals arise only for captured variables and arguments. For arguments, we might have
// already loaded it.
if (LValue value = m_loadedArgumentValues.get(m_node)) {
StackAccessData* data = m_node->stackAccessData();
AbstractValue& value = m_state.variables().operand(data->local);
DFG_ASSERT(m_graph, m_node, isConcrete(data->format));
DFG_ASSERT(m_graph, m_node, data->format != FlushedDouble); // This just happens to not arise for GetStacks, right now. It would be trivial to support.
if (isInt32Speculation(value.m_type))
setInt32(m_out.load32(payloadFor(data->machineLocal)));
setJSValue(m_out.load64(addressFor(data->machineLocal)));
void compilePutStack()
StackAccessData* data = m_node->stackAccessData();
switch (data->format) {
case FlushedJSValue: {
LValue value = lowJSValue(m_node->child1());
m_out.store64(value, addressFor(data->machineLocal));
case FlushedDouble: {
LValue value = lowDouble(m_node->child1());
m_out.storeDouble(value, addressFor(data->machineLocal));
case FlushedInt32: {
LValue value = lowInt32(m_node->child1());
m_out.store32(value, payloadFor(data->machineLocal));
case FlushedInt52: {
LValue value = lowInt52(m_node->child1());
m_out.store64(value, addressFor(data->machineLocal));
LValue value = lowCell(m_node->child1());
m_out.store64(value, addressFor(data->machineLocal));
case FlushedBoolean: {
speculateBoolean(m_node->child1());
lowJSValue(m_node->child1(), ManualOperandSpeculation),
addressFor(data->machineLocal));
DFG_CRASH(m_graph, m_node, "Bad flush format");
DFG_NODE_DO_TO_CHILDREN(m_graph, m_node, speculate);
void compileToThis()
LValue value = lowJSValue(m_node->child1());
LBasicBlock isCellCase = FTL_NEW_BLOCK(m_out, ("ToThis is cell case"));
LBasicBlock slowCase = FTL_NEW_BLOCK(m_out, ("ToThis slow case"));
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ToThis continuation"));
isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));
LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
ValueFromBlock fastResult = m_out.anchor(value);
m_out.branch(isType(value, FinalObjectType), usually(continuation), rarely(slowCase));
m_out.appendTo(slowCase, continuation);
J_JITOperation_EJ function;
if (m_graph.isStrictModeFor(m_node->origin.semantic))
function = operationToThisStrict;
function = operationToThis;
ValueFromBlock slowResult = m_out.anchor(
vmCall(m_out.operation(function), m_callFrame, value));
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
setJSValue(m_out.phi(m_out.int64, fastResult, slowResult));
void compileValueAdd()
J_JITOperation_EJJ operation;
if (!(provenType(m_node->child1()) & SpecFullNumber)
&& !(provenType(m_node->child2()) & SpecFullNumber))
operation = operationValueAddNotNumber;
operation = operationValueAdd;
m_out.operation(operation), m_callFrame,
lowJSValue(m_node->child1()), lowJSValue(m_node->child2())));
void compileArithAddOrSub()
bool isSub = m_node->op() == ArithSub;
switch (m_node->binaryUseKind()) {
LValue left = lowInt32(m_node->child1());
LValue right = lowInt32(m_node->child2());
if (!shouldCheckOverflow(m_node->arithMode())) {
setInt32(isSub ? m_out.sub(left, right) : m_out.add(left, right));
result = m_out.addWithOverflow32(left, right);
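// Record how to recover the killed operand from the overflow-checked result,
// so an OSR exit taken later can rematerialize it (e.g. left = result - right)
// instead of keeping the original value alive in a register.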
if (doesKill(m_node->child2())) {
addAvailableRecovery(
m_node->child2(), SubRecovery,
m_out.extractValue(result, 0), left, ValueFormatInt32);
} else if (doesKill(m_node->child1())) {
addAvailableRecovery(
m_node->child1(), SubRecovery,
m_out.extractValue(result, 0), right, ValueFormatInt32);
result = m_out.subWithOverflow32(left, right);
if (doesKill(m_node->child2())) {
// result = left - right
// result - left = -right
// right = left - result
addAvailableRecovery(
m_node->child2(), SubRecovery,
left, m_out.extractValue(result, 0), ValueFormatInt32);
} else if (doesKill(m_node->child1())) {
// result = left - right
// result + right = left
addAvailableRecovery(
m_node->child1(), AddRecovery,
m_out.extractValue(result, 0), right, ValueFormatInt32);
speculate(Overflow, noValue(), 0, m_out.extractValue(result, 1));
setInt32(m_out.extractValue(result, 0));
if (!abstractValue(m_node->child1()).couldBeType(SpecInt52)
&& !abstractValue(m_node->child2()).couldBeType(SpecInt52)) {
LValue left = lowWhicheverInt52(m_node->child1(), kind);
LValue right = lowInt52(m_node->child2(), kind);
setInt52(isSub ? m_out.sub(left, right) : m_out.add(left, right), kind);
LValue left = lowInt52(m_node->child1());
LValue right = lowInt52(m_node->child2());
result = m_out.addWithOverflow64(left, right);
if (doesKill(m_node->child2())) {
addAvailableRecovery(
m_node->child2(), SubRecovery,
m_out.extractValue(result, 0), left, ValueFormatInt52);
} else if (doesKill(m_node->child1())) {
addAvailableRecovery(
m_node->child1(), SubRecovery,
m_out.extractValue(result, 0), right, ValueFormatInt52);
result = m_out.subWithOverflow64(left, right);
if (doesKill(m_node->child2())) {
// result = left - right
// result - left = -right
// right = left - result
addAvailableRecovery(
m_node->child2(), SubRecovery,
left, m_out.extractValue(result, 0), ValueFormatInt52);
} else if (doesKill(m_node->child1())) {
// result = left - right
// result + right = left
addAvailableRecovery(
m_node->child1(), AddRecovery,
m_out.extractValue(result, 0), right, ValueFormatInt52);
speculate(Int52Overflow, noValue(), 0, m_out.extractValue(result, 1));
setInt52(m_out.extractValue(result, 0));
case DoubleRepUse: {
LValue C1 = lowDouble(m_node->child1());
LValue C2 = lowDouble(m_node->child2());
setDouble(isSub ? m_out.doubleSub(C1, C2) : m_out.doubleAdd(C1, C2));
DFG_CRASH(m_graph, m_node, "Bad use kind");
void compileArithClz32()
LValue operand = lowInt32(m_node->child1());
LValue isZeroUndef = m_out.booleanFalse;
setInt32(m_out.ctlz32(operand, isZeroUndef));
void compileArithMul()
switch (m_node->binaryUseKind()) {
LValue left = lowInt32(m_node->child1());
LValue right = lowInt32(m_node->child2());
if (!shouldCheckOverflow(m_node->arithMode()))
result = m_out.mul(left, right);
LValue overflowResult = m_out.mulWithOverflow32(left, right);
speculate(Overflow, noValue(), 0, m_out.extractValue(overflowResult, 1));
result = m_out.extractValue(overflowResult, 0);
if (shouldCheckNegativeZero(m_node->arithMode())) {
LBasicBlock slowCase = FTL_NEW_BLOCK(m_out, ("ArithMul slow case"));
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ArithMul continuation"));
m_out.notZero32(result), usually(continuation), rarely(slowCase));
LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
LValue cond = m_out.bitOr(m_out.lessThan(left, m_out.int32Zero), m_out.lessThan(right, m_out.int32Zero));
speculate(NegativeZero, noValue(), 0, cond);
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
LValue left = lowWhicheverInt52(m_node->child1(), kind);
LValue right = lowInt52(m_node->child2(), opposite(kind));
LValue overflowResult = m_out.mulWithOverflow64(left, right);
speculate(Int52Overflow, noValue(), 0, m_out.extractValue(overflowResult, 1));
LValue result = m_out.extractValue(overflowResult, 0);
if (shouldCheckNegativeZero(m_node->arithMode())) {
LBasicBlock slowCase = FTL_NEW_BLOCK(m_out, ("ArithMul slow case"));
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ArithMul continuation"));
m_out.notZero64(result), usually(continuation), rarely(slowCase));
LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
LValue cond = m_out.bitOr(m_out.lessThan(left, m_out.int64Zero), m_out.lessThan(right, m_out.int64Zero));
speculate(NegativeZero, noValue(), 0, cond);
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
case DoubleRepUse: {
m_out.doubleMul(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
DFG_CRASH(m_graph, m_node, "Bad use kind");
void compileArithDiv()
switch (m_node->binaryUseKind()) {
LValue numerator = lowInt32(m_node->child1());
LValue denominator = lowInt32(m_node->child2());
LBasicBlock unsafeDenominator = FTL_NEW_BLOCK(m_out, ("ArithDiv unsafe denominator"));
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ArithDiv continuation"));
LBasicBlock done = FTL_NEW_BLOCK(m_out, ("ArithDiv done"));
Vector<ValueFromBlock, 3> results;
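// Unsigned trick: denominator + 1 is above 1 exactly when the denominator is
// neither 0 nor -1, i.e. when the division cannot trap (divide by zero or
// INT_MIN / -1 overflow). Only the unsafe cases take the slow branch below.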
LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
m_out.above(adjustedDenominator, m_out.int32One),
usually(continuation), rarely(unsafeDenominator));
LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
if (shouldCheckOverflow(m_node->arithMode())) {
LValue cond = m_out.bitOr(m_out.isZero32(denominator), m_out.equal(numerator, neg2ToThe31));
speculate(Overflow, noValue(), 0, cond);
m_out.jump(continuation);
// This is the case where we convert the result to an int after we're done. So,
// if the denominator is zero, then the result should be zero.
// If the denominator is not zero (i.e. it's -1 because we're guarded by the
// check above) and the numerator is -2^31 then the result should be -2^31.
LBasicBlock divByZero = FTL_NEW_BLOCK(m_out, ("ArithDiv divide by zero"));
LBasicBlock notDivByZero = FTL_NEW_BLOCK(m_out, ("ArithDiv not divide by zero"));
LBasicBlock neg2ToThe31ByNeg1 = FTL_NEW_BLOCK(m_out, ("ArithDiv -2^31/-1"));
m_out.isZero32(denominator), rarely(divByZero), usually(notDivByZero));
m_out.appendTo(divByZero, notDivByZero);
results.append(m_out.anchor(m_out.int32Zero));
m_out.appendTo(notDivByZero, neg2ToThe31ByNeg1);
m_out.equal(numerator, neg2ToThe31),
rarely(neg2ToThe31ByNeg1), usually(continuation));
m_out.appendTo(neg2ToThe31ByNeg1, continuation);
results.append(m_out.anchor(neg2ToThe31));
m_out.appendTo(continuation, done);
if (shouldCheckNegativeZero(m_node->arithMode())) {
LBasicBlock zeroNumerator = FTL_NEW_BLOCK(m_out, ("ArithDiv zero numerator"));
LBasicBlock numeratorContinuation = FTL_NEW_BLOCK(m_out, ("ArithDiv numerator continuation"));
m_out.isZero32(numerator),
rarely(zeroNumerator), usually(numeratorContinuation));
LBasicBlock innerLastNext = m_out.appendTo(zeroNumerator, numeratorContinuation);
NegativeZero, noValue(), 0, m_out.lessThan(denominator, m_out.int32Zero));
m_out.jump(numeratorContinuation);
m_out.appendTo(numeratorContinuation, innerLastNext);
LValue result = m_out.div(numerator, denominator);
if (shouldCheckOverflow(m_node->arithMode())) {
Overflow, noValue(), 0,
m_out.notEqual(m_out.mul(result, denominator), numerator));
results.append(m_out.anchor(result));
m_out.appendTo(done, lastNext);
setInt32(m_out.phi(m_out.int32, results));
case DoubleRepUse: {
setDouble(m_out.doubleDiv(
lowDouble(m_node->child1()), lowDouble(m_node->child2())));
DFG_CRASH(m_graph, m_node, "Bad use kind");
void compileArithMod()
switch (m_node->binaryUseKind()) {
LValue numerator = lowInt32(m_node->child1());
LValue denominator = lowInt32(m_node->child2());
LBasicBlock unsafeDenominator = FTL_NEW_BLOCK(m_out, ("ArithMod unsafe denominator"));
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ArithMod continuation"));
LBasicBlock done = FTL_NEW_BLOCK(m_out, ("ArithMod done"));
Vector<ValueFromBlock, 3> results;
LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
m_out.above(adjustedDenominator, m_out.int32One),
usually(continuation), rarely(unsafeDenominator));
LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
// FIXME: -2^31 / -1 will actually yield negative zero, so we could have a
// separate case for that. But it probably doesn't matter so much.
if (shouldCheckOverflow(m_node->arithMode())) {
LValue cond = m_out.bitOr(m_out.isZero32(denominator), m_out.equal(numerator, neg2ToThe31));
speculate(Overflow, noValue(), 0, cond);
m_out.jump(continuation);
// This is the case where we convert the result to an int after we're done. So,
// if the denominator is zero, then the result should be zero.
// If the denominator is not zero (i.e. it's -1 because we're guarded by the
// check above) and the numerator is -2^31 then the result should be -2^31.
LBasicBlock modByZero = FTL_NEW_BLOCK(m_out, ("ArithMod modulo by zero"));
LBasicBlock notModByZero = FTL_NEW_BLOCK(m_out, ("ArithMod not modulo by zero"));
LBasicBlock neg2ToThe31ByNeg1 = FTL_NEW_BLOCK(m_out, ("ArithMod -2^31/-1"));
m_out.isZero32(denominator), rarely(modByZero), usually(notModByZero));
m_out.appendTo(modByZero, notModByZero);
results.append(m_out.anchor(m_out.int32Zero));
m_out.appendTo(notModByZero, neg2ToThe31ByNeg1);
m_out.equal(numerator, neg2ToThe31),
rarely(neg2ToThe31ByNeg1), usually(continuation));
m_out.appendTo(neg2ToThe31ByNeg1, continuation);
results.append(m_out.anchor(m_out.int32Zero));
m_out.appendTo(continuation, done);
LValue remainder = m_out.rem(numerator, denominator);
if (shouldCheckNegativeZero(m_node->arithMode())) {
LBasicBlock negativeNumerator = FTL_NEW_BLOCK(m_out, ("ArithMod negative numerator"));
LBasicBlock numeratorContinuation = FTL_NEW_BLOCK(m_out, ("ArithMod numerator continuation"));
m_out.lessThan(numerator, m_out.int32Zero),
unsure(negativeNumerator), unsure(numeratorContinuation));
LBasicBlock innerLastNext = m_out.appendTo(negativeNumerator, numeratorContinuation);
speculate(NegativeZero, noValue(), 0, m_out.isZero32(remainder));
m_out.jump(numeratorContinuation);
m_out.appendTo(numeratorContinuation, innerLastNext);
results.append(m_out.anchor(remainder));
m_out.appendTo(done, lastNext);
setInt32(m_out.phi(m_out.int32, results));
case DoubleRepUse: {
m_out.doubleRem(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
DFG_CRASH(m_graph, m_node, "Bad use kind");
void compileArithMinOrMax()
switch (m_node->binaryUseKind()) {
LValue left = lowInt32(m_node->child1());
LValue right = lowInt32(m_node->child2());
m_node->op() == ArithMin
? m_out.lessThan(left, right)
: m_out.lessThan(right, left),
case DoubleRepUse: {
LValue left = lowDouble(m_node->child1());
LValue right = lowDouble(m_node->child2());
LBasicBlock notLessThan = FTL_NEW_BLOCK(m_out, ("ArithMin/ArithMax not less than"));
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ArithMin/ArithMax continuation"));
Vector<ValueFromBlock, 2> results;
results.append(m_out.anchor(left));
m_node->op() == ArithMin
? m_out.doubleLessThan(left, right)
: m_out.doubleGreaterThan(left, right),
unsure(continuation), unsure(notLessThan));
LBasicBlock lastNext = m_out.appendTo(notLessThan, continuation);
results.append(m_out.anchor(m_out.select(
m_node->op() == ArithMin
? m_out.doubleGreaterThanOrEqual(left, right)
: m_out.doubleLessThanOrEqual(left, right),
right, m_out.constDouble(PNaN))));
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
setDouble(m_out.phi(m_out.doubleType, results));
DFG_CRASH(m_graph, m_node, "Bad use kind");
void compileArithAbs()
switch (m_node->child1().useKind()) {
LValue value = lowInt32(m_node->child1());
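// Branchless abs(): mask is 0 for non-negative values and -1 for negative
// ones, so (value + mask) ^ mask negates negative values and leaves others
// unchanged. The only overflow case is INT32_MIN, which is checked below.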
LValue mask = m_out.aShr(value, m_out.constInt32(31));
LValue result = m_out.bitXor(mask, m_out.add(mask, value));
speculate(Overflow, noValue(), 0, m_out.equal(result, m_out.constInt32(1 << 31)));
case DoubleRepUse: {
setDouble(m_out.doubleAbs(lowDouble(m_node->child1())));
DFG_CRASH(m_graph, m_node, "Bad use kind");
void compileArithSin() { setDouble(m_out.doubleSin(lowDouble(m_node->child1()))); }
void compileArithCos() { setDouble(m_out.doubleCos(lowDouble(m_node->child1()))); }
void compileArithPow()
// FIXME: investigate llvm.powi to better understand its performance characteristics.
// It might be better to have the inline loop in DFG too.
if (m_node->child2().useKind() == Int32Use)
setDouble(m_out.doublePowi(lowDouble(m_node->child1()), lowInt32(m_node->child2())));
LValue base = lowDouble(m_node->child1());
LValue exponent = lowDouble(m_node->child2());
LBasicBlock integerExponentIsSmallBlock = FTL_NEW_BLOCK(m_out, ("ArithPow test integer exponent is small."));
LBasicBlock integerExponentPowBlock = FTL_NEW_BLOCK(m_out, ("ArithPow pow(double, (int)double)."));
LBasicBlock doubleExponentPowBlockEntry = FTL_NEW_BLOCK(m_out, ("ArithPow pow(double, double)."));
LBasicBlock nanExceptionExponentIsInfinity = FTL_NEW_BLOCK(m_out, ("ArithPow NaN Exception, check exponent is infinity."));
LBasicBlock nanExceptionBaseIsOne = FTL_NEW_BLOCK(m_out, ("ArithPow NaN Exception, check base is one."));
LBasicBlock powBlock = FTL_NEW_BLOCK(m_out, ("ArithPow regular pow"));
LBasicBlock nanExceptionResultIsNaN = FTL_NEW_BLOCK(m_out, ("ArithPow NaN Exception, result is NaN."));
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ArithPow continuation"));
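// Strategy: if the exponent is a small integer, take the cheaper powi path;
// otherwise fall through to a full pow() call, with explicit handling of the
// cases where JS requires NaN (a NaN exponent, or |base| == 1 with an infinite
// exponent), since the libc pow() result may not match the spec there.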
LValue integerExponent = m_out.fpToInt32(exponent);
LValue integerExponentConvertedToDouble = m_out.intToDouble(integerExponent);
LValue exponentIsInteger = m_out.doubleEqual(exponent, integerExponentConvertedToDouble);
m_out.branch(exponentIsInteger, unsure(integerExponentIsSmallBlock), unsure(doubleExponentPowBlockEntry));
LBasicBlock lastNext = m_out.appendTo(integerExponentIsSmallBlock, integerExponentPowBlock);
LValue integerExponentBelow1000 = m_out.below(integerExponent, m_out.constInt32(1000));
m_out.branch(integerExponentBelow1000, usually(integerExponentPowBlock), rarely(doubleExponentPowBlockEntry));
m_out.appendTo(integerExponentPowBlock, doubleExponentPowBlockEntry);
ValueFromBlock powDoubleIntResult = m_out.anchor(m_out.doublePowi(base, integerExponent));
m_out.jump(continuation);
// If y is NaN, the result is NaN.
m_out.appendTo(doubleExponentPowBlockEntry, nanExceptionExponentIsInfinity);
LValue exponentIsNaN;
if (provenType(m_node->child2()) & SpecDoubleNaN)
exponentIsNaN = m_out.doubleNotEqualOrUnordered(exponent, exponent);
exponentIsNaN = m_out.booleanFalse;
m_out.branch(exponentIsNaN, rarely(nanExceptionResultIsNaN), usually(nanExceptionExponentIsInfinity));
// If abs(x) is 1 and y is +infinity, the result is NaN.
// If abs(x) is 1 and y is -infinity, the result is NaN.
m_out.appendTo(nanExceptionExponentIsInfinity, nanExceptionBaseIsOne);
LValue absoluteExponent = m_out.doubleAbs(exponent);
LValue absoluteExponentIsInfinity = m_out.doubleEqual(absoluteExponent, m_out.constDouble(std::numeric_limits<double>::infinity()));
m_out.branch(absoluteExponentIsInfinity, rarely(nanExceptionBaseIsOne), usually(powBlock));
m_out.appendTo(nanExceptionBaseIsOne, powBlock);
LValue absoluteBase = m_out.doubleAbs(base);
LValue absoluteBaseIsOne = m_out.doubleEqual(absoluteBase, m_out.constDouble(1));
m_out.branch(absoluteBaseIsOne, unsure(nanExceptionResultIsNaN), unsure(powBlock));
m_out.appendTo(powBlock, nanExceptionResultIsNaN);
ValueFromBlock powResult = m_out.anchor(m_out.doublePow(base, exponent));
m_out.jump(continuation);
m_out.appendTo(nanExceptionResultIsNaN, continuation);
ValueFromBlock pureNan = m_out.anchor(m_out.constDouble(PNaN));
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
setDouble(m_out.phi(m_out.doubleType, powDoubleIntResult, powResult, pureNan));
void compileArithRound()
LBasicBlock realPartIsMoreThanHalf = FTL_NEW_BLOCK(m_out, ("ArithRound should round down"));
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ArithRound continuation"));
LValue value = lowDouble(m_node->child1());
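// Round-half-up via ceil(): take ceil(value), then subtract 1 when
// ceil(value) - value > 0.5 (i.e. the fractional part was below one half).
// Ties stay at the ceiling, matching Math.round's round-half-towards-+inf.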
LValue integerValue = m_out.ceil64(value);
ValueFromBlock integerValueResult = m_out.anchor(integerValue);
LValue realPart = m_out.doubleSub(integerValue, value);
m_out.branch(m_out.doubleGreaterThanOrUnordered(realPart, m_out.constDouble(0.5)), unsure(realPartIsMoreThanHalf), unsure(continuation));
LBasicBlock lastNext = m_out.appendTo(realPartIsMoreThanHalf, continuation);
LValue integerValueRoundedDown = m_out.doubleSub(integerValue, m_out.constDouble(1));
ValueFromBlock integerValueRoundedDownResult = m_out.anchor(integerValueRoundedDown);
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
LValue result = m_out.phi(m_out.doubleType, integerValueResult, integerValueRoundedDownResult);
if (producesInteger(m_node->arithRoundingMode())) {
LValue integerValue = convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode()));
setInt32(integerValue);
void compileArithSqrt() { setDouble(m_out.doubleSqrt(lowDouble(m_node->child1()))); }
void compileArithLog() { setDouble(m_out.doubleLog(lowDouble(m_node->child1()))); }
void compileArithFRound()
LValue floatValue = m_out.fpCast(lowDouble(m_node->child1()), m_out.floatType);
setDouble(m_out.fpCast(floatValue, m_out.doubleType));
void compileArithNegate()
switch (m_node->child1().useKind()) {
LValue value = lowInt32(m_node->child1());
if (!shouldCheckOverflow(m_node->arithMode()))
result = m_out.neg(value);
else if (!shouldCheckNegativeZero(m_node->arithMode())) {
// We don't have a negate-with-overflow intrinsic. Hopefully this
// does the trick, though.
LValue overflowResult = m_out.subWithOverflow32(m_out.int32Zero, value);
speculate(Overflow, noValue(), 0, m_out.extractValue(overflowResult, 1));
result = m_out.extractValue(overflowResult, 0);
speculate(Overflow, noValue(), 0, m_out.testIsZero32(value, m_out.constInt32(0x7fffffff)));
result = m_out.neg(value);
if (!abstractValue(m_node->child1()).couldBeType(SpecInt52)) {
LValue value = lowWhicheverInt52(m_node->child1(), kind);
LValue result = m_out.neg(value);
if (shouldCheckNegativeZero(m_node->arithMode()))
speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
setInt52(result, kind);
LValue value = lowInt52(m_node->child1());
LValue overflowResult = m_out.subWithOverflow64(m_out.int64Zero, value);
speculate(Int52Overflow, noValue(), 0, m_out.extractValue(overflowResult, 1));
LValue result = m_out.extractValue(overflowResult, 0);
speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
case DoubleRepUse: {
setDouble(m_out.doubleNeg(lowDouble(m_node->child1())));
DFG_CRASH(m_graph, m_node, "Bad use kind");
void compileBitAnd()
setInt32(m_out.bitAnd(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
setInt32(m_out.bitOr(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
void compileBitXor()
setInt32(m_out.bitXor(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
void compileBitRShift()
setInt32(m_out.aShr(
lowInt32(m_node->child1()),
m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
void compileBitLShift()
lowInt32(m_node->child1()),
m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
void compileBitURShift()
setInt32(m_out.lShr(
lowInt32(m_node->child1()),
m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
void compileUInt32ToNumber()
LValue value = lowInt32(m_node->child1());
if (doesOverflow(m_node->arithMode())) {
setDouble(m_out.unsignedToDouble(value));
speculate(Overflow, noValue(), 0, m_out.lessThan(value, m_out.int32Zero));
void compileCheckStructure()
LValue cell = lowCell(m_node->child1());
if (m_node->child1()->hasConstant())
exitKind = BadConstantCache;
exitKind = BadCache;
LValue structureID = m_out.load32(cell, m_heaps.JSCell_structureID);
structureID, jsValueValue(cell), exitKind, m_node->structureSet(),
[this] (Structure* structure) {
return weakStructureID(structure);
void compileCheckCell()
LValue cell = lowCell(m_node->child1());
BadCell, jsValueValue(cell), m_node->child1().node(),
m_out.notEqual(cell, weakPointer(m_node->cellOperand()->cell())));
void compileCheckBadCell()
void compileCheckNotEmpty()
speculate(TDZFailure, noValue(), nullptr, m_out.isZero64(lowJSValue(m_node->child1())));
void compileGetExecutable()
LValue cell = lowCell(m_node->child1());
speculateFunction(m_node->child1(), cell);
setJSValue(m_out.loadPtr(cell, m_heaps.JSFunction_executable));
void compileArrayifyToStructure()
LValue cell = lowCell(m_node->child1());
LValue property = !!m_node->child2() ? lowInt32(m_node->child2()) : 0;
LBasicBlock unexpectedStructure = FTL_NEW_BLOCK(m_out, ("ArrayifyToStructure unexpected structure"));
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ArrayifyToStructure continuation"));
LValue structureID = m_out.load32(cell, m_heaps.JSCell_structureID);
m_out.notEqual(structureID, weakStructureID(m_node->structure())),
rarely(unexpectedStructure), usually(continuation));
LBasicBlock lastNext = m_out.appendTo(unexpectedStructure, continuation);
switch (m_node->arrayMode().type()) {
case Array::Contiguous:
Uncountable, noValue(), 0,
m_out.aboveOrEqual(property, m_out.constInt32(MIN_SPARSE_ARRAY_INDEX)));
switch (m_node->arrayMode().type()) {
vmCall(m_out.operation(operationEnsureInt32), m_callFrame, cell);
vmCall(m_out.operation(operationEnsureDouble), m_callFrame, cell);
case Array::Contiguous:
vmCall(m_out.operation(operationEnsureContiguous), m_callFrame, cell);
case Array::ArrayStorage:
case Array::SlowPutArrayStorage:
vmCall(m_out.operation(operationEnsureArrayStorage), m_callFrame, cell);
DFG_CRASH(m_graph, m_node, "Bad array type");
structureID = m_out.load32(cell, m_heaps.JSCell_structureID);
BadIndexingType, jsValueValue(cell), 0,
m_out.notEqual(structureID, weakStructureID(m_node->structure())));
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
void compilePutStructure()
m_ftlState.jitCode->common.notifyCompilingStructureTransition(m_graph.m_plan, codeBlock(), m_node);
Structure* oldStructure = m_node->transition()->previous;
Structure* newStructure = m_node->transition()->next;
ASSERT_UNUSED(oldStructure, oldStructure->indexingType() == newStructure->indexingType());
ASSERT(oldStructure->typeInfo().inlineTypeFlags() == newStructure->typeInfo().inlineTypeFlags());
ASSERT(oldStructure->typeInfo().type() == newStructure->typeInfo().type());
LValue cell = lowCell(m_node->child1());
weakStructureID(newStructure),
cell, m_heaps.JSCell_structureID);
void compileGetById()
// Pretty much the only reason why we don't also support GetByIdFlush is because:
// https://bugs.webkit.org/show_bug.cgi?id=125711
switch (m_node->child1().useKind()) {
setJSValue(getById(lowCell(m_node->child1())));
// This is pretty weird, since we duplicate the slow path both here and in the
// code generated by the IC. We should investigate making this less bad.
// https://bugs.webkit.org/show_bug.cgi?id=127830
LValue value = lowJSValue(m_node->child1());
LBasicBlock cellCase = FTL_NEW_BLOCK(m_out, ("GetById untyped cell case"));
LBasicBlock notCellCase = FTL_NEW_BLOCK(m_out, ("GetById untyped not cell case"));
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("GetById untyped continuation"));
isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase));
LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
ValueFromBlock cellResult = m_out.anchor(getById(value));
m_out.jump(continuation);
m_out.appendTo(notCellCase, continuation);
ValueFromBlock notCellResult = m_out.anchor(vmCall(
m_out.operation(operationGetById),
m_callFrame, getUndef(m_out.intPtr), value,
m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
setJSValue(m_out.phi(m_out.int64, cellResult, notCellResult));
DFG_CRASH(m_graph, m_node, "Bad use kind");
void compilePutById()
// See above; CellUse is easier so we do only that for now.
ASSERT(m_node->child1().useKind() == CellUse);
LValue base = lowCell(m_node->child1());
LValue value = lowJSValue(m_node->child2());
auto uid = m_graph.identifiers()[m_node->identifierNumber()];
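// PutById is emitted as an LLVM patchpoint: the stackmap ID ties this site to
// a PutByIdDescriptor so that, once LLVM has compiled the function, the FTL
// backend can patch an inline cache of sizeOfPutById() bytes into the reserved
// space, using the registers LLVM reports for base and value.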
// Arguments: id, bytes, target, numArgs, args...
unsigned stackmapID = m_stackmapIDs++;
if (verboseCompilationEnabled())
dataLog("    Emitting PutById patchpoint with stackmap #", stackmapID, "\n");
LValue call = m_out.call(
m_out.patchpointVoidIntrinsic(),
m_out.constInt64(stackmapID), m_out.constInt32(sizeOfPutById()),
constNull(m_out.ref8), m_out.constInt32(2), base, value);
setInstructionCallingConvention(call, LLVMAnyRegCallConv);
m_ftlState.putByIds.append(PutByIdDescriptor(
stackmapID, m_node->origin.semantic, uid,
m_graph.executableFor(m_node->origin.semantic)->ecmaMode(),
m_node->op() == PutByIdDirect ? Direct : NotDirect));
void compileGetButterfly()
setStorage(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSObject_butterfly));
void compileConstantStoragePointer()
setStorage(m_out.constIntPtr(m_node->storagePointer()));
void compileGetIndexedPropertyStorage()
LValue cell = lowCell(m_node->child1());
if (m_node->arrayMode().type() == Array::String) {
LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("GetIndexedPropertyStorage String slow case"));
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("GetIndexedPropertyStorage String continuation"));
ValueFromBlock fastResult = m_out.anchor(
m_out.loadPtr(cell, m_heaps.JSString_value));
m_out.notNull(fastResult.value()), usually(continuation), rarely(slowPath));
LBasicBlock lastNext = m_out.appendTo(slowPath, continuation);
ValueFromBlock slowResult = m_out.anchor(
vmCall(m_out.operation(operationResolveRope), m_callFrame, cell));
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
setStorage(m_out.loadPtr(m_out.phi(m_out.intPtr, fastResult, slowResult), m_heaps.StringImpl_data));
setStorage(m_out.loadPtr(cell, m_heaps.JSArrayBufferView_vector));
void compileCheckArray()
Edge edge = m_node->child1();
LValue cell = lowCell(edge);
if (m_node->arrayMode().alreadyChecked(m_graph, m_node, abstractValue(edge)))
BadIndexingType, jsValueValue(cell), 0,
m_out.bitNot(isArrayType(cell, m_node->arrayMode())));
void compileGetTypedArrayByteOffset()
LValue basePtr = lowCell(m_node->child1());
LBasicBlock simpleCase = FTL_NEW_BLOCK(m_out, ("wasteless typed array"));
LBasicBlock wastefulCase = FTL_NEW_BLOCK(m_out, ("wasteful typed array"));
LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("continuation branch"));
LValue mode = m_out.load32(basePtr, m_heaps.JSArrayBufferView_mode);
m_out.notEqual(mode, m_out.constInt32(WastefulTypedArray)),
unsure(simpleCase), unsure(wastefulCase));
// begin simple case
LBasicBlock lastNext = m_out.appendTo(simpleCase, wastefulCase);
ValueFromBlock simpleOut = m_out.anchor(m_out.constIntPtr(0));
m_out.jump(continuation);
// begin wasteful case
m_out.appendTo(wastefulCase, continuation);
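// For a wasteful typed array the butterfly points at the ArrayBuffer, and the
// byte offset is simply the distance between the view's vector pointer and the
// buffer's backing-store data pointer.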
LValue vectorPtr = m_out.loadPtr(basePtr, m_heaps.JSArrayBufferView_vector);
LValue butterflyPtr = m_out.loadPtr(basePtr, m_heaps.JSObject_butterfly);
LValue arrayBufferPtr = m_out.loadPtr(butterflyPtr, m_heaps.Butterfly_arrayBuffer);
LValue dataPtr = m_out.loadPtr(arrayBufferPtr, m_heaps.ArrayBuffer_data);
ValueFromBlock wastefulOut = m_out.anchor(m_out.sub(vectorPtr, dataPtr));
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
setInt32(m_out.castToInt32(m_out.phi(m_out.intPtr, simpleOut, wastefulOut)));
void compileGetArrayLength()
switch (m_node->arrayMode().type()) {
case Array::Contiguous: {
setInt32(m_out.load32NonNegative(lowStorage(m_node->child2()), m_heaps.Butterfly_publicLength));
case Array::String: {
LValue string = lowCell(m_node->child1());
setInt32(m_out.load32NonNegative(string, m_heaps.JSString_length));
case Array::DirectArguments: {
LValue arguments = lowCell(m_node->child1());
ExoticObjectMode, noValue(), nullptr,
m_out.notNull(m_out.loadPtr(arguments, m_heaps.DirectArguments_overrides)));
setInt32(m_out.load32NonNegative(arguments, m_heaps.DirectArguments_length));
case Array::ScopedArguments: {
LValue arguments = lowCell(m_node->child1());
ExoticObjectMode, noValue(), nullptr,
m_out.notZero8(m_out.load8(arguments, m_heaps.ScopedArguments_overrodeThings)));
setInt32(m_out.load32NonNegative(arguments, m_heaps.ScopedArguments_totalLength));
if (isTypedView(m_node->arrayMode().typedArrayType())) {
m_out.load32NonNegative(lowCell(m_node->child1()), m_heaps.JSArrayBufferView_length));
DFG_CRASH(m_graph, m_node, "Bad array type");
void compileCheckInBounds()
OutOfBounds, noValue(), 0,
m_out.aboveOrEqual(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
void compileGetByVal()
switch (m_node->arrayMode().type()) {
case Array::Contiguous: {
LValue index = lowInt32(m_node->child2());
LValue storage = lowStorage(m_node->child3());
IndexedAbstractHeap& heap = m_node->arrayMode().type() == Array::Int32 ?
m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties;
if (m_node->arrayMode().isInBounds()) {
LValue result = m_out.load64(baseIndex(heap, storage, index, m_node->child2()));
LValue isHole = m_out.isZero64(result);
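// With a "sane chain" we know the prototype chain cannot produce a value for a
// hole, so a hole can simply be converted to undefined in place; otherwise a
// hole forces an OSR exit (or, in the out-of-bounds path below, a slow call).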
2269 if (m_node->arrayMode().isSaneChain()) {
2271 m_graph, m_node, m_node->arrayMode().type() == Array::Contiguous);
2272 result = m_out.select(
2273 isHole, m_out.constInt64(JSValue::encode(jsUndefined())), result);
2275 speculate(LoadFromHole, noValue(), 0, isHole);
2280 LValue base = lowCell(m_node->child1());
2282 LBasicBlock fastCase = FTL_NEW_BLOCK(m_out, ("GetByVal int/contiguous fast case"));
2283 LBasicBlock slowCase = FTL_NEW_BLOCK(m_out, ("GetByVal int/contiguous slow case"));
2284 LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("GetByVal int/contiguous continuation"));
m_out.branch(
m_out.aboveOrEqual(
2288 index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
2289 rarely(slowCase), usually(fastCase));
2291 LBasicBlock lastNext = m_out.appendTo(fastCase, slowCase);
2293 ValueFromBlock fastResult = m_out.anchor(
2294 m_out.load64(baseIndex(heap, storage, index, m_node->child2())));
m_out.branch(
2296 m_out.isZero64(fastResult.value()), rarely(slowCase), usually(continuation));
2298 m_out.appendTo(slowCase, continuation);
2299 ValueFromBlock slowResult = m_out.anchor(
2300 vmCall(m_out.operation(operationGetByValArrayInt), m_callFrame, base, index));
2301 m_out.jump(continuation);
2303 m_out.appendTo(continuation, lastNext);
2304 setJSValue(m_out.phi(m_out.int64, fastResult, slowResult));
return;
}
2308 case Array::Double: {
2309 LValue index = lowInt32(m_node->child2());
2310 LValue storage = lowStorage(m_node->child3());
2312 IndexedAbstractHeap& heap = m_heaps.indexedDoubleProperties;
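// Double arrays store raw doubles and use NaN (PNaN) to mark holes, hence the
// self-inequality (x != x) checks below.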
2314 if (m_node->arrayMode().isInBounds()) {
2315 LValue result = m_out.loadDouble(
2316 baseIndex(heap, storage, index, m_node->child2()));
2318 if (!m_node->arrayMode().isSaneChain()) {
speculate(
2320 LoadFromHole, noValue(), 0,
2321 m_out.doubleNotEqualOrUnordered(result, result));
}
setDouble(result);
return;
}
2327 LValue base = lowCell(m_node->child1());
2329 LBasicBlock inBounds = FTL_NEW_BLOCK(m_out, ("GetByVal double in bounds"));
2330 LBasicBlock boxPath = FTL_NEW_BLOCK(m_out, ("GetByVal double boxing"));
2331 LBasicBlock slowCase = FTL_NEW_BLOCK(m_out, ("GetByVal double slow case"));
2332 LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("GetByVal double continuation"));
m_out.branch(
m_out.aboveOrEqual(
2336 index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
2337 rarely(slowCase), usually(inBounds));
2339 LBasicBlock lastNext = m_out.appendTo(inBounds, boxPath);
2340 LValue doubleValue = m_out.loadDouble(
2341 baseIndex(heap, storage, index, m_node->child2()));
m_out.branch(
2343 m_out.doubleNotEqualOrUnordered(doubleValue, doubleValue),
2344 rarely(slowCase), usually(boxPath));
2346 m_out.appendTo(boxPath, slowCase);
2347 ValueFromBlock fastResult = m_out.anchor(boxDouble(doubleValue));
2348 m_out.jump(continuation);
2350 m_out.appendTo(slowCase, continuation);
2351 ValueFromBlock slowResult = m_out.anchor(
2352 vmCall(m_out.operation(operationGetByValArrayInt), m_callFrame, base, index));
2353 m_out.jump(continuation);
2355 m_out.appendTo(continuation, lastNext);
2356 setJSValue(m_out.phi(m_out.int64, fastResult, slowResult));
return;
}
2360 case Array::DirectArguments: {
2361 LValue base = lowCell(m_node->child1());
2362 LValue index = lowInt32(m_node->child2());
speculate(
2365 ExoticObjectMode, noValue(), nullptr,
2366 m_out.notNull(m_out.loadPtr(base, m_heaps.DirectArguments_overrides)));
speculate(
2368 ExoticObjectMode, noValue(), nullptr,
m_out.aboveOrEqual(
index,
2371 m_out.load32NonNegative(base, m_heaps.DirectArguments_length)));
2373 TypedPointer address = m_out.baseIndex(
2374 m_heaps.DirectArguments_storage, base, m_out.zeroExtPtr(index));
2375 setJSValue(m_out.load64(address));
return;
}
2379 case Array::ScopedArguments: {
2380 LValue base = lowCell(m_node->child1());
2381 LValue index = lowInt32(m_node->child2());
speculate(
2384 ExoticObjectMode, noValue(), nullptr,
m_out.aboveOrEqual(
index,
2387 m_out.load32NonNegative(base, m_heaps.ScopedArguments_totalLength)));
2389 LValue table = m_out.loadPtr(base, m_heaps.ScopedArguments_table);
2390 LValue namedLength = m_out.load32(table, m_heaps.ScopedArgumentsTable_length);
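// ScopedArguments splits its storage: indices below the named-argument count map through
// the ScopedArgumentsTable to a scope offset (an invalid offset means the slot was deleted),
// while indices at or above namedLength live in the object's overflow storage.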
2392 LBasicBlock namedCase = FTL_NEW_BLOCK(m_out, ("GetByVal ScopedArguments named case"));
2393 LBasicBlock overflowCase = FTL_NEW_BLOCK(m_out, ("GetByVal ScopedArguments overflow case"));
2394 LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("GetByVal ScopedArguments continuation"));
m_out.branch(
2397 m_out.aboveOrEqual(index, namedLength), unsure(overflowCase), unsure(namedCase));
2399 LBasicBlock lastNext = m_out.appendTo(namedCase, overflowCase);
2401 LValue scope = m_out.loadPtr(base, m_heaps.ScopedArguments_scope);
2402 LValue arguments = m_out.loadPtr(table, m_heaps.ScopedArgumentsTable_arguments);
2404 TypedPointer address = m_out.baseIndex(
2405 m_heaps.scopedArgumentsTableArguments, arguments, m_out.zeroExtPtr(index));
2406 LValue scopeOffset = m_out.load32(address);
speculate(
2409 ExoticObjectMode, noValue(), nullptr,
2410 m_out.equal(scopeOffset, m_out.constInt32(ScopeOffset::invalidOffset)));
2412 address = m_out.baseIndex(
2413 m_heaps.JSEnvironmentRecord_variables, scope, m_out.zeroExtPtr(scopeOffset));
2414 ValueFromBlock namedResult = m_out.anchor(m_out.load64(address));
2415 m_out.jump(continuation);
2417 m_out.appendTo(overflowCase, continuation);
2419 address = m_out.baseIndex(
2420 m_heaps.ScopedArguments_overflowStorage, base,
2421 m_out.zeroExtPtr(m_out.sub(index, namedLength)));
2422 LValue overflowValue = m_out.load64(address);
2423 speculate(ExoticObjectMode, noValue(), nullptr, m_out.isZero64(overflowValue));
2424 ValueFromBlock overflowResult = m_out.anchor(overflowValue);
2425 m_out.jump(continuation);
2427 m_out.appendTo(continuation, lastNext);
2428 setJSValue(m_out.phi(m_out.int64, namedResult, overflowResult));
return;
}
2432 case Array::Generic: {
setJSValue(vmCall(
2434 m_out.operation(operationGetByVal), m_callFrame,
2435 lowJSValue(m_node->child1()), lowJSValue(m_node->child2())));
return;
}
2439 case Array::String: {
2440 compileStringCharAt();
return;
}
default: {
2445 LValue index = lowInt32(m_node->child2());
2446 LValue storage = lowStorage(m_node->child3());
2448 TypedArrayType type = m_node->arrayMode().typedArrayType();
2450 if (isTypedView(type)) {
2451 TypedPointer pointer = TypedPointer(
2452 m_heaps.typedArrayProperties,
m_out.add(
storage,
m_out.shl(
2456 m_out.zeroExtPtr(index),
2457 m_out.constIntPtr(logElementSize(type)))));
if (isInt(type)) {
LValue result;
2461 switch (elementSize(type)) {
case 1:
2463 result = m_out.load8(pointer);
break;
case 2:
2466 result = m_out.load16(pointer);
break;
case 4:
2469 result = m_out.load32(pointer);
break;
default:
2472 DFG_CRASH(m_graph, m_node, "Bad element size");
}
2475 if (elementSize(type) < 4) {
if (isSigned(type))
2477 result = m_out.signExt(result, m_out.int32);
else
2479 result = m_out.zeroExt(result, m_out.int32);
}
2484 if (isSigned(type)) {
setInt32(result);
return;
}
2489 if (m_node->shouldSpeculateInt32()) {
speculate(
2491 Overflow, noValue(), 0, m_out.lessThan(result, m_out.int32Zero));
setInt32(result);
return;
}
2496 if (m_node->shouldSpeculateMachineInt()) {
2497 setStrictInt52(m_out.zeroExt(result, m_out.int64));
return;
}
2501 setDouble(m_out.unsignedToFP(result, m_out.doubleType));
return;
}
2505 ASSERT(isFloat(type));
LValue result;
switch (type) {
case TypeFloat32:
2510 result = m_out.fpCast(m_out.loadFloat(pointer), m_out.doubleType);
break;
case TypeFloat64:
2513 result = m_out.loadDouble(pointer);
break;
default:
2516 DFG_CRASH(m_graph, m_node, "Bad typed array type");
}
setDouble(result);
return;
}
2523 DFG_CRASH(m_graph, m_node, "Bad array type");
2528 void compileGetMyArgumentByVal()
2530 InlineCallFrame* inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame;
2532 LValue index = lowInt32(m_node->child2());
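// Compute how many arguments can be loaded. For a non-varargs inline frame the count is a
// compile-time constant; otherwise load the frame's ArgumentCount. Either way the count
// includes |this|, hence the subtraction of one.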
LValue limit;
2535 if (inlineCallFrame && !inlineCallFrame->isVarargs())
2536 limit = m_out.constInt32(inlineCallFrame->arguments.size() - 1);
else {
2538 VirtualRegister argumentCountRegister;
2539 if (!inlineCallFrame)
2540 argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
else
2542 argumentCountRegister = inlineCallFrame->argumentCountRegister;
2543 limit = m_out.sub(m_out.load32(payloadFor(argumentCountRegister)), m_out.int32One);
}
2546 speculate(ExoticObjectMode, noValue(), 0, m_out.aboveOrEqual(index, limit));
TypedPointer base;
2549 if (inlineCallFrame) {
2550 if (inlineCallFrame->arguments.size() <= 1) {
2551 // We should have already exited due to the bounds check, above. Just tell the
2552 // compiler that anything dominated by this instruction is not reachable, so
2553 // that we don't waste time generating such code. This will also plant some
2554 // kind of crashing instruction so that if by some fluke the bounds check didn't
2555 // work, we'll crash in an easy-to-see way.
2556 didAlreadyTerminate();
return;
}
2559 base = addressFor(inlineCallFrame->arguments[1].virtualRegister());
} else
2561 base = addressFor(virtualRegisterForArgument(1));
2563 LValue pointer = m_out.baseIndex(
2564 base.value(), m_out.zeroExt(index, m_out.intPtr), ScaleEight);
2565 setJSValue(m_out.load64(TypedPointer(m_heaps.variables.atAnyIndex(), pointer)));
2568 void compilePutByVal()
2570 Edge child1 = m_graph.varArgChild(m_node, 0);
2571 Edge child2 = m_graph.varArgChild(m_node, 1);
2572 Edge child3 = m_graph.varArgChild(m_node, 2);
2573 Edge child4 = m_graph.varArgChild(m_node, 3);
2574 Edge child5 = m_graph.varArgChild(m_node, 4);
2576 switch (m_node->arrayMode().type()) {
2577 case Array::Generic: {
2578 V_JITOperation_EJJJ operation;
2579 if (m_node->op() == PutByValDirect) {
2580 if (m_graph.isStrictModeFor(m_node->origin.semantic))
2581 operation = operationPutByValDirectStrict;
2583 operation = operationPutByValDirectNonStrict;
2585 if (m_graph.isStrictModeFor(m_node->origin.semantic))
2586 operation = operationPutByValStrict;
2588 operation = operationPutByValNonStrict;
vmCall(
2592 m_out.operation(operation), m_callFrame,
2593 lowJSValue(child1), lowJSValue(child2), lowJSValue(child3));
return;
}
default:
break;
}
2601 LValue base = lowCell(child1);
2602 LValue index = lowInt32(child2);
2603 LValue storage = lowStorage(child4);
2605 switch (m_node->arrayMode().type()) {
case Array::Int32:
case Array::Double:
2608 case Array::Contiguous: {
2609 LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("PutByVal continuation"));
2610 LBasicBlock outerLastNext = m_out.appendTo(m_out.m_block, continuation);
2612 switch (m_node->arrayMode().type()) {
case Array::Int32:
2614 case Array::Contiguous: {
2615 LValue value = lowJSValue(child3, ManualOperandSpeculation);
2617 if (m_node->arrayMode().type() == Array::Int32)
2618 FTL_TYPE_CHECK(jsValueValue(value), child3, SpecInt32, isNotInt32(value));
2620 TypedPointer elementPointer = m_out.baseIndex(
2621 m_node->arrayMode().type() == Array::Int32 ?
2622 m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties,
2623 storage, m_out.zeroExtPtr(index), provenValue(child2));
2625 if (m_node->op() == PutByValAlias) {
2626 m_out.store64(value, elementPointer);
break;
}
2630 contiguousPutByValOutOfBounds(
2631 codeBlock()->isStrictMode()
2632 ? operationPutByValBeyondArrayBoundsStrict
2633 : operationPutByValBeyondArrayBoundsNonStrict,
2634 base, storage, index, value, continuation);
2636 m_out.store64(value, elementPointer);
break;
}
2640 case Array::Double: {
2641 LValue value = lowDouble(child3);
FTL_TYPE_CHECK(
2644 doubleValue(value), child3, SpecDoubleReal,
2645 m_out.doubleNotEqualOrUnordered(value, value));
2647 TypedPointer elementPointer = m_out.baseIndex(
2648 m_heaps.indexedDoubleProperties, storage, m_out.zeroExtPtr(index),
2649 provenValue(child2));
2651 if (m_node->op() == PutByValAlias) {
2652 m_out.storeDouble(value, elementPointer);
break;
}
2656 contiguousPutByValOutOfBounds(
2657 codeBlock()->isStrictMode()
2658 ? operationPutDoubleByValBeyondArrayBoundsStrict
2659 : operationPutDoubleByValBeyondArrayBoundsNonStrict,
2660 base, storage, index, value, continuation);
2662 m_out.storeDouble(value, elementPointer);
break;
}
default:
2667 DFG_CRASH(m_graph, m_node, "Bad array type");
}
2670 m_out.jump(continuation);
2671 m_out.appendTo(continuation, outerLastNext);
return;
}
2676 TypedArrayType type = m_node->arrayMode().typedArrayType();
2678 if (isTypedView(type)) {
2679 TypedPointer pointer = TypedPointer(
2680 m_heaps.typedArrayProperties,
m_out.add(
storage,
m_out.shl(
2684 m_out.zeroExt(index, m_out.intPtr),
2685 m_out.constIntPtr(logElementSize(type)))));
2688 LValue valueToStore;
LType refType;
if (isInt(type)) {
LValue intValue;
2692 switch (child3.useKind()) {
case Int52RepUse:
case Int32Use: {
2695 if (child3.useKind() == Int32Use)
2696 intValue = lowInt32(child3);
else
2698 intValue = m_out.castToInt32(lowStrictInt52(child3));
2700 if (isClamped(type)) {
2701 ASSERT(elementSize(type) == 1);
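// Uint8Clamped stores clamp instead of wrapping. The blocks below implement, in effect:
//     intValue = intValue < 0 ? 0 : intValue > 255 ? 255 : intValue;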
2703 LBasicBlock atLeastZero = FTL_NEW_BLOCK(m_out, ("PutByVal int clamp atLeastZero"));
2704 LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("PutByVal int clamp continuation"));
2706 Vector<ValueFromBlock, 2> intValues;
2707 intValues.append(m_out.anchor(m_out.int32Zero));
m_out.branch(
2709 m_out.lessThan(intValue, m_out.int32Zero),
2710 unsure(continuation), unsure(atLeastZero));
2712 LBasicBlock lastNext = m_out.appendTo(atLeastZero, continuation);
2714 intValues.append(m_out.anchor(m_out.select(
2715 m_out.greaterThan(intValue, m_out.constInt32(255)),
2716 m_out.constInt32(255),
intValue)));
2718 m_out.jump(continuation);
2720 m_out.appendTo(continuation, lastNext);
2721 intValue = m_out.phi(m_out.int32, intValues);
}
break;
}
2726 case DoubleRepUse: {
2727 LValue doubleValue = lowDouble(child3);
2729 if (isClamped(type)) {
2730 ASSERT(elementSize(type) == 1);
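// The double flavor of the clamp also has to map NaN to 0, which is why the first test is
// doubleLessThanOrUnordered rather than a plain less-than.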
2732 LBasicBlock atLeastZero = FTL_NEW_BLOCK(m_out, ("PutByVal double clamp atLeastZero"));
2733 LBasicBlock withinRange = FTL_NEW_BLOCK(m_out, ("PutByVal double clamp withinRange"));
2734 LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("PutByVal double clamp continuation"));
2736 Vector<ValueFromBlock, 3> intValues;
2737 intValues.append(m_out.anchor(m_out.int32Zero));
m_out.branch(
2739 m_out.doubleLessThanOrUnordered(doubleValue, m_out.doubleZero),
2740 unsure(continuation), unsure(atLeastZero));
2742 LBasicBlock lastNext = m_out.appendTo(atLeastZero, withinRange);
2743 intValues.append(m_out.anchor(m_out.constInt32(255)));
m_out.branch(
2745 m_out.doubleGreaterThan(doubleValue, m_out.constDouble(255)),
2746 unsure(continuation), unsure(withinRange));
2748 m_out.appendTo(withinRange, continuation);
2749 intValues.append(m_out.anchor(m_out.fpToInt32(doubleValue)));
2750 m_out.jump(continuation);
2752 m_out.appendTo(continuation, lastNext);
2753 intValue = m_out.phi(m_out.int32, intValues);
} else
2755 intValue = doubleToInt32(doubleValue);
break;
}
default:
2760 DFG_CRASH(m_graph, m_node, "Bad use kind");
}
2763 switch (elementSize(type)) {
case 1:
2765 valueToStore = m_out.intCast(intValue, m_out.int8);
2766 refType = m_out.ref8;
break;
case 2:
2769 valueToStore = m_out.intCast(intValue, m_out.int16);
2770 refType = m_out.ref16;
break;
case 4:
2773 valueToStore = intValue;
2774 refType = m_out.ref32;
break;
default:
2777 DFG_CRASH(m_graph, m_node, "Bad element size");
}
2779 } else /* !isInt(type) */ {
2780 LValue value = lowDouble(child3);
switch (type) {
case TypeFloat32:
2783 valueToStore = m_out.fpCast(value, m_out.floatType);
2784 refType = m_out.refFloat;
break;
case TypeFloat64:
2787 valueToStore = value;
2788 refType = m_out.refDouble;
break;
default:
2791 DFG_CRASH(m_graph, m_node, "Bad typed array type");
}
}
2795 if (m_node->arrayMode().isInBounds() || m_node->op() == PutByValAlias)
2796 m_out.store(valueToStore, pointer, refType);
else {
2798 LBasicBlock isInBounds = FTL_NEW_BLOCK(m_out, ("PutByVal typed array in bounds case"));
2799 LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("PutByVal typed array continuation"));
m_out.branch(
2802 m_out.aboveOrEqual(index, lowInt32(child5)),
2803 unsure(continuation), unsure(isInBounds));
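// Out-of-bounds typed array stores have no effect, so the out-of-bounds path skips the
// store and goes straight to the continuation.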
2805 LBasicBlock lastNext = m_out.appendTo(isInBounds, continuation);
2806 m_out.store(valueToStore, pointer, refType);
2807 m_out.jump(continuation);
2809 m_out.appendTo(continuation, lastNext);
}
return;
}
2815 DFG_CRASH(m_graph, m_node, "Bad array type");
2820 void compileArrayPush()
2822 LValue base = lowCell(m_node->child1());
2823 LValue storage = lowStorage(m_node->child3());
2825 switch (m_node->arrayMode().type()) {
case Array::Int32:
2827 case Array::Contiguous:
2828 case Array::Double: {
LValue value;
LType refType;
2832 if (m_node->arrayMode().type() != Array::Double) {
2833 value = lowJSValue(m_node->child2(), ManualOperandSpeculation);
2834 if (m_node->arrayMode().type() == Array::Int32) {
FTL_TYPE_CHECK(
2836 jsValueValue(value), m_node->child2(), SpecInt32, isNotInt32(value));
}
2838 refType = m_out.ref64;
} else {
2840 value = lowDouble(m_node->child2());
FTL_TYPE_CHECK(
2842 doubleValue(value), m_node->child2(), SpecDoubleReal,
2843 m_out.doubleNotEqualOrUnordered(value, value));
2844 refType = m_out.refDouble;
}
2847 IndexedAbstractHeap& heap = m_heaps.forArrayType(m_node->arrayMode().type());
2849 LValue prevLength = m_out.load32(storage, m_heaps.Butterfly_publicLength);
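// Fast path: if the vector still has room, store at the old length, bump publicLength, and
// box the new length as the result. Otherwise call the generic push operation, which can
// reallocate the butterfly.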
2851 LBasicBlock fastPath = FTL_NEW_BLOCK(m_out, ("ArrayPush fast path"));
2852 LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("ArrayPush slow path"));
2853 LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ArrayPush continuation"));
m_out.branch(
m_out.aboveOrEqual(
2857 prevLength, m_out.load32(storage, m_heaps.Butterfly_vectorLength)),
2858 rarely(slowPath), usually(fastPath));
2860 LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);
m_out.store(
2862 value, m_out.baseIndex(heap, storage, m_out.zeroExtPtr(prevLength)), refType);
2863 LValue newLength = m_out.add(prevLength, m_out.int32One);
2864 m_out.store32(newLength, storage, m_heaps.Butterfly_publicLength);
2866 ValueFromBlock fastResult = m_out.anchor(boxInt32(newLength));
2867 m_out.jump(continuation);
2869 m_out.appendTo(slowPath, continuation);
LValue operation;
2871 if (m_node->arrayMode().type() != Array::Double)
2872 operation = m_out.operation(operationArrayPush);
else
2874 operation = m_out.operation(operationArrayPushDouble);
2875 ValueFromBlock slowResult = m_out.anchor(
2876 vmCall(operation, m_callFrame, value, base));
2877 m_out.jump(continuation);
2879 m_out.appendTo(continuation, lastNext);
2880 setJSValue(m_out.phi(m_out.int64, fastResult, slowResult));
return;
}
default:
2885 DFG_CRASH(m_graph, m_node, "Bad array type");
2890 void compileArrayPop()
2892 LValue base = lowCell(m_node->child1());
2893 LValue storage = lowStorage(m_node->child2());
2895 switch (m_node->arrayMode().type()) {
case Array::Int32:
case Array::Double:
2898 case Array::Contiguous: {
2899 IndexedAbstractHeap& heap = m_heaps.forArrayType(m_node->arrayMode().type());
2901 LBasicBlock fastCase = FTL_NEW_BLOCK(m_out, ("ArrayPop fast case"));
2902 LBasicBlock slowCase = FTL_NEW_BLOCK(m_out, ("ArrayPop slow case"));
2903 LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ArrayPop continuation"));
2905 LValue prevLength = m_out.load32(storage, m_heaps.Butterfly_publicLength);
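// Popping an empty array produces undefined without touching storage. The fast path shrinks
// publicLength, reads the last slot, and rewrites it as a hole (zero for JSValue storage,
// PNaN for double storage). If that slot already held a hole, fall through to the slow path
// so the generic pop can handle it and recover the length.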
2907 Vector<ValueFromBlock, 3> results;
2908 results.append(m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined()))));
m_out.branch(
2910 m_out.isZero32(prevLength), rarely(continuation), usually(fastCase));
2912 LBasicBlock lastNext = m_out.appendTo(fastCase, slowCase);
2913 LValue newLength = m_out.sub(prevLength, m_out.int32One);
2914 m_out.store32(newLength, storage, m_heaps.Butterfly_publicLength);
2915 TypedPointer pointer = m_out.baseIndex(heap, storage, m_out.zeroExtPtr(newLength));
2916 if (m_node->arrayMode().type() != Array::Double) {
2917 LValue result = m_out.load64(pointer);
2918 m_out.store64(m_out.int64Zero, pointer);
2919 results.append(m_out.anchor(result));
m_out.branch(
2921 m_out.notZero64(result), usually(continuation), rarely(slowCase));
} else {
2923 LValue result = m_out.loadDouble(pointer);
2924 m_out.store64(m_out.constInt64(bitwise_cast<int64_t>(PNaN)), pointer);
2925 results.append(m_out.anchor(boxDouble(result)));
m_out.branch(
2927 m_out.doubleEqual(result, result),
2928 usually(continuation), rarely(slowCase));
}
2931 m_out.appendTo(slowCase, continuation);
2932 results.append(m_out.anchor(vmCall(
2933 m_out.operation(operationArrayPopAndRecoverLength), m_callFrame, base)));
2934 m_out.jump(continuation);
2936 m_out.appendTo(continuation, lastNext);
2937 setJSValue(m_out.phi(m_out.int64, results));
return;
}
default:
2942 DFG_CRASH(m_graph, m_node, "Bad array type");
2947 void compileCreateActivation()
2949 LValue scope = lowCell(m_node->child1());
2950 SymbolTable* table = m_node->castOperand<SymbolTable*>();
2951 Structure* structure = m_graph.globalObjectFor(m_node->origin.semantic)->activationStructure();
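// While the symbol table's singletonScope() inference is still valid we have to create the
// activation through the runtime so the inferred value gets updated; once it has been
// invalidated we can allocate the JSLexicalEnvironment inline, fill every variable slot with
// undefined, and call out only if the inline allocator fails.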
2953 if (table->singletonScope()->isStillValid()) {
2954 LValue callResult = vmCall(
2955 m_out.operation(operationCreateActivationDirect), m_callFrame, weakPointer(structure),
2956 scope, weakPointer(table));
2957 setJSValue(callResult);
return;
}
2961 LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("CreateActivation slow path"));
2962 LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("CreateActivation continuation"));
2964 LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
2966 LValue fastObject = allocateObject<JSLexicalEnvironment>(
2967 JSLexicalEnvironment::allocationSize(table), structure, m_out.intPtrZero, slowPath);
2969 // We don't need memory barriers since we just fast-created the activation, so the
2970 // activation must be young.
2971 m_out.storePtr(scope, fastObject, m_heaps.JSScope_next);
2972 m_out.storePtr(weakPointer(table), fastObject, m_heaps.JSSymbolTableObject_symbolTable);
2974 for (unsigned i = 0; i < table->scopeSize(); ++i) {
m_out.store64(
2976 m_out.constInt64(JSValue::encode(jsUndefined())),
2977 fastObject, m_heaps.JSEnvironmentRecord_variables[i]);
}
2980 ValueFromBlock fastResult = m_out.anchor(fastObject);
2981 m_out.jump(continuation);
2983 m_out.appendTo(slowPath, continuation);
2984 LValue callResult = vmCall(
2985 m_out.operation(operationCreateActivationDirect), m_callFrame, weakPointer(structure),
2986 scope, weakPointer(table));
2987 ValueFromBlock slowResult = m_out.anchor(callResult);
2988 m_out.jump(continuation);
2990 m_out.appendTo(continuation, lastNext);
2991 setJSValue(m_out.phi(m_out.intPtr, fastResult, slowResult));
2994 void compileNewFunction()
2996 LValue scope = lowCell(m_node->child1());
2997 FunctionExecutable* executable = m_node->castOperand<FunctionExecutable*>();
2998 if (executable->singletonFunction()->isStillValid()) {
2999 LValue callResult = vmCall(
3000 m_out.operation(operationNewFunction), m_callFrame, scope, weakPointer(executable));
3001 setJSValue(callResult);
return;
}
3005 Structure* structure = m_graph.globalObjectFor(m_node->origin.semantic)->functionStructure();
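// Same pattern as CreateActivation: a still-valid singletonFunction() inference forces the
// runtime call above. Otherwise allocate the JSFunction inline with a null rareData, taking
// the slow path (which notes the invalidated reallocation watchpoint) only on allocation
// failure.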
3007 LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("NewFunction slow path"));
3008 LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("NewFunction continuation"));
3010 LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
3012 LValue fastObject = allocateObject<JSFunction>(
3013 structure, m_out.intPtrZero, slowPath);
3015 // We don't need memory barriers since we just fast-created the function, so it
// must be young.
3017 m_out.storePtr(scope, fastObject, m_heaps.JSFunction_scope);
3018 m_out.storePtr(weakPointer(executable), fastObject, m_heaps.JSFunction_executable);
3019 m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.JSFunction_rareData);
3021 ValueFromBlock fastResult = m_out.anchor(fastObject);
3022 m_out.jump(continuation);
3024 m_out.appendTo(slowPath, continuation);
3025 LValue callResult = vmCall(
3026 m_out.operation(operationNewFunctionWithInvalidatedReallocationWatchpoint),
3027 m_callFrame, scope, weakPointer(executable));
3028 ValueFromBlock slowResult = m_out.anchor(callResult);
3029 m_out.jump(continuation);
3031 m_out.appendTo(continuation, lastNext);
3032 setJSValue(m_out.phi(m_out.intPtr, fastResult, slowResult));
3035 void compileCreateDirectArguments()
3037 // FIXME: A more effective way of dealing with the argument count and callee is to have
3038 // them be explicit arguments to this node.
3039 // https://bugs.webkit.org/show_bug.cgi?id=142207
3041 Structure* structure =
3042 m_graph.globalObjectFor(m_node->origin.semantic)->directArgumentsStructure();
3044 unsigned minCapacity = m_graph.baselineCodeBlockFor(m_node->origin.semantic)->numParameters() - 1;
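// numParameters() counts |this|, so minCapacity is the number of named formal parameters;
// the allocation below must cover at least that many slots even if fewer arguments were
// actually passed.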
3046 LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("CreateDirectArguments slow path"));
3047 LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("CreateDirectArguments continuation"));
3049 LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
3051 ArgumentsLength length = getArgumentsLength();
LValue fastObject;
3054 if (length.isKnown) {
3055 fastObject = allocateObject<DirectArguments>(
3056 DirectArguments::allocationSize(std::max(length.known, minCapacity)), structure,
3057 m_out.intPtrZero, slowPath);
} else {
3059 LValue size = m_out.add(