2 * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "FTLLowerDFGToLLVM.h"
31 #include "CodeBlockWithJITType.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGInPlaceAbstractState.h"
34 #include "DFGOSRAvailabilityAnalysisPhase.h"
35 #include "DirectArguments.h"
36 #include "FTLAbstractHeapRepository.h"
37 #include "FTLAvailableRecovery.h"
38 #include "FTLForOSREntryJITCode.h"
39 #include "FTLFormattedValue.h"
40 #include "FTLInlineCacheSize.h"
41 #include "FTLLoweredNodeValue.h"
42 #include "FTLOperations.h"
43 #include "FTLOutput.h"
44 #include "FTLThunks.h"
45 #include "FTLWeightedTarget.h"
46 #include "JSCInlines.h"
47 #include "JSLexicalEnvironment.h"
48 #include "OperandsInlines.h"
49 #include "ScopedArguments.h"
50 #include "ScopedArgumentsTable.h"
51 #include "VirtualRegister.h"
54 #include <llvm/InitializeLLVM.h>
55 #include <wtf/ProcessID.h>
57 #if ENABLE(FTL_NATIVE_CALL_INLINING)
58 #include "BundlePath.h"
61 namespace JSC { namespace FTL {
65 static std::atomic<int> compileCounter;
68 NO_RETURN_DUE_TO_CRASH static void ftlUnreachable()
73 NO_RETURN_DUE_TO_CRASH static void ftlUnreachable(
74 CodeBlock* codeBlock, BlockIndex blockIndex, unsigned nodeIndex)
76 dataLog("Crashing in thought-to-be-unreachable FTL-generated code for ", pointerDump(codeBlock), " at basic block #", blockIndex);
77 if (nodeIndex != UINT_MAX)
78 dataLog(", node @", nodeIndex);
84 // Using this instead of typeCheck() helps to reduce the load on LLVM, by creating
85 // significantly less dead code.
86 #define FTL_TYPE_CHECK(lowValue, highValue, typesPassedThrough, failCondition) do { \
87 FormattedValue _ftc_lowValue = (lowValue); \
88 Edge _ftc_highValue = (highValue); \
89 SpeculatedType _ftc_typesPassedThrough = (typesPassedThrough); \
90 if (!m_interpreter.needsTypeCheck(_ftc_highValue, _ftc_typesPassedThrough)) \
92 typeCheck(_ftc_lowValue, _ftc_highValue, _ftc_typesPassedThrough, (failCondition)); \
95 class LowerDFGToLLVM {
97 LowerDFGToLLVM(State& state)
98 : m_graph(state.graph)
100 , m_heaps(state.context)
101 , m_out(state.context)
102 , m_state(state.graph)
103 , m_interpreter(state.graph, m_state)
105 , m_tbaaKind(mdKindID(state.context, "tbaa"))
106 , m_tbaaStructKind(mdKindID(state.context, "tbaa.struct"))
113 if (verboseCompilationEnabled()) {
115 "jsBody_", ++compileCounter, "_", codeBlock()->inferredName(),
116 "_", codeBlock()->hash());
120 m_graph.m_dominators.computeIfNecessary(m_graph);
123 moduleCreateWithNameInContext(name.data(), m_ftlState.context);
125 m_ftlState.function = addFunction(
126 m_ftlState.module, name.data(), functionType(m_out.int64));
127 setFunctionCallingConv(m_ftlState.function, LLVMCCallConv);
128 if (isX86() && Options::llvmDisallowAVX()) {
129 // AVX makes V8/raytrace 80% slower. It makes Kraken/audio-oscillator 4.5x
130 // slower. It should be disabled.
131 addTargetDependentFunctionAttr(m_ftlState.function, "target-features", "-avx");
134 if (verboseCompilationEnabled())
135 dataLog("Function ready, beginning lowering.\n");
137 m_out.initialize(m_ftlState.module, m_ftlState.function, m_heaps);
139 m_prologue = FTL_NEW_BLOCK(m_out, ("Prologue"));
140 LBasicBlock stackOverflow = FTL_NEW_BLOCK(m_out, ("Stack overflow"));
141 m_handleExceptions = FTL_NEW_BLOCK(m_out, ("Handle Exceptions"));
143 LBasicBlock checkArguments = FTL_NEW_BLOCK(m_out, ("Check arguments"));
145 for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
146 m_highBlock = m_graph.block(blockIndex);
149 m_blocks.add(m_highBlock, FTL_NEW_BLOCK(m_out, ("Block ", *m_highBlock)));
152 m_out.appendTo(m_prologue, stackOverflow);
153 createPhiVariables();
155 auto preOrder = m_graph.blocksInPreOrder();
157 int maxNumberOfArguments = -1;
158 for (BasicBlock* block : preOrder) {
159 for (unsigned nodeIndex = block->size(); nodeIndex--; ) {
160 Node* node = block->at(nodeIndex);
161 switch (node->op()) {
163 case NativeConstruct: {
164 int numArgs = node->numChildren();
165 if (numArgs > maxNumberOfArguments)
166 maxNumberOfArguments = numArgs;
175 if (maxNumberOfArguments >= 0) {
176 m_execState = m_out.alloca(arrayType(m_out.int64, JSStack::CallFrameHeaderSize + maxNumberOfArguments));
177 m_execStorage = m_out.ptrToInt(m_execState, m_out.intPtr);
180 LValue capturedAlloca = m_out.alloca(arrayType(m_out.int64, m_graph.m_nextMachineLocal));
182 m_captured = m_out.add(
183 m_out.ptrToInt(capturedAlloca, m_out.intPtr),
184 m_out.constIntPtr(m_graph.m_nextMachineLocal * sizeof(Register)));
186 m_ftlState.capturedStackmapID = m_stackmapIDs++;
188 m_out.stackmapIntrinsic(), m_out.constInt64(m_ftlState.capturedStackmapID),
189 m_out.int32Zero, capturedAlloca);
191 // If we have any CallVarargs then we nee to have a spill slot for it.
192 bool hasVarargs = false;
193 for (BasicBlock* block : preOrder) {
194 for (Node* node : *block) {
195 switch (node->op()) {
197 case CallForwardVarargs:
198 case ConstructVarargs:
199 case ConstructForwardVarargs:
208 LValue varargsSpillSlots = m_out.alloca(
209 arrayType(m_out.int64, JSCallVarargs::numSpillSlotsNeeded()));
210 m_ftlState.varargsSpillSlotsStackmapID = m_stackmapIDs++;
212 m_out.stackmapIntrinsic(), m_out.constInt64(m_ftlState.varargsSpillSlotsStackmapID),
213 m_out.int32Zero, varargsSpillSlots);
216 // We should not create any alloca's after this point, since they will cease to
217 // be mem2reg candidates.
219 m_callFrame = m_out.ptrToInt(
220 m_out.call(m_out.frameAddressIntrinsic(), m_out.int32Zero), m_out.intPtr);
221 m_tagTypeNumber = m_out.constInt64(TagTypeNumber);
222 m_tagMask = m_out.constInt64(TagMask);
224 m_out.storePtr(m_out.constIntPtr(codeBlock()), addressFor(JSStack::CodeBlock));
227 didOverflowStack(), rarely(stackOverflow), usually(checkArguments));
229 m_out.appendTo(stackOverflow, m_handleExceptions);
230 m_out.call(m_out.operation(operationThrowStackOverflowError), m_callFrame, m_out.constIntPtr(codeBlock()));
231 m_ftlState.handleStackOverflowExceptionStackmapID = m_stackmapIDs++;
233 m_out.stackmapIntrinsic(), m_out.constInt64(m_ftlState.handleStackOverflowExceptionStackmapID),
234 m_out.constInt32(MacroAssembler::maxJumpReplacementSize()));
237 m_out.appendTo(m_handleExceptions, checkArguments);
238 m_ftlState.handleExceptionStackmapID = m_stackmapIDs++;
240 m_out.stackmapIntrinsic(), m_out.constInt64(m_ftlState.handleExceptionStackmapID),
241 m_out.constInt32(MacroAssembler::maxJumpReplacementSize()));
244 m_out.appendTo(checkArguments, lowBlock(m_graph.block(0)));
245 availabilityMap().clear();
246 availabilityMap().m_locals = Operands<Availability>(codeBlock()->numParameters(), 0);
247 for (unsigned i = codeBlock()->numParameters(); i--;) {
248 availabilityMap().m_locals.argument(i) =
249 Availability(FlushedAt(FlushedJSValue, virtualRegisterForArgument(i)));
251 m_codeOriginForExitTarget = CodeOrigin(0);
252 m_codeOriginForExitProfile = CodeOrigin(0);
254 for (unsigned i = codeBlock()->numParameters(); i--;) {
255 Node* node = m_graph.m_arguments[i];
256 VirtualRegister operand = virtualRegisterForArgument(i);
258 LValue jsValue = m_out.load64(addressFor(operand));
261 DFG_ASSERT(m_graph, node, operand == node->stackAccessData()->machineLocal);
263 // This is a hack, but it's an effective one. It allows us to do CSE on the
264 // primordial load of arguments. This assumes that the GetLocal that got put in
265 // place of the original SetArgument doesn't have any effects before it. This
267 m_loadedArgumentValues.add(node, jsValue);
270 switch (m_graph.m_argumentFormats[i]) {
272 speculate(BadType, jsValueValue(jsValue), node, isNotInt32(jsValue));
275 speculate(BadType, jsValueValue(jsValue), node, isNotBoolean(jsValue));
278 speculate(BadType, jsValueValue(jsValue), node, isNotCell(jsValue));
283 DFG_CRASH(m_graph, node, "Bad flush format for argument");
287 m_out.jump(lowBlock(m_graph.block(0)));
289 for (BasicBlock* block : preOrder)
292 if (Options::dumpLLVMIR())
293 dumpModule(m_ftlState.module);
295 if (verboseCompilationEnabled())
296 m_ftlState.dumpState("after lowering");
297 if (validationEnabled())
298 verifyModule(m_ftlState.module);
303 void createPhiVariables()
305 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
306 BasicBlock* block = m_graph.block(blockIndex);
309 for (unsigned nodeIndex = block->size(); nodeIndex--;) {
310 Node* node = block->at(nodeIndex);
311 if (node->op() != Phi)
314 switch (node->flags() & NodeResultMask) {
315 case NodeResultDouble:
316 type = m_out.doubleType;
318 case NodeResultInt32:
321 case NodeResultInt52:
324 case NodeResultBoolean:
325 type = m_out.boolean;
331 DFG_CRASH(m_graph, node, "Bad Phi node result type");
334 m_phis.add(node, buildAlloca(m_out.m_builder, type));
339 void compileBlock(BasicBlock* block)
344 if (verboseCompilationEnabled())
345 dataLog("Compiling block ", *block, "\n");
349 LBasicBlock lowBlock = m_blocks.get(m_highBlock);
352 for (BlockIndex nextBlockIndex = m_highBlock->index + 1; nextBlockIndex < m_graph.numBlocks(); ++nextBlockIndex) {
353 m_nextHighBlock = m_graph.block(nextBlockIndex);
357 m_nextLowBlock = m_nextHighBlock ? m_blocks.get(m_nextHighBlock) : 0;
359 // All of this effort to find the next block gives us the ability to keep the
360 // generated IR in roughly program order. This ought not affect the performance
361 // of the generated code (since we expect LLVM to reorder things) but it will
362 // make IR dumps easier to read.
363 m_out.appendTo(lowBlock, m_nextLowBlock);
365 if (Options::ftlCrashes())
368 if (!m_highBlock->cfaHasVisited) {
369 if (verboseCompilationEnabled())
370 dataLog("Bailing because CFA didn't reach.\n");
371 crash(m_highBlock->index, UINT_MAX);
375 m_availabilityCalculator.beginBlock(m_highBlock);
378 m_state.beginBasicBlock(m_highBlock);
380 for (m_nodeIndex = 0; m_nodeIndex < m_highBlock->size(); ++m_nodeIndex) {
381 if (!compileNode(m_nodeIndex))
386 void safelyInvalidateAfterTermination()
388 if (verboseCompilationEnabled())
389 dataLog("Bailing.\n");
392 // Invalidate dominated blocks. Under normal circumstances we would expect
393 // them to be invalidated already. But you can have the CFA become more
394 // precise over time because the structures of objects change on the main
395 // thread. Failing to do this would result in weird crashes due to a value
396 // being used but not defined. Race conditions FTW!
397 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
398 BasicBlock* target = m_graph.block(blockIndex);
401 if (m_graph.m_dominators.dominates(m_highBlock, target)) {
402 if (verboseCompilationEnabled())
403 dataLog("Block ", *target, " will bail also.\n");
404 target->cfaHasVisited = false;
409 bool compileNode(unsigned nodeIndex)
411 if (!m_state.isValid()) {
412 safelyInvalidateAfterTermination();
416 m_node = m_highBlock->at(nodeIndex);
417 m_codeOriginForExitProfile = m_node->origin.semantic;
418 m_codeOriginForExitTarget = m_node->origin.forExit;
420 if (verboseCompilationEnabled())
421 dataLog("Lowering ", m_node, "\n");
423 m_availableRecoveries.resize(0);
425 m_interpreter.startExecuting();
427 switch (m_node->op()) {
437 compileDoubleConstant();
440 compileInt52Constant();
452 compileValueToInt32();
454 case BooleanToNumber:
455 compileBooleanToNumber();
457 case ExtractOSREntryLocal:
458 compileExtractOSREntryLocal();
478 compileArithAddOrSub();
494 compileArithMinOrMax();
515 compileArithFRound();
518 compileArithNegate();
539 compileUInt32ToNumber();
542 compileCheckStructure();
548 compileCheckNotEmpty();
551 compileCheckBadCell();
554 compileGetExecutable();
556 case ArrayifyToStructure:
557 compileArrayifyToStructure();
560 compilePutStructure();
573 compileGetButterfly();
575 case ConstantStoragePointer:
576 compileConstantStoragePointer();
578 case GetIndexedPropertyStorage:
579 compileGetIndexedPropertyStorage();
585 compileGetArrayLength();
588 compileCheckInBounds();
593 case GetMyArgumentByVal:
594 compileGetMyArgumentByVal();
607 case CreateActivation:
608 compileCreateActivation();
611 compileNewFunction();
613 case CreateDirectArguments:
614 compileCreateDirectArguments();
616 case CreateScopedArguments:
617 compileCreateScopedArguments();
619 case CreateClonedArguments:
620 compileCreateClonedArguments();
629 compileNewArrayBuffer();
631 case NewArrayWithSize:
632 compileNewArrayWithSize();
634 case GetTypedArrayByteOffset:
635 compileGetTypedArrayByteOffset();
637 case AllocatePropertyStorage:
638 compileAllocatePropertyStorage();
640 case ReallocatePropertyStorage:
641 compileReallocatePropertyStorage();
644 case CallStringConstructor:
645 compileToStringOrCallStringConstructor();
648 compileToPrimitive();
654 compileStringCharAt();
656 case StringCharCodeAt:
657 compileStringCharCodeAt();
660 case GetGetterSetterByOffset:
661 compileGetByOffset();
669 case MultiGetByOffset:
670 compileMultiGetByOffset();
673 compilePutByOffset();
675 case MultiPutByOffset:
676 compileMultiPutByOffset();
679 compileGetGlobalVar();
682 compilePutGlobalVar();
685 compileNotifyWrite();
690 case GetArgumentCount:
691 compileGetArgumentCount();
700 compileGetClosureVar();
703 compilePutClosureVar();
705 case GetFromArguments:
706 compileGetFromArguments();
709 compilePutToArguments();
714 case CompareEqConstant:
715 compileCompareEqConstant();
717 case CompareStrictEq:
718 compileCompareStrictEq();
721 compileCompareLess();
724 compileCompareLessEq();
727 compileCompareGreater();
729 case CompareGreaterEq:
730 compileCompareGreaterEq();
737 compileCallOrConstruct();
740 case CallForwardVarargs:
741 case ConstructVarargs:
742 case ConstructForwardVarargs:
743 compileCallOrConstructVarargs();
746 compileLoadVarargs();
749 compileForwardVarargs();
751 #if ENABLE(FTL_NATIVE_CALL_INLINING)
753 case NativeConstruct:
754 compileNativeCallOrConstruct();
770 compileForceOSRExit();
773 case ThrowReferenceError:
776 case InvalidationPoint:
777 compileInvalidationPoint();
780 compileIsUndefined();
795 compileIsObjectOrNull();
800 case CheckHasInstance:
801 compileCheckHasInstance();
807 compileCountExecution();
810 compileStoreBarrier();
812 case StoreBarrierWithNullCheck:
813 compileStoreBarrierWithNullCheck();
815 case HasIndexedProperty:
816 compileHasIndexedProperty();
818 case HasGenericProperty:
819 compileHasGenericProperty();
821 case HasStructureProperty:
822 compileHasStructureProperty();
825 compileGetDirectPname();
827 case GetEnumerableLength:
828 compileGetEnumerableLength();
830 case GetPropertyEnumerator:
831 compileGetPropertyEnumerator();
833 case GetEnumeratorStructurePname:
834 compileGetEnumeratorStructurePname();
836 case GetEnumeratorGenericPname:
837 compileGetEnumeratorGenericPname();
840 compileToIndexString();
842 case CheckStructureImmediate:
843 compileCheckStructureImmediate();
845 case MaterializeNewObject:
846 compileMaterializeNewObject();
853 case PhantomNewObject:
854 case PhantomNewFunction:
855 case PhantomDirectArguments:
856 case PhantomClonedArguments:
862 DFG_CRASH(m_graph, m_node, "Unrecognized node in FTL backend");
866 if (m_node->isTerminal())
869 if (!m_state.isValid()) {
870 safelyInvalidateAfterTermination();
874 m_availabilityCalculator.executeNode(m_node);
875 m_interpreter.executeEffects(nodeIndex);
880 void compileUpsilon()
882 LValue destination = m_phis.get(m_node->phi());
884 switch (m_node->child1().useKind()) {
886 m_out.set(lowDouble(m_node->child1()), destination);
889 m_out.set(lowInt32(m_node->child1()), destination);
892 m_out.set(lowInt52(m_node->child1()), destination);
895 m_out.set(lowBoolean(m_node->child1()), destination);
898 m_out.set(lowCell(m_node->child1()), destination);
901 m_out.set(lowJSValue(m_node->child1()), destination);
904 DFG_CRASH(m_graph, m_node, "Bad use kind");
911 LValue source = m_phis.get(m_node);
913 switch (m_node->flags() & NodeResultMask) {
914 case NodeResultDouble:
915 setDouble(m_out.get(source));
917 case NodeResultInt32:
918 setInt32(m_out.get(source));
920 case NodeResultInt52:
921 setInt52(m_out.get(source));
923 case NodeResultBoolean:
924 setBoolean(m_out.get(source));
927 setJSValue(m_out.get(source));
930 DFG_CRASH(m_graph, m_node, "Bad use kind");
935 void compileDoubleConstant()
937 setDouble(m_out.constDouble(m_node->asNumber()));
940 void compileInt52Constant()
942 int64_t value = m_node->asMachineInt();
944 setInt52(m_out.constInt64(value << JSValue::int52ShiftAmount));
945 setStrictInt52(m_out.constInt64(value));
948 void compileDoubleRep()
950 switch (m_node->child1().useKind()) {
952 LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);
953 setDouble(jsValueToDouble(m_node->child1(), value));
958 setDouble(strictInt52ToDouble(lowStrictInt52(m_node->child1())));
963 DFG_CRASH(m_graph, m_node, "Bad use kind");
967 void compileValueRep()
969 switch (m_node->child1().useKind()) {
971 LValue value = lowDouble(m_node->child1());
973 if (m_interpreter.needsTypeCheck(m_node->child1(), ~SpecDoubleImpureNaN)) {
974 value = m_out.select(
975 m_out.doubleEqual(value, value), value, m_out.constDouble(PNaN));
978 setJSValue(boxDouble(value));
983 setJSValue(strictInt52ToJSValue(lowStrictInt52(m_node->child1())));
988 DFG_CRASH(m_graph, m_node, "Bad use kind");
992 void compileInt52Rep()
994 switch (m_node->child1().useKind()) {
996 setStrictInt52(m_out.signExt(lowInt32(m_node->child1()), m_out.int64));
1001 jsValueToStrictInt52(
1002 m_node->child1(), lowJSValue(m_node->child1(), ManualOperandSpeculation)));
1005 case DoubleRepMachineIntUse:
1007 doubleToStrictInt52(
1008 m_node->child1(), lowDouble(m_node->child1())));
1012 RELEASE_ASSERT_NOT_REACHED();
1016 void compileValueToInt32()
1018 switch (m_node->child1().useKind()) {
1020 setInt32(m_out.castToInt32(lowStrictInt52(m_node->child1())));
1024 setInt32(doubleToInt32(lowDouble(m_node->child1())));
1029 LoweredNodeValue value = m_int32Values.get(m_node->child1().node());
1030 if (isValid(value)) {
1031 setInt32(value.value());
1035 value = m_jsValueValues.get(m_node->child1().node());
1036 if (isValid(value)) {
1037 setInt32(numberOrNotCellToInt32(m_node->child1(), value.value()));
1041 // We'll basically just get here for constants. But it's good to have this
1042 // catch-all since we often add new representations into the mix.
1044 numberOrNotCellToInt32(
1046 lowJSValue(m_node->child1(), ManualOperandSpeculation)));
1051 DFG_CRASH(m_graph, m_node, "Bad use kind");
1056 void compileBooleanToNumber()
1058 switch (m_node->child1().useKind()) {
1060 setInt32(m_out.zeroExt(lowBoolean(m_node->child1()), m_out.int32));
1065 LValue value = lowJSValue(m_node->child1());
1067 LBasicBlock booleanCase = FTL_NEW_BLOCK(m_out, ("BooleanToNumber boolean case"));
1068 LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("BooleanToNumber continuation"));
1070 ValueFromBlock notBooleanResult = m_out.anchor(value);
1071 m_out.branch(isBoolean(value), unsure(booleanCase), unsure(continuation));
1073 LBasicBlock lastNext = m_out.appendTo(booleanCase, continuation);
1074 ValueFromBlock booleanResult = m_out.anchor(m_out.bitOr(
1075 m_out.zeroExt(unboxBoolean(value), m_out.int64), m_tagTypeNumber));
1076 m_out.jump(continuation);
1078 m_out.appendTo(continuation, lastNext);
1079 setJSValue(m_out.phi(m_out.int64, booleanResult, notBooleanResult));
1084 RELEASE_ASSERT_NOT_REACHED();
1089 void compileExtractOSREntryLocal()
1091 EncodedJSValue* buffer = static_cast<EncodedJSValue*>(
1092 m_ftlState.jitCode->ftlForOSREntry()->entryBuffer()->dataBuffer());
1093 setJSValue(m_out.load64(m_out.absolute(buffer + m_node->unlinkedLocal().toLocal())));
1096 void compileGetStack()
1098 // GetLocals arise only for captured variables and arguments. For arguments, we might have
1099 // already loaded it.
1100 if (LValue value = m_loadedArgumentValues.get(m_node)) {
1105 StackAccessData* data = m_node->stackAccessData();
1106 AbstractValue& value = m_state.variables().operand(data->local);
1108 DFG_ASSERT(m_graph, m_node, isConcrete(data->format));
1109 DFG_ASSERT(m_graph, m_node, data->format != FlushedDouble); // This just happens to not arise for GetStacks, right now. It would be trivial to support.
1111 if (isInt32Speculation(value.m_type))
1112 setInt32(m_out.load32(payloadFor(data->machineLocal)));
1114 setJSValue(m_out.load64(addressFor(data->machineLocal)));
1117 void compilePutStack()
1119 StackAccessData* data = m_node->stackAccessData();
1120 switch (data->format) {
1121 case FlushedJSValue: {
1122 LValue value = lowJSValue(m_node->child1());
1123 m_out.store64(value, addressFor(data->machineLocal));
1127 case FlushedDouble: {
1128 LValue value = lowDouble(m_node->child1());
1129 m_out.storeDouble(value, addressFor(data->machineLocal));
1133 case FlushedInt32: {
1134 LValue value = lowInt32(m_node->child1());
1135 m_out.store32(value, payloadFor(data->machineLocal));
1139 case FlushedInt52: {
1140 LValue value = lowInt52(m_node->child1());
1141 m_out.store64(value, addressFor(data->machineLocal));
1146 LValue value = lowCell(m_node->child1());
1147 m_out.store64(value, addressFor(data->machineLocal));
1151 case FlushedBoolean: {
1152 speculateBoolean(m_node->child1());
1154 lowJSValue(m_node->child1(), ManualOperandSpeculation),
1155 addressFor(data->machineLocal));
1160 DFG_CRASH(m_graph, m_node, "Bad flush format");
1165 void compilePhantom()
1167 DFG_NODE_DO_TO_CHILDREN(m_graph, m_node, speculate);
1170 void compileToThis()
1172 LValue value = lowJSValue(m_node->child1());
1174 LBasicBlock isCellCase = FTL_NEW_BLOCK(m_out, ("ToThis is cell case"));
1175 LBasicBlock slowCase = FTL_NEW_BLOCK(m_out, ("ToThis slow case"));
1176 LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ToThis continuation"));
1178 m_out.branch(isCell(value), usually(isCellCase), rarely(slowCase));
1180 LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
1181 ValueFromBlock fastResult = m_out.anchor(value);
1182 m_out.branch(isType(value, FinalObjectType), usually(continuation), rarely(slowCase));
1184 m_out.appendTo(slowCase, continuation);
1185 J_JITOperation_EJ function;
1186 if (m_graph.isStrictModeFor(m_node->origin.semantic))
1187 function = operationToThisStrict;
1189 function = operationToThis;
1190 ValueFromBlock slowResult = m_out.anchor(
1191 vmCall(m_out.operation(function), m_callFrame, value));
1192 m_out.jump(continuation);
1194 m_out.appendTo(continuation, lastNext);
1195 setJSValue(m_out.phi(m_out.int64, fastResult, slowResult));
1198 void compileValueAdd()
1200 J_JITOperation_EJJ operation;
1201 if (!(m_state.forNode(m_node->child1()).m_type & SpecFullNumber)
1202 && !(m_state.forNode(m_node->child2()).m_type & SpecFullNumber))
1203 operation = operationValueAddNotNumber;
1205 operation = operationValueAdd;
1207 m_out.operation(operation), m_callFrame,
1208 lowJSValue(m_node->child1()), lowJSValue(m_node->child2())));
1211 void compileArithAddOrSub()
1213 bool isSub = m_node->op() == ArithSub;
1214 switch (m_node->binaryUseKind()) {
1216 LValue left = lowInt32(m_node->child1());
1217 LValue right = lowInt32(m_node->child2());
1219 if (!shouldCheckOverflow(m_node->arithMode())) {
1220 setInt32(isSub ? m_out.sub(left, right) : m_out.add(left, right));
1226 result = m_out.addWithOverflow32(left, right);
1228 if (doesKill(m_node->child2())) {
1229 addAvailableRecovery(
1230 m_node->child2(), SubRecovery,
1231 m_out.extractValue(result, 0), left, ValueFormatInt32);
1232 } else if (doesKill(m_node->child1())) {
1233 addAvailableRecovery(
1234 m_node->child1(), SubRecovery,
1235 m_out.extractValue(result, 0), right, ValueFormatInt32);
1238 result = m_out.subWithOverflow32(left, right);
1240 if (doesKill(m_node->child2())) {
1241 // result = left - right
1242 // result - left = -right
1243 // right = left - result
1244 addAvailableRecovery(
1245 m_node->child2(), SubRecovery,
1246 left, m_out.extractValue(result, 0), ValueFormatInt32);
1247 } else if (doesKill(m_node->child1())) {
1248 // result = left - right
1249 // result + right = left
1250 addAvailableRecovery(
1251 m_node->child1(), AddRecovery,
1252 m_out.extractValue(result, 0), right, ValueFormatInt32);
1256 speculate(Overflow, noValue(), 0, m_out.extractValue(result, 1));
1257 setInt32(m_out.extractValue(result, 0));
1262 if (!m_state.forNode(m_node->child1()).couldBeType(SpecInt52)
1263 && !m_state.forNode(m_node->child2()).couldBeType(SpecInt52)) {
1265 LValue left = lowWhicheverInt52(m_node->child1(), kind);
1266 LValue right = lowInt52(m_node->child2(), kind);
1267 setInt52(isSub ? m_out.sub(left, right) : m_out.add(left, right), kind);
1271 LValue left = lowInt52(m_node->child1());
1272 LValue right = lowInt52(m_node->child2());
1276 result = m_out.addWithOverflow64(left, right);
1278 if (doesKill(m_node->child2())) {
1279 addAvailableRecovery(
1280 m_node->child2(), SubRecovery,
1281 m_out.extractValue(result, 0), left, ValueFormatInt52);
1282 } else if (doesKill(m_node->child1())) {
1283 addAvailableRecovery(
1284 m_node->child1(), SubRecovery,
1285 m_out.extractValue(result, 0), right, ValueFormatInt52);
1288 result = m_out.subWithOverflow64(left, right);
1290 if (doesKill(m_node->child2())) {
1291 // result = left - right
1292 // result - left = -right
1293 // right = left - result
1294 addAvailableRecovery(
1295 m_node->child2(), SubRecovery,
1296 left, m_out.extractValue(result, 0), ValueFormatInt52);
1297 } else if (doesKill(m_node->child1())) {
1298 // result = left - right
1299 // result + right = left
1300 addAvailableRecovery(
1301 m_node->child1(), AddRecovery,
1302 m_out.extractValue(result, 0), right, ValueFormatInt52);
1306 speculate(Int52Overflow, noValue(), 0, m_out.extractValue(result, 1));
1307 setInt52(m_out.extractValue(result, 0));
1311 case DoubleRepUse: {
1312 LValue C1 = lowDouble(m_node->child1());
1313 LValue C2 = lowDouble(m_node->child2());
1315 setDouble(isSub ? m_out.doubleSub(C1, C2) : m_out.doubleAdd(C1, C2));
1320 DFG_CRASH(m_graph, m_node, "Bad use kind");
1325 void compileArithClz32()
1327 LValue operand = lowInt32(m_node->child1());
1328 LValue isZeroUndef = m_out.booleanFalse;
1329 setInt32(m_out.ctlz32(operand, isZeroUndef));
1332 void compileArithMul()
1334 switch (m_node->binaryUseKind()) {
1336 LValue left = lowInt32(m_node->child1());
1337 LValue right = lowInt32(m_node->child2());
1341 if (!shouldCheckOverflow(m_node->arithMode()))
1342 result = m_out.mul(left, right);
1344 LValue overflowResult = m_out.mulWithOverflow32(left, right);
1345 speculate(Overflow, noValue(), 0, m_out.extractValue(overflowResult, 1));
1346 result = m_out.extractValue(overflowResult, 0);
1349 if (shouldCheckNegativeZero(m_node->arithMode())) {
1350 LBasicBlock slowCase = FTL_NEW_BLOCK(m_out, ("ArithMul slow case"));
1351 LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ArithMul continuation"));
1354 m_out.notZero32(result), usually(continuation), rarely(slowCase));
1356 LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
1357 LValue cond = m_out.bitOr(m_out.lessThan(left, m_out.int32Zero), m_out.lessThan(right, m_out.int32Zero));
1358 speculate(NegativeZero, noValue(), 0, cond);
1359 m_out.jump(continuation);
1360 m_out.appendTo(continuation, lastNext);
1369 LValue left = lowWhicheverInt52(m_node->child1(), kind);
1370 LValue right = lowInt52(m_node->child2(), opposite(kind));
1372 LValue overflowResult = m_out.mulWithOverflow64(left, right);
1373 speculate(Int52Overflow, noValue(), 0, m_out.extractValue(overflowResult, 1));
1374 LValue result = m_out.extractValue(overflowResult, 0);
1376 if (shouldCheckNegativeZero(m_node->arithMode())) {
1377 LBasicBlock slowCase = FTL_NEW_BLOCK(m_out, ("ArithMul slow case"));
1378 LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ArithMul continuation"));
1381 m_out.notZero64(result), usually(continuation), rarely(slowCase));
1383 LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
1384 LValue cond = m_out.bitOr(m_out.lessThan(left, m_out.int64Zero), m_out.lessThan(right, m_out.int64Zero));
1385 speculate(NegativeZero, noValue(), 0, cond);
1386 m_out.jump(continuation);
1387 m_out.appendTo(continuation, lastNext);
1394 case DoubleRepUse: {
1396 m_out.doubleMul(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
1401 DFG_CRASH(m_graph, m_node, "Bad use kind");
1406 void compileArithDiv()
1408 switch (m_node->binaryUseKind()) {
1410 LValue numerator = lowInt32(m_node->child1());
1411 LValue denominator = lowInt32(m_node->child2());
1413 LBasicBlock unsafeDenominator = FTL_NEW_BLOCK(m_out, ("ArithDiv unsafe denominator"));
1414 LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ArithDiv continuation"));
1415 LBasicBlock done = FTL_NEW_BLOCK(m_out, ("ArithDiv done"));
1417 Vector<ValueFromBlock, 3> results;
1419 LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
1422 m_out.above(adjustedDenominator, m_out.int32One),
1423 usually(continuation), rarely(unsafeDenominator));
1425 LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
1427 LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
1429 if (shouldCheckOverflow(m_node->arithMode())) {
1430 LValue cond = m_out.bitOr(m_out.isZero32(denominator), m_out.equal(numerator, neg2ToThe31));
1431 speculate(Overflow, noValue(), 0, cond);
1432 m_out.jump(continuation);
1434 // This is the case where we convert the result to an int after we're done. So,
1435 // if the denominator is zero, then the result should be zero.
1436 // If the denominator is not zero (i.e. it's -1 because we're guarded by the
1437 // check above) and the numerator is -2^31 then the result should be -2^31.
1439 LBasicBlock divByZero = FTL_NEW_BLOCK(m_out, ("ArithDiv divide by zero"));
1440 LBasicBlock notDivByZero = FTL_NEW_BLOCK(m_out, ("ArithDiv not divide by zero"));
1441 LBasicBlock neg2ToThe31ByNeg1 = FTL_NEW_BLOCK(m_out, ("ArithDiv -2^31/-1"));
1444 m_out.isZero32(denominator), rarely(divByZero), usually(notDivByZero));
1446 m_out.appendTo(divByZero, notDivByZero);
1447 results.append(m_out.anchor(m_out.int32Zero));
1450 m_out.appendTo(notDivByZero, neg2ToThe31ByNeg1);
1452 m_out.equal(numerator, neg2ToThe31),
1453 rarely(neg2ToThe31ByNeg1), usually(continuation));
1455 m_out.appendTo(neg2ToThe31ByNeg1, continuation);
1456 results.append(m_out.anchor(neg2ToThe31));
1460 m_out.appendTo(continuation, done);
1462 if (shouldCheckNegativeZero(m_node->arithMode())) {
1463 LBasicBlock zeroNumerator = FTL_NEW_BLOCK(m_out, ("ArithDiv zero numerator"));
1464 LBasicBlock numeratorContinuation = FTL_NEW_BLOCK(m_out, ("ArithDiv numerator continuation"));
1467 m_out.isZero32(numerator),
1468 rarely(zeroNumerator), usually(numeratorContinuation));
1470 LBasicBlock innerLastNext = m_out.appendTo(zeroNumerator, numeratorContinuation);
1473 NegativeZero, noValue(), 0, m_out.lessThan(denominator, m_out.int32Zero));
1475 m_out.jump(numeratorContinuation);
1477 m_out.appendTo(numeratorContinuation, innerLastNext);
1480 LValue result = m_out.div(numerator, denominator);
1482 if (shouldCheckOverflow(m_node->arithMode())) {
1484 Overflow, noValue(), 0,
1485 m_out.notEqual(m_out.mul(result, denominator), numerator));
1488 results.append(m_out.anchor(result));
1491 m_out.appendTo(done, lastNext);
1493 setInt32(m_out.phi(m_out.int32, results));
1497 case DoubleRepUse: {
1498 setDouble(m_out.doubleDiv(
1499 lowDouble(m_node->child1()), lowDouble(m_node->child2())));
1504 DFG_CRASH(m_graph, m_node, "Bad use kind");
1509 void compileArithMod()
1511 switch (m_node->binaryUseKind()) {
1513 LValue numerator = lowInt32(m_node->child1());
1514 LValue denominator = lowInt32(m_node->child2());
1516 LBasicBlock unsafeDenominator = FTL_NEW_BLOCK(m_out, ("ArithMod unsafe denominator"));
1517 LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ArithMod continuation"));
1518 LBasicBlock done = FTL_NEW_BLOCK(m_out, ("ArithMod done"));
1520 Vector<ValueFromBlock, 3> results;
1522 LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
1525 m_out.above(adjustedDenominator, m_out.int32One),
1526 usually(continuation), rarely(unsafeDenominator));
1528 LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
1530 LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
1532 // FIXME: -2^31 / -1 will actually yield negative zero, so we could have a
1533 // separate case for that. But it probably doesn't matter so much.
1534 if (shouldCheckOverflow(m_node->arithMode())) {
1535 LValue cond = m_out.bitOr(m_out.isZero32(denominator), m_out.equal(numerator, neg2ToThe31));
1536 speculate(Overflow, noValue(), 0, cond);
1537 m_out.jump(continuation);
1539 // This is the case where we convert the result to an int after we're done. So,
1540 // if the denominator is zero, then the result should be result should be zero.
1541 // If the denominator is not zero (i.e. it's -1 because we're guarded by the
1542 // check above) and the numerator is -2^31 then the result should be -2^31.
1544 LBasicBlock modByZero = FTL_NEW_BLOCK(m_out, ("ArithMod modulo by zero"));
1545 LBasicBlock notModByZero = FTL_NEW_BLOCK(m_out, ("ArithMod not modulo by zero"));
1546 LBasicBlock neg2ToThe31ByNeg1 = FTL_NEW_BLOCK(m_out, ("ArithMod -2^31/-1"));
1549 m_out.isZero32(denominator), rarely(modByZero), usually(notModByZero));
1551 m_out.appendTo(modByZero, notModByZero);
1552 results.append(m_out.anchor(m_out.int32Zero));
1555 m_out.appendTo(notModByZero, neg2ToThe31ByNeg1);
1557 m_out.equal(numerator, neg2ToThe31),
1558 rarely(neg2ToThe31ByNeg1), usually(continuation));
1560 m_out.appendTo(neg2ToThe31ByNeg1, continuation);
1561 results.append(m_out.anchor(m_out.int32Zero));
1565 m_out.appendTo(continuation, done);
1567 LValue remainder = m_out.rem(numerator, denominator);
1569 if (shouldCheckNegativeZero(m_node->arithMode())) {
1570 LBasicBlock negativeNumerator = FTL_NEW_BLOCK(m_out, ("ArithMod negative numerator"));
1571 LBasicBlock numeratorContinuation = FTL_NEW_BLOCK(m_out, ("ArithMod numerator continuation"));
1574 m_out.lessThan(numerator, m_out.int32Zero),
1575 unsure(negativeNumerator), unsure(numeratorContinuation));
1577 LBasicBlock innerLastNext = m_out.appendTo(negativeNumerator, numeratorContinuation);
1579 speculate(NegativeZero, noValue(), 0, m_out.isZero32(remainder));
1581 m_out.jump(numeratorContinuation);
1583 m_out.appendTo(numeratorContinuation, innerLastNext);
1586 results.append(m_out.anchor(remainder));
1589 m_out.appendTo(done, lastNext);
1591 setInt32(m_out.phi(m_out.int32, results));
1595 case DoubleRepUse: {
1597 m_out.doubleRem(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
1602 DFG_CRASH(m_graph, m_node, "Bad use kind");
1607 void compileArithMinOrMax()
1609 switch (m_node->binaryUseKind()) {
1611 LValue left = lowInt32(m_node->child1());
1612 LValue right = lowInt32(m_node->child2());
1616 m_node->op() == ArithMin
1617 ? m_out.lessThan(left, right)
1618 : m_out.lessThan(right, left),
1623 case DoubleRepUse: {
1624 LValue left = lowDouble(m_node->child1());
1625 LValue right = lowDouble(m_node->child2());
1627 LBasicBlock notLessThan = FTL_NEW_BLOCK(m_out, ("ArithMin/ArithMax not less than"));
1628 LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ArithMin/ArithMax continuation"));
1630 Vector<ValueFromBlock, 2> results;
1632 results.append(m_out.anchor(left));
1634 m_node->op() == ArithMin
1635 ? m_out.doubleLessThan(left, right)
1636 : m_out.doubleGreaterThan(left, right),
1637 unsure(continuation), unsure(notLessThan));
1639 LBasicBlock lastNext = m_out.appendTo(notLessThan, continuation);
1640 results.append(m_out.anchor(m_out.select(
1641 m_node->op() == ArithMin
1642 ? m_out.doubleGreaterThanOrEqual(left, right)
1643 : m_out.doubleLessThanOrEqual(left, right),
1644 right, m_out.constDouble(PNaN))));
1645 m_out.jump(continuation);
1647 m_out.appendTo(continuation, lastNext);
1648 setDouble(m_out.phi(m_out.doubleType, results));
1653 DFG_CRASH(m_graph, m_node, "Bad use kind");
1658 void compileArithAbs()
1660 switch (m_node->child1().useKind()) {
1662 LValue value = lowInt32(m_node->child1());
1664 LValue mask = m_out.aShr(value, m_out.constInt32(31));
1665 LValue result = m_out.bitXor(mask, m_out.add(mask, value));
1667 speculate(Overflow, noValue(), 0, m_out.equal(result, m_out.constInt32(1 << 31)));
1673 case DoubleRepUse: {
1674 setDouble(m_out.doubleAbs(lowDouble(m_node->child1())));
1679 DFG_CRASH(m_graph, m_node, "Bad use kind");
1684 void compileArithSin() { setDouble(m_out.doubleSin(lowDouble(m_node->child1()))); }
1686 void compileArithCos() { setDouble(m_out.doubleCos(lowDouble(m_node->child1()))); }
1688 void compileArithPow()
1690 // FIXME: investigate llvm.powi to better understand its performance characteristics.
1691 // It might be better to have the inline loop in DFG too.
1692 if (m_node->child2().useKind() == Int32Use)
1693 setDouble(m_out.doublePowi(lowDouble(m_node->child1()), lowInt32(m_node->child2())));
1695 LValue base = lowDouble(m_node->child1());
1696 LValue exponent = lowDouble(m_node->child2());
1698 LBasicBlock integerExponentIsSmallBlock = FTL_NEW_BLOCK(m_out, ("ArithPow test integer exponent is small."));
1699 LBasicBlock integerExponentPowBlock = FTL_NEW_BLOCK(m_out, ("ArithPow pow(double, (int)double)."));
1700 LBasicBlock doubleExponentPowBlockEntry = FTL_NEW_BLOCK(m_out, ("ArithPow pow(double, double)."));
1701 LBasicBlock nanExceptionExponentIsInfinity = FTL_NEW_BLOCK(m_out, ("ArithPow NaN Exception, check exponent is infinity."));
1702 LBasicBlock nanExceptionBaseIsOne = FTL_NEW_BLOCK(m_out, ("ArithPow NaN Exception, check base is one."));
1703 LBasicBlock powBlock = FTL_NEW_BLOCK(m_out, ("ArithPow regular pow"));
1704 LBasicBlock nanExceptionResultIsNaN = FTL_NEW_BLOCK(m_out, ("ArithPow NaN Exception, result is NaN."));
1705 LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ArithPow continuation"));
1707 LValue integerExponent = m_out.fpToInt32(exponent);
1708 LValue integerExponentConvertedToDouble = m_out.intToDouble(integerExponent);
1709 LValue exponentIsInteger = m_out.doubleEqual(exponent, integerExponentConvertedToDouble);
1710 m_out.branch(exponentIsInteger, unsure(integerExponentIsSmallBlock), unsure(doubleExponentPowBlockEntry));
1712 LBasicBlock lastNext = m_out.appendTo(integerExponentIsSmallBlock, integerExponentPowBlock);
1713 LValue integerExponentBelow1000 = m_out.below(integerExponent, m_out.constInt32(1000));
1714 m_out.branch(integerExponentBelow1000, usually(integerExponentPowBlock), rarely(doubleExponentPowBlockEntry));
1716 m_out.appendTo(integerExponentPowBlock, doubleExponentPowBlockEntry);
1717 ValueFromBlock powDoubleIntResult = m_out.anchor(m_out.doublePowi(base, integerExponent));
1718 m_out.jump(continuation);
1720 // If y is NaN, the result is NaN.
1721 m_out.appendTo(doubleExponentPowBlockEntry, nanExceptionExponentIsInfinity);
1722 LValue exponentIsNaN;
1723 if (m_state.forNode(m_node->child2()).m_type & SpecDoubleNaN)
1724 exponentIsNaN = m_out.doubleNotEqualOrUnordered(exponent, exponent);
1726 exponentIsNaN = m_out.booleanFalse;
1727 m_out.branch(exponentIsNaN, rarely(nanExceptionResultIsNaN), usually(nanExceptionExponentIsInfinity));
1729 // If abs(x) is 1 and y is +infinity, the result is NaN.
1730 // If abs(x) is 1 and y is -infinity, the result is NaN.
1731 m_out.appendTo(nanExceptionExponentIsInfinity, nanExceptionBaseIsOne);
1732 LValue absoluteExponent = m_out.doubleAbs(exponent);
1733 LValue absoluteExponentIsInfinity = m_out.doubleEqual(absoluteExponent, m_out.constDouble(std::numeric_limits<double>::infinity()));
1734 m_out.branch(absoluteExponentIsInfinity, rarely(nanExceptionBaseIsOne), usually(powBlock));
1736 m_out.appendTo(nanExceptionBaseIsOne, powBlock);
1737 LValue absoluteBase = m_out.doubleAbs(base);
1738 LValue absoluteBaseIsOne = m_out.doubleEqual(absoluteBase, m_out.constDouble(1));
1739 m_out.branch(absoluteBaseIsOne, unsure(nanExceptionResultIsNaN), unsure(powBlock));
1741 m_out.appendTo(powBlock, nanExceptionResultIsNaN);
1742 ValueFromBlock powResult = m_out.anchor(m_out.doublePow(base, exponent));
1743 m_out.jump(continuation);
1745 m_out.appendTo(nanExceptionResultIsNaN, continuation);
1746 ValueFromBlock pureNan = m_out.anchor(m_out.constDouble(PNaN));
1747 m_out.jump(continuation);
1749 m_out.appendTo(continuation, lastNext);
1750 setDouble(m_out.phi(m_out.doubleType, powDoubleIntResult, powResult, pureNan));
1754 void compileArithSqrt() { setDouble(m_out.doubleSqrt(lowDouble(m_node->child1()))); }
1756 void compileArithLog() { setDouble(m_out.doubleLog(lowDouble(m_node->child1()))); }
1758 void compileArithFRound()
1760 LValue floatValue = m_out.fpCast(lowDouble(m_node->child1()), m_out.floatType);
1761 setDouble(m_out.fpCast(floatValue, m_out.doubleType));
1764 void compileArithNegate()
1766 switch (m_node->child1().useKind()) {
1768 LValue value = lowInt32(m_node->child1());
1771 if (!shouldCheckOverflow(m_node->arithMode()))
1772 result = m_out.neg(value);
1773 else if (!shouldCheckNegativeZero(m_node->arithMode())) {
1774 // We don't have a negate-with-overflow intrinsic. Hopefully this
1775 // does the trick, though.
1776 LValue overflowResult = m_out.subWithOverflow32(m_out.int32Zero, value);
1777 speculate(Overflow, noValue(), 0, m_out.extractValue(overflowResult, 1));
1778 result = m_out.extractValue(overflowResult, 0);
1780 speculate(Overflow, noValue(), 0, m_out.testIsZero32(value, m_out.constInt32(0x7fffffff)));
1781 result = m_out.neg(value);
1789 if (!m_state.forNode(m_node->child1()).couldBeType(SpecInt52)) {
1791 LValue value = lowWhicheverInt52(m_node->child1(), kind);
1792 LValue result = m_out.neg(value);
1793 if (shouldCheckNegativeZero(m_node->arithMode()))
1794 speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
1795 setInt52(result, kind);
1799 LValue value = lowInt52(m_node->child1());
1800 LValue overflowResult = m_out.subWithOverflow64(m_out.int64Zero, value);
1801 speculate(Int52Overflow, noValue(), 0, m_out.extractValue(overflowResult, 1));
1802 LValue result = m_out.extractValue(overflowResult, 0);
1803 speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
1808 case DoubleRepUse: {
1809 setDouble(m_out.doubleNeg(lowDouble(m_node->child1())));
1814 DFG_CRASH(m_graph, m_node, "Bad use kind");
1819 void compileBitAnd()
1821 setInt32(m_out.bitAnd(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
1826 setInt32(m_out.bitOr(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
1829 void compileBitXor()
1831 setInt32(m_out.bitXor(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
1834 void compileBitRShift()
1836 setInt32(m_out.aShr(
1837 lowInt32(m_node->child1()),
1838 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
1841 void compileBitLShift()
1844 lowInt32(m_node->child1()),
1845 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
1848 void compileBitURShift()
1850 setInt32(m_out.lShr(
1851 lowInt32(m_node->child1()),
1852 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
1855 void compileUInt32ToNumber()
1857 LValue value = lowInt32(m_node->child1());
1859 if (doesOverflow(m_node->arithMode())) {
1860 setDouble(m_out.unsignedToDouble(value));
1864 speculate(Overflow, noValue(), 0, m_out.lessThan(value, m_out.int32Zero));
1868 void compileCheckStructure()
1870 LValue cell = lowCell(m_node->child1());
1873 if (m_node->child1()->hasConstant())
1874 exitKind = BadConstantCache;
1876 exitKind = BadCache;
1878 LValue structureID = m_out.load32(cell, m_heaps.JSCell_structureID);
1880 checkStructure(structureID, jsValueValue(cell), exitKind, m_node->structureSet());
1883 void compileCheckCell()
1885 LValue cell = lowCell(m_node->child1());
1888 BadCell, jsValueValue(cell), m_node->child1().node(),
1889 m_out.notEqual(cell, weakPointer(m_node->cellOperand()->cell())));
1892 void compileCheckBadCell()
1897 void compileCheckNotEmpty()
1899 speculate(TDZFailure, noValue(), nullptr, m_out.isZero64(lowJSValue(m_node->child1())));
1902 void compileGetExecutable()
1904 LValue cell = lowCell(m_node->child1());
1905 speculateFunction(m_node->child1(), cell);
1906 setJSValue(m_out.loadPtr(cell, m_heaps.JSFunction_executable));
1909 void compileArrayifyToStructure()
1911 LValue cell = lowCell(m_node->child1());
1912 LValue property = !!m_node->child2() ? lowInt32(m_node->child2()) : 0;
1914 LBasicBlock unexpectedStructure = FTL_NEW_BLOCK(m_out, ("ArrayifyToStructure unexpected structure"));
1915 LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ArrayifyToStructure continuation"));
1917 LValue structureID = m_out.load32(cell, m_heaps.JSCell_structureID);
1920 m_out.notEqual(structureID, weakStructureID(m_node->structure())),
1921 rarely(unexpectedStructure), usually(continuation));
1923 LBasicBlock lastNext = m_out.appendTo(unexpectedStructure, continuation);
1926 switch (m_node->arrayMode().type()) {
1929 case Array::Contiguous:
1931 Uncountable, noValue(), 0,
1932 m_out.aboveOrEqual(property, m_out.constInt32(MIN_SPARSE_ARRAY_INDEX)));
1939 switch (m_node->arrayMode().type()) {
1941 vmCall(m_out.operation(operationEnsureInt32), m_callFrame, cell);
1944 vmCall(m_out.operation(operationEnsureDouble), m_callFrame, cell);
1946 case Array::Contiguous:
1947 if (m_node->arrayMode().conversion() == Array::RageConvert)
1948 vmCall(m_out.operation(operationRageEnsureContiguous), m_callFrame, cell);
1950 vmCall(m_out.operation(operationEnsureContiguous), m_callFrame, cell);
1952 case Array::ArrayStorage:
1953 case Array::SlowPutArrayStorage:
1954 vmCall(m_out.operation(operationEnsureArrayStorage), m_callFrame, cell);
1957 DFG_CRASH(m_graph, m_node, "Bad array type");
1961 structureID = m_out.load32(cell, m_heaps.JSCell_structureID);
1963 BadIndexingType, jsValueValue(cell), 0,
1964 m_out.notEqual(structureID, weakStructureID(m_node->structure())));
1965 m_out.jump(continuation);
1967 m_out.appendTo(continuation, lastNext);
1970 void compilePutStructure()
1972 m_ftlState.jitCode->common.notifyCompilingStructureTransition(m_graph.m_plan, codeBlock(), m_node);
1974 Structure* oldStructure = m_node->transition()->previous;
1975 Structure* newStructure = m_node->transition()->next;
1976 ASSERT_UNUSED(oldStructure, oldStructure->indexingType() == newStructure->indexingType());
1977 ASSERT(oldStructure->typeInfo().inlineTypeFlags() == newStructure->typeInfo().inlineTypeFlags());
1978 ASSERT(oldStructure->typeInfo().type() == newStructure->typeInfo().type());
1980 LValue cell = lowCell(m_node->child1());
1982 weakStructureID(newStructure),
1983 cell, m_heaps.JSCell_structureID);
1986 void compileGetById()
1988 // Pretty much the only reason why we don't also support GetByIdFlush is because:
1989 // https://bugs.webkit.org/show_bug.cgi?id=125711
1991 switch (m_node->child1().useKind()) {
1993 setJSValue(getById(lowCell(m_node->child1())));
1998 // This is pretty weird, since we duplicate the slow path both here and in the
1999 // code generated by the IC. We should investigate making this less bad.
2000 // https://bugs.webkit.org/show_bug.cgi?id=127830
2001 LValue value = lowJSValue(m_node->child1());
2003 LBasicBlock cellCase = FTL_NEW_BLOCK(m_out, ("GetById untyped cell case"));
2004 LBasicBlock notCellCase = FTL_NEW_BLOCK(m_out, ("GetById untyped not cell case"));
2005 LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("GetById untyped continuation"));
2007 m_out.branch(isCell(value), unsure(cellCase), unsure(notCellCase));
2009 LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
2010 ValueFromBlock cellResult = m_out.anchor(getById(value));
2011 m_out.jump(continuation);
2013 m_out.appendTo(notCellCase, continuation);
2014 ValueFromBlock notCellResult = m_out.anchor(vmCall(
2015 m_out.operation(operationGetById),
2016 m_callFrame, getUndef(m_out.intPtr), value,
2017 m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
2018 m_out.jump(continuation);
2020 m_out.appendTo(continuation, lastNext);
2021 setJSValue(m_out.phi(m_out.int64, cellResult, notCellResult));
2026 DFG_CRASH(m_graph, m_node, "Bad use kind");
2031 void compilePutById()
2033 // See above; CellUse is easier so we do only that for now.
2034 ASSERT(m_node->child1().useKind() == CellUse);
2036 LValue base = lowCell(m_node->child1());
2037 LValue value = lowJSValue(m_node->child2());
2038 StringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
2040 // Arguments: id, bytes, target, numArgs, args...
2041 unsigned stackmapID = m_stackmapIDs++;
2043 if (verboseCompilationEnabled())
2044 dataLog(" Emitting PutById patchpoint with stackmap #", stackmapID, "\n");
2046 LValue call = m_out.call(
2047 m_out.patchpointVoidIntrinsic(),
2048 m_out.constInt64(stackmapID), m_out.constInt32(sizeOfPutById()),
2049 constNull(m_out.ref8), m_out.constInt32(2), base, value);
2050 setInstructionCallingConvention(call, LLVMAnyRegCallConv);
2052 m_ftlState.putByIds.append(PutByIdDescriptor(
2053 stackmapID, m_node->origin.semantic, uid,
2054 m_graph.executableFor(m_node->origin.semantic)->ecmaMode(),
2055 m_node->op() == PutByIdDirect ? Direct : NotDirect));
2058 void compileGetButterfly()
2060 setStorage(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSObject_butterfly));
2063 void compileConstantStoragePointer()
2065 setStorage(m_out.constIntPtr(m_node->storagePointer()));
2068 void compileGetIndexedPropertyStorage()
2070 LValue cell = lowCell(m_node->child1());
2072 if (m_node->arrayMode().type() == Array::String) {
2073 LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("GetIndexedPropertyStorage String slow case"));
2074 LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("GetIndexedPropertyStorage String continuation"));
2076 ValueFromBlock fastResult = m_out.anchor(
2077 m_out.loadPtr(cell, m_heaps.JSString_value));
2080 m_out.notNull(fastResult.value()), usually(continuation), rarely(slowPath));
2082 LBasicBlock lastNext = m_out.appendTo(slowPath, continuation);
2084 ValueFromBlock slowResult = m_out.anchor(
2085 vmCall(m_out.operation(operationResolveRope), m_callFrame, cell));
2087 m_out.jump(continuation);
2089 m_out.appendTo(continuation, lastNext);
2091 setStorage(m_out.loadPtr(m_out.phi(m_out.intPtr, fastResult, slowResult), m_heaps.StringImpl_data));
2095 setStorage(m_out.loadPtr(cell, m_heaps.JSArrayBufferView_vector));
2098 void compileCheckArray()
2100 Edge edge = m_node->child1();
2101 LValue cell = lowCell(edge);
2103 if (m_node->arrayMode().alreadyChecked(m_graph, m_node, m_state.forNode(edge)))
2107 BadIndexingType, jsValueValue(cell), 0,
2108 m_out.bitNot(isArrayType(cell, m_node->arrayMode())));
2111 void compileGetTypedArrayByteOffset()
2113 LValue basePtr = lowCell(m_node->child1());
2115 LBasicBlock simpleCase = FTL_NEW_BLOCK(m_out, ("wasteless typed array"));
2116 LBasicBlock wastefulCase = FTL_NEW_BLOCK(m_out, ("wasteful typed array"));
2117 LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("continuation branch"));
2119 LValue mode = m_out.load32(basePtr, m_heaps.JSArrayBufferView_mode);
2121 m_out.notEqual(mode, m_out.constInt32(WastefulTypedArray)),
2122 unsure(simpleCase), unsure(wastefulCase));
2124 // begin simple case
2125 LBasicBlock lastNext = m_out.appendTo(simpleCase, wastefulCase);
2127 ValueFromBlock simpleOut = m_out.anchor(m_out.constIntPtr(0));
2129 m_out.jump(continuation);
2131 // begin wasteful case
2132 m_out.appendTo(wastefulCase, continuation);
2134 LValue vectorPtr = m_out.loadPtr(basePtr, m_heaps.JSArrayBufferView_vector);
2135 LValue butterflyPtr = m_out.loadPtr(basePtr, m_heaps.JSObject_butterfly);
2136 LValue arrayBufferPtr = m_out.loadPtr(butterflyPtr, m_heaps.Butterfly_arrayBuffer);
2137 LValue dataPtr = m_out.loadPtr(arrayBufferPtr, m_heaps.ArrayBuffer_data);
2139 ValueFromBlock wastefulOut = m_out.anchor(m_out.sub(vectorPtr, dataPtr));
2141 m_out.jump(continuation);
2142 m_out.appendTo(continuation, lastNext);
2145 setInt32(m_out.castToInt32(m_out.phi(m_out.intPtr, simpleOut, wastefulOut)));
2148 void compileGetArrayLength()
2150 switch (m_node->arrayMode().type()) {
2153 case Array::Contiguous: {
2154 setInt32(m_out.load32NonNegative(lowStorage(m_node->child2()), m_heaps.Butterfly_publicLength));
2158 case Array::String: {
2159 LValue string = lowCell(m_node->child1());
2160 setInt32(m_out.load32NonNegative(string, m_heaps.JSString_length));
2164 case Array::DirectArguments: {
2165 LValue arguments = lowCell(m_node->child1());
2167 ExoticObjectMode, noValue(), nullptr,
2168 m_out.notNull(m_out.loadPtr(arguments, m_heaps.DirectArguments_overrides)));
2169 setInt32(m_out.load32NonNegative(arguments, m_heaps.DirectArguments_length));
2173 case Array::ScopedArguments: {
2174 LValue arguments = lowCell(m_node->child1());
2176 ExoticObjectMode, noValue(), nullptr,
2177 m_out.notZero8(m_out.load8(arguments, m_heaps.ScopedArguments_overrodeThings)));
2178 setInt32(m_out.load32NonNegative(arguments, m_heaps.ScopedArguments_totalLength));
2183 if (isTypedView(m_node->arrayMode().typedArrayType())) {
2185 m_out.load32NonNegative(lowCell(m_node->child1()), m_heaps.JSArrayBufferView_length));
2189 DFG_CRASH(m_graph, m_node, "Bad array type");
2194 void compileCheckInBounds()
2197 OutOfBounds, noValue(), 0,
2198 m_out.aboveOrEqual(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
2201 void compileGetByVal()
2203 switch (m_node->arrayMode().type()) {
2205 case Array::Contiguous: {
2206 LValue index = lowInt32(m_node->child2());
2207 LValue storage = lowStorage(m_node->child3());
2209 IndexedAbstractHeap& heap = m_node->arrayMode().type() == Array::Int32 ?
2210 m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties;
2212 if (m_node->arrayMode().isInBounds()) {
2213 LValue result = m_out.load64(baseIndex(heap, storage, index, m_node->child2()));
2214 speculate(LoadFromHole, noValue(), 0, m_out.isZero64(result));
2219 LValue base = lowCell(m_node->child1());
2221 LBasicBlock fastCase = FTL_NEW_BLOCK(m_out, ("GetByVal int/contiguous fast case"));
2222 LBasicBlock slowCase = FTL_NEW_BLOCK(m_out, ("GetByVal int/contiguous slow case"));
2223 LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("GetByVal int/contiguous continuation"));
2227 index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
2228 rarely(slowCase), usually(fastCase));
2230 LBasicBlock lastNext = m_out.appendTo(fastCase, slowCase);
2232 ValueFromBlock fastResult = m_out.anchor(
2233 m_out.load64(baseIndex(heap, storage, index, m_node->child2())));
2235 m_out.isZero64(fastResult.value()), rarely(slowCase), usually(continuation));
2237 m_out.appendTo(slowCase, continuation);
2238 ValueFromBlock slowResult = m_out.anchor(
2239 vmCall(m_out.operation(operationGetByValArrayInt), m_callFrame, base, index));
2240 m_out.jump(continuation);
2242 m_out.appendTo(continuation, lastNext);
2243 setJSValue(m_out.phi(m_out.int64, fastResult, slowResult));
        case Array::Double: {
            LValue index = lowInt32(m_node->child2());
            LValue storage = lowStorage(m_node->child3());

            IndexedAbstractHeap& heap = m_heaps.indexedDoubleProperties;

            if (m_node->arrayMode().isInBounds()) {
                LValue result = m_out.loadDouble(
                    baseIndex(heap, storage, index, m_node->child2()));

                if (!m_node->arrayMode().isSaneChain()) {
                    speculate(
                        LoadFromHole, noValue(), 0,
                        m_out.doubleNotEqualOrUnordered(result, result));
                }
                setDouble(result);
                return;
            }

            LValue base = lowCell(m_node->child1());

            LBasicBlock inBounds = FTL_NEW_BLOCK(m_out, ("GetByVal double in bounds"));
            LBasicBlock boxPath = FTL_NEW_BLOCK(m_out, ("GetByVal double boxing"));
            LBasicBlock slowCase = FTL_NEW_BLOCK(m_out, ("GetByVal double slow case"));
            LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("GetByVal double continuation"));

            m_out.branch(
                m_out.aboveOrEqual(
                    index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
                rarely(slowCase), usually(inBounds));

            LBasicBlock lastNext = m_out.appendTo(inBounds, boxPath);
            LValue doubleValue = m_out.loadDouble(
                baseIndex(heap, storage, index, m_node->child2()));
            m_out.branch(
                m_out.doubleNotEqualOrUnordered(doubleValue, doubleValue),
                rarely(slowCase), usually(boxPath));

            m_out.appendTo(boxPath, slowCase);
            ValueFromBlock fastResult = m_out.anchor(boxDouble(doubleValue));
            m_out.jump(continuation);

            m_out.appendTo(slowCase, continuation);
            ValueFromBlock slowResult = m_out.anchor(
                vmCall(m_out.operation(operationGetByValArrayInt), m_callFrame, base, index));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(m_out.int64, fastResult, slowResult));
            return;
        }

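        // DirectArguments fast path: only valid while the arguments object has not had its
        // contents overridden and the index is within the stored length; otherwise we exit.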
        case Array::DirectArguments: {
            LValue base = lowCell(m_node->child1());
            LValue index = lowInt32(m_node->child2());

            speculate(
                ExoticObjectMode, noValue(), nullptr,
                m_out.notNull(m_out.loadPtr(base, m_heaps.DirectArguments_overrides)));
            speculate(
                ExoticObjectMode, noValue(), nullptr,
                m_out.aboveOrEqual(
                    index,
                    m_out.load32NonNegative(base, m_heaps.DirectArguments_length)));

            TypedPointer address = m_out.baseIndex(
                m_heaps.DirectArguments_storage, base, m_out.zeroExtPtr(index));
            setJSValue(m_out.load64(address));
            return;
        }

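        // ScopedArguments keeps captured arguments in the scope, indexed through the
        // ScopedArgumentsTable; anything past the named arguments lives in overflow storage.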
        case Array::ScopedArguments: {
            LValue base = lowCell(m_node->child1());
            LValue index = lowInt32(m_node->child2());

            speculate(
                ExoticObjectMode, noValue(), nullptr,
                m_out.aboveOrEqual(
                    index,
                    m_out.load32NonNegative(base, m_heaps.ScopedArguments_totalLength)));

            LValue table = m_out.loadPtr(base, m_heaps.ScopedArguments_table);
            LValue namedLength = m_out.load32(table, m_heaps.ScopedArgumentsTable_length);

            LBasicBlock namedCase = FTL_NEW_BLOCK(m_out, ("GetByVal ScopedArguments named case"));
            LBasicBlock overflowCase = FTL_NEW_BLOCK(m_out, ("GetByVal ScopedArguments overflow case"));
            LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("GetByVal ScopedArguments continuation"));

            m_out.branch(
                m_out.aboveOrEqual(index, namedLength), unsure(overflowCase), unsure(namedCase));

            LBasicBlock lastNext = m_out.appendTo(namedCase, overflowCase);

            LValue scope = m_out.loadPtr(base, m_heaps.ScopedArguments_scope);
            LValue arguments = m_out.loadPtr(table, m_heaps.ScopedArgumentsTable_arguments);

            TypedPointer address = m_out.baseIndex(
                m_heaps.scopedArgumentsTableArguments, arguments, m_out.zeroExtPtr(index));
            LValue scopeOffset = m_out.load32(address);

            speculate(
                ExoticObjectMode, noValue(), nullptr,
                m_out.equal(scopeOffset, m_out.constInt32(ScopeOffset::invalidOffset)));

            address = m_out.baseIndex(
                m_heaps.JSEnvironmentRecord_variables, scope, m_out.zeroExtPtr(scopeOffset));
            ValueFromBlock namedResult = m_out.anchor(m_out.load64(address));
            m_out.jump(continuation);

            m_out.appendTo(overflowCase, continuation);

            address = m_out.baseIndex(
                m_heaps.ScopedArguments_overflowStorage, base,
                m_out.zeroExtPtr(m_out.sub(index, namedLength)));
            LValue overflowValue = m_out.load64(address);
            speculate(ExoticObjectMode, noValue(), nullptr, m_out.isZero64(overflowValue));
            ValueFromBlock overflowResult = m_out.anchor(overflowValue);
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(m_out.int64, namedResult, overflowResult));
            return;
        }

        case Array::Generic: {
            setJSValue(vmCall(
                m_out.operation(operationGetByVal), m_callFrame,
                lowJSValue(m_node->child1()), lowJSValue(m_node->child2())));
            return;
        }

        case Array::String: {
            compileStringCharAt();
            return;
        }

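        // Typed array loads: compute the element address from the storage pointer, load at the
        // element's width, then sign- or zero-extend and box according to what the node is
        // predicted to produce.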
        default: {
            LValue index = lowInt32(m_node->child2());
            LValue storage = lowStorage(m_node->child3());

            TypedArrayType type = m_node->arrayMode().typedArrayType();

            if (isTypedView(type)) {
                TypedPointer pointer = TypedPointer(
                    m_heaps.typedArrayProperties,
                    m_out.add(
                        storage,
                        m_out.shl(
                            m_out.zeroExtPtr(index),
                            m_out.constIntPtr(logElementSize(type)))));

                if (isInt(type)) {
                    LValue result;
                    switch (elementSize(type)) {
                    case 1:
                        result = m_out.load8(pointer);
                        break;
                    case 2:
                        result = m_out.load16(pointer);
                        break;
                    case 4:
                        result = m_out.load32(pointer);
                        break;
                    default:
                        DFG_CRASH(m_graph, m_node, "Bad element size");
                    }

                    if (elementSize(type) < 4) {
                        if (isSigned(type))
                            result = m_out.signExt(result, m_out.int32);
                        else
                            result = m_out.zeroExt(result, m_out.int32);
                        setInt32(result);
                        return;
                    }

                    if (isSigned(type)) {
                        setInt32(result);
                        return;
                    }

                    if (m_node->shouldSpeculateInt32()) {
                        speculate(
                            Overflow, noValue(), 0, m_out.lessThan(result, m_out.int32Zero));
                        setInt32(result);
                        return;
                    }

                    if (m_node->shouldSpeculateMachineInt()) {
                        setStrictInt52(m_out.zeroExt(result, m_out.int64));
                        return;
                    }

                    setDouble(m_out.unsignedToFP(result, m_out.doubleType));
                    return;
                }

                ASSERT(isFloat(type));

                LValue result;
                switch (type) {
                case TypeFloat32:
                    result = m_out.fpCast(m_out.loadFloat(pointer), m_out.doubleType);
                    break;
                case TypeFloat64:
                    result = m_out.loadDouble(pointer);
                    break;
                default:
                    DFG_CRASH(m_graph, m_node, "Bad typed array type");
                }

                setDouble(result);
                return;
            }

            DFG_CRASH(m_graph, m_node, "Bad array type");
            return;
        } }
    }

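    // GetMyArgumentByVal reads an argument of the current (possibly inlined) call frame directly
    // off the stack, after speculating that the index is within the argument count.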
    void compileGetMyArgumentByVal()
    {
        InlineCallFrame* inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame;

        LValue index = lowInt32(m_node->child2());

        LValue limit;
        if (inlineCallFrame && !inlineCallFrame->isVarargs())
            limit = m_out.constInt32(inlineCallFrame->arguments.size() - 1);
        else {
            VirtualRegister argumentCountRegister;
            if (!inlineCallFrame)
                argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
            else
                argumentCountRegister = inlineCallFrame->argumentCountRegister;
            limit = m_out.sub(m_out.load32(payloadFor(argumentCountRegister)), m_out.int32One);
        }

        speculate(ExoticObjectMode, noValue(), 0, m_out.aboveOrEqual(index, limit));

        TypedPointer base;
        if (inlineCallFrame) {
            if (inlineCallFrame->arguments.size() <= 1) {
                // We should have already exited due to the bounds check, above. Just tell the
                // compiler that anything dominated by this instruction is not reachable, so
                // that we don't waste time generating such code. This will also plant some
                // kind of crashing instruction so that if by some fluke the bounds check didn't
                // work, we'll crash in an easy-to-see way.
                didAlreadyTerminate();
                return;
            }
            base = addressFor(inlineCallFrame->arguments[1].virtualRegister());
        } else
            base = addressFor(virtualRegisterForArgument(1));

        LValue pointer = m_out.baseIndex(
            base.value(), m_out.zeroExt(index, m_out.intPtr), ScaleEight);
        setJSValue(m_out.load64(TypedPointer(m_heaps.variables.atAnyIndex(), pointer)));
    }

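    // PutByVal: the Generic mode goes straight to a VM call; the specialized modes store into the
    // butterfly or typed array storage, with out-of-bounds handling on a slow path.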
    void compilePutByVal()
    {
        Edge child1 = m_graph.varArgChild(m_node, 0);
        Edge child2 = m_graph.varArgChild(m_node, 1);
        Edge child3 = m_graph.varArgChild(m_node, 2);
        Edge child4 = m_graph.varArgChild(m_node, 3);
        Edge child5 = m_graph.varArgChild(m_node, 4);

        switch (m_node->arrayMode().type()) {
        case Array::Generic: {
            V_JITOperation_EJJJ operation;
            if (m_node->op() == PutByValDirect) {
                if (m_graph.isStrictModeFor(m_node->origin.semantic))
                    operation = operationPutByValDirectStrict;
                else
                    operation = operationPutByValDirectNonStrict;
            } else {
                if (m_graph.isStrictModeFor(m_node->origin.semantic))
                    operation = operationPutByValStrict;
                else
                    operation = operationPutByValNonStrict;
            }

            vmCall(
                m_out.operation(operation), m_callFrame,
                lowJSValue(child1), lowJSValue(child2), lowJSValue(child3));
            return;
        }

        default:
            break;
        }

        LValue base = lowCell(child1);
        LValue index = lowInt32(child2);
        LValue storage = lowStorage(child4);

        switch (m_node->arrayMode().type()) {
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous: {
            LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("PutByVal continuation"));
            LBasicBlock outerLastNext = m_out.appendTo(m_out.m_block, continuation);

            switch (m_node->arrayMode().type()) {
            case Array::Int32:
            case Array::Contiguous: {
                LValue value = lowJSValue(child3, ManualOperandSpeculation);

                if (m_node->arrayMode().type() == Array::Int32)
                    FTL_TYPE_CHECK(jsValueValue(value), child3, SpecInt32, isNotInt32(value));

                TypedPointer elementPointer = m_out.baseIndex(
                    m_node->arrayMode().type() == Array::Int32 ?
                    m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties,
                    storage, m_out.zeroExtPtr(index), m_state.forNode(child2).m_value);

                if (m_node->op() == PutByValAlias) {
                    m_out.store64(value, elementPointer);
                    break;
                }

                contiguousPutByValOutOfBounds(
                    codeBlock()->isStrictMode()
                    ? operationPutByValBeyondArrayBoundsStrict
                    : operationPutByValBeyondArrayBoundsNonStrict,
                    base, storage, index, value, continuation);

                m_out.store64(value, elementPointer);
                break;
            }

            case Array::Double: {
                LValue value = lowDouble(child3);

                FTL_TYPE_CHECK(
                    doubleValue(value), child3, SpecDoubleReal,
                    m_out.doubleNotEqualOrUnordered(value, value));

                TypedPointer elementPointer = m_out.baseIndex(
                    m_heaps.indexedDoubleProperties, storage, m_out.zeroExtPtr(index),
                    m_state.forNode(child2).m_value);

                if (m_node->op() == PutByValAlias) {
                    m_out.storeDouble(value, elementPointer);
                    break;
                }

                contiguousPutByValOutOfBounds(
                    codeBlock()->isStrictMode()
                    ? operationPutDoubleByValBeyondArrayBoundsStrict
                    : operationPutDoubleByValBeyondArrayBoundsNonStrict,
                    base, storage, index, value, continuation);

                m_out.storeDouble(value, elementPointer);
                break;
            }

            default:
                DFG_CRASH(m_graph, m_node, "Bad array type");
            }

            m_out.jump(continuation);
            m_out.appendTo(continuation, outerLastNext);
            return;
        }

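        // Typed array stores: the value is first converted (and clamped for Uint8Clamped) to the
        // element's representation, then stored; when the mode is not in-bounds, an out-of-range
        // index simply skips the store instead of exiting.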
        default: {
            TypedArrayType type = m_node->arrayMode().typedArrayType();

            if (isTypedView(type)) {
                TypedPointer pointer = TypedPointer(
                    m_heaps.typedArrayProperties,
                    m_out.add(
                        storage,
                        m_out.shl(
                            m_out.zeroExt(index, m_out.intPtr),
                            m_out.constIntPtr(logElementSize(type)))));

                LType refType;
                LValue valueToStore;

                if (isInt(type)) {
                    LValue intValue;
                    switch (child3.useKind()) {
                    case Int52RepUse:
                    case Int32Use: {
                        if (child3.useKind() == Int32Use)
                            intValue = lowInt32(child3);
                        else
                            intValue = m_out.castToInt32(lowStrictInt52(child3));

                        if (isClamped(type)) {
                            ASSERT(elementSize(type) == 1);

                            LBasicBlock atLeastZero = FTL_NEW_BLOCK(m_out, ("PutByVal int clamp atLeastZero"));
                            LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("PutByVal int clamp continuation"));

                            Vector<ValueFromBlock, 2> intValues;
                            intValues.append(m_out.anchor(m_out.int32Zero));
                            m_out.branch(
                                m_out.lessThan(intValue, m_out.int32Zero),
                                unsure(continuation), unsure(atLeastZero));

                            LBasicBlock lastNext = m_out.appendTo(atLeastZero, continuation);

                            intValues.append(m_out.anchor(m_out.select(
                                m_out.greaterThan(intValue, m_out.constInt32(255)),
                                m_out.constInt32(255),
                                intValue)));
                            m_out.jump(continuation);

                            m_out.appendTo(continuation, lastNext);
                            intValue = m_out.phi(m_out.int32, intValues);
                        }
                        break;
                    }

                    case DoubleRepUse: {
                        LValue doubleValue = lowDouble(child3);

                        if (isClamped(type)) {
                            ASSERT(elementSize(type) == 1);

                            LBasicBlock atLeastZero = FTL_NEW_BLOCK(m_out, ("PutByVal double clamp atLeastZero"));
                            LBasicBlock withinRange = FTL_NEW_BLOCK(m_out, ("PutByVal double clamp withinRange"));
                            LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("PutByVal double clamp continuation"));

                            Vector<ValueFromBlock, 3> intValues;
                            intValues.append(m_out.anchor(m_out.int32Zero));
                            m_out.branch(
                                m_out.doubleLessThanOrUnordered(doubleValue, m_out.doubleZero),
                                unsure(continuation), unsure(atLeastZero));

                            LBasicBlock lastNext = m_out.appendTo(atLeastZero, withinRange);
                            intValues.append(m_out.anchor(m_out.constInt32(255)));
                            m_out.branch(
                                m_out.doubleGreaterThan(doubleValue, m_out.constDouble(255)),
                                unsure(continuation), unsure(withinRange));

                            m_out.appendTo(withinRange, continuation);
                            intValues.append(m_out.anchor(m_out.fpToInt32(doubleValue)));
                            m_out.jump(continuation);

                            m_out.appendTo(continuation, lastNext);
                            intValue = m_out.phi(m_out.int32, intValues);
                        } else
                            intValue = doubleToInt32(doubleValue);
                        break;
                    }

                    default:
                        DFG_CRASH(m_graph, m_node, "Bad use kind");
                    }

                    switch (elementSize(type)) {
                    case 1:
                        valueToStore = m_out.intCast(intValue, m_out.int8);
                        refType = m_out.ref8;
                        break;
                    case 2:
                        valueToStore = m_out.intCast(intValue, m_out.int16);
                        refType = m_out.ref16;
                        break;
                    case 4:
                        valueToStore = intValue;
                        refType = m_out.ref32;
                        break;
                    default:
                        DFG_CRASH(m_graph, m_node, "Bad element size");
                    }
                } else /* !isInt(type) */ {
                    LValue value = lowDouble(child3);
                    switch (type) {
                    case TypeFloat32:
                        valueToStore = m_out.fpCast(value, m_out.floatType);
                        refType = m_out.refFloat;
                        break;
                    case TypeFloat64:
                        valueToStore = value;
                        refType = m_out.refDouble;
                        break;
                    default:
                        DFG_CRASH(m_graph, m_node, "Bad typed array type");
                    }
                }

                if (m_node->arrayMode().isInBounds() || m_node->op() == PutByValAlias)
                    m_out.store(valueToStore, pointer, refType);
                else {
                    LBasicBlock isInBounds = FTL_NEW_BLOCK(m_out, ("PutByVal typed array in bounds case"));
                    LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("PutByVal typed array continuation"));

                    m_out.branch(
                        m_out.aboveOrEqual(index, lowInt32(child5)),
                        unsure(continuation), unsure(isInBounds));

                    LBasicBlock lastNext = m_out.appendTo(isInBounds, continuation);
                    m_out.store(valueToStore, pointer, refType);
                    m_out.jump(continuation);

                    m_out.appendTo(continuation, lastNext);
                }

                return;
            }

            DFG_CRASH(m_graph, m_node, "Bad array type");
            return;
        } }
    }

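    // ArrayPush fast path: if there is vector capacity left, store the element and bump
    // publicLength; otherwise call into the VM to perform the push.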
    void compileArrayPush()
    {
        LValue base = lowCell(m_node->child1());
        LValue storage = lowStorage(m_node->child3());

        switch (m_node->arrayMode().type()) {
        case Array::Int32:
        case Array::Contiguous:
        case Array::Double: {
            LValue value;
            LType refType;

            if (m_node->arrayMode().type() != Array::Double) {
                value = lowJSValue(m_node->child2(), ManualOperandSpeculation);
                if (m_node->arrayMode().type() == Array::Int32) {
                    FTL_TYPE_CHECK(
                        jsValueValue(value), m_node->child2(), SpecInt32, isNotInt32(value));
                }
                refType = m_out.ref64;
            } else {
                value = lowDouble(m_node->child2());
                FTL_TYPE_CHECK(
                    doubleValue(value), m_node->child2(), SpecDoubleReal,
                    m_out.doubleNotEqualOrUnordered(value, value));
                refType = m_out.refDouble;
            }

            IndexedAbstractHeap& heap = m_heaps.forArrayType(m_node->arrayMode().type());

            LValue prevLength = m_out.load32(storage, m_heaps.Butterfly_publicLength);

            LBasicBlock fastPath = FTL_NEW_BLOCK(m_out, ("ArrayPush fast path"));
            LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("ArrayPush slow path"));
            LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ArrayPush continuation"));

            m_out.branch(
                m_out.aboveOrEqual(
                    prevLength, m_out.load32(storage, m_heaps.Butterfly_vectorLength)),
                rarely(slowPath), usually(fastPath));

            LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);
            m_out.store(
                value, m_out.baseIndex(heap, storage, m_out.zeroExtPtr(prevLength)), refType);
            LValue newLength = m_out.add(prevLength, m_out.int32One);
            m_out.store32(newLength, storage, m_heaps.Butterfly_publicLength);

            ValueFromBlock fastResult = m_out.anchor(boxInt32(newLength));
            m_out.jump(continuation);

            m_out.appendTo(slowPath, continuation);
            LValue operation;
            if (m_node->arrayMode().type() != Array::Double)
                operation = m_out.operation(operationArrayPush);
            else
                operation = m_out.operation(operationArrayPushDouble);
            ValueFromBlock slowResult = m_out.anchor(
                vmCall(operation, m_callFrame, value, base));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(m_out.int64, fastResult, slowResult));
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad array type");
            return;
        }
    }

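    // ArrayPop: an empty array yields undefined without calling out; the fast path shrinks
    // publicLength and clears the vacated slot, falling back to the VM if it popped a hole.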
    void compileArrayPop()
    {
        LValue base = lowCell(m_node->child1());
        LValue storage = lowStorage(m_node->child2());

        switch (m_node->arrayMode().type()) {
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous: {
            IndexedAbstractHeap& heap = m_heaps.forArrayType(m_node->arrayMode().type());

            LBasicBlock fastCase = FTL_NEW_BLOCK(m_out, ("ArrayPop fast case"));
            LBasicBlock slowCase = FTL_NEW_BLOCK(m_out, ("ArrayPop slow case"));
            LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("ArrayPop continuation"));

            LValue prevLength = m_out.load32(storage, m_heaps.Butterfly_publicLength);

            Vector<ValueFromBlock, 3> results;
            results.append(m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined()))));
            m_out.branch(
                m_out.isZero32(prevLength), rarely(continuation), usually(fastCase));

            LBasicBlock lastNext = m_out.appendTo(fastCase, slowCase);
            LValue newLength = m_out.sub(prevLength, m_out.int32One);
            m_out.store32(newLength, storage, m_heaps.Butterfly_publicLength);
            TypedPointer pointer = m_out.baseIndex(heap, storage, m_out.zeroExtPtr(newLength));
            if (m_node->arrayMode().type() != Array::Double) {
                LValue result = m_out.load64(pointer);
                m_out.store64(m_out.int64Zero, pointer);
                results.append(m_out.anchor(result));
                m_out.branch(
                    m_out.notZero64(result), usually(continuation), rarely(slowCase));
            } else {
                LValue result = m_out.loadDouble(pointer);
                m_out.store64(m_out.constInt64(bitwise_cast<int64_t>(PNaN)), pointer);
                results.append(m_out.anchor(boxDouble(result)));
                m_out.branch(
                    m_out.doubleEqual(result, result),
                    usually(continuation), rarely(slowCase));
            }

            m_out.appendTo(slowCase, continuation);
            results.append(m_out.anchor(vmCall(
                m_out.operation(operationArrayPopAndRecoverLength), m_callFrame, base)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(m_out.int64, results));
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad array type");
            return;
        }
    }

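    // CreateActivation: while the symbol table's singleton-scope watchpoint is still valid we
    // always allocate through the VM; otherwise we allocate inline and fill every variable slot
    // with undefined.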
    void compileCreateActivation()
    {
        LValue scope = lowCell(m_node->child1());
        SymbolTable* table = m_graph.symbolTableFor(m_node->origin.semantic);
        Structure* structure = m_graph.globalObjectFor(m_node->origin.semantic)->activationStructure();

        if (table->singletonScope()->isStillValid()) {
            LValue callResult = vmCall(
                m_out.operation(operationCreateActivationDirect), m_callFrame, weakPointer(structure),
                scope, weakPointer(table));
            setJSValue(callResult);
            return;
        }

        LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("CreateActivation slow path"));
        LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("CreateActivation continuation"));

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        LValue fastObject = allocateObject<JSLexicalEnvironment>(
            JSLexicalEnvironment::allocationSize(table), structure, m_out.intPtrZero, slowPath);

        // We don't need memory barriers since we just fast-created the activation, so the
        // activation must be young.
        m_out.storePtr(scope, fastObject, m_heaps.JSScope_next);
        m_out.storePtr(weakPointer(table), fastObject, m_heaps.JSSymbolTableObject_symbolTable);

        for (unsigned i = 0; i < table->scopeSize(); ++i) {
            m_out.store64(
                m_out.constInt64(JSValue::encode(jsUndefined())),
                fastObject, m_heaps.JSEnvironmentRecord_variables[i]);
        }

        ValueFromBlock fastResult = m_out.anchor(fastObject);
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);
        LValue callResult = vmCall(
            m_out.operation(operationCreateActivationDirect), m_callFrame, weakPointer(structure),
            scope, weakPointer(table));
        ValueFromBlock slowResult = m_out.anchor(callResult);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(m_out.intPtr, fastResult, slowResult));
    }

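    // NewFunction follows the same pattern: a VM call while the singleton-function watchpoint is
    // still valid, otherwise an inlined allocation that stores the scope, the executable, and a
    // null rareData directly.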
    void compileNewFunction()
    {
        LValue scope = lowCell(m_node->child1());
        FunctionExecutable* executable = m_node->castOperand<FunctionExecutable*>();
        if (executable->singletonFunction()->isStillValid()) {
            LValue callResult = vmCall(
                m_out.operation(operationNewFunction), m_callFrame, scope, weakPointer(executable));
            setJSValue(callResult);
            return;
        }

        Structure* structure = m_graph.globalObjectFor(m_node->origin.semantic)->functionStructure();

        LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("NewFunction slow path"));
        LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("NewFunction continuation"));

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        LValue fastObject = allocateObject<JSFunction>(
            structure, m_out.intPtrZero, slowPath);

        // We don't need memory barriers since we just fast-created the function, so it
        // must be young.
        m_out.storePtr(scope, fastObject, m_heaps.JSFunction_scope);
        m_out.storePtr(weakPointer(executable), fastObject, m_heaps.JSFunction_executable);
        m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.JSFunction_rareData);

        ValueFromBlock fastResult = m_out.anchor(fastObject);
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);
        LValue callResult = vmCall(
            m_out.operation(operationNewFunctionWithInvalidatedReallocationWatchpoint),
            m_callFrame, scope, weakPointer(executable));
        ValueFromBlock slowResult = m_out.anchor(callResult);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(m_out.intPtr, fastResult, slowResult));
    }

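    // CreateDirectArguments allocates the arguments object inline when possible. With a statically
    // known argument count the allocation size and the copy loop are resolved at compile time;
    // otherwise the size is computed and the arguments are copied in an emitted loop.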
    void compileCreateDirectArguments()
    {
        // FIXME: A more effective way of dealing with the argument count and callee is to have
        // them be explicit arguments to this node.
        // https://bugs.webkit.org/show_bug.cgi?id=142207

        Structure* structure =
            m_graph.globalObjectFor(m_node->origin.semantic)->directArgumentsStructure();

        unsigned minCapacity = m_graph.baselineCodeBlockFor(m_node->origin.semantic)->numParameters() - 1;

        LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("CreateDirectArguments slow path"));
        LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("CreateDirectArguments continuation"));

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        ArgumentsLength length = getArgumentsLength();

        LValue fastObject;
        if (length.isKnown) {
            fastObject = allocateObject<DirectArguments>(
                DirectArguments::allocationSize(std::max(length.known, minCapacity)), structure,
                m_out.intPtrZero, slowPath);
        } else {
            LValue size = m_out.add(
                m_out.shl(length.value, m_out.constInt32(3)),
                m_out.constInt32(DirectArguments::storageOffset()));

            size = m_out.select(
                m_out.aboveOrEqual(length.value, m_out.constInt32(minCapacity)),
                size, m_out.constInt32(DirectArguments::allocationSize(minCapacity)));

            fastObject = allocateVariableSizedObject<DirectArguments>(
                size, structure, m_out.intPtrZero, slowPath);
        }

        m_out.store32(length.value, fastObject, m_heaps.DirectArguments_length);
        m_out.store32(m_out.constInt32(minCapacity), fastObject, m_heaps.DirectArguments_minCapacity);
        m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.DirectArguments_overrides);

        ValueFromBlock fastResult = m_out.anchor(fastObject);
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);
        LValue callResult = vmCall(
            m_out.operation(operationCreateDirectArguments), m_callFrame, weakPointer(structure),
            length.value, m_out.constInt32(minCapacity));
        ValueFromBlock slowResult = m_out.anchor(callResult);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue result = m_out.phi(m_out.intPtr, fastResult, slowResult);

        m_out.storePtr(getCurrentCallee(), result, m_heaps.DirectArguments_callee);

        if (length.isKnown) {
            VirtualRegister start = AssemblyHelpers::argumentsStart(m_node->origin.semantic);
            for (unsigned i = 0; i < std::max(length.known, minCapacity); ++i) {
                m_out.store64(
                    m_out.load64(addressFor(start + i)),
                    result, m_heaps.DirectArguments_storage[i]);
            }
        } else {
            LValue stackBase = getArgumentsStart();

            LBasicBlock loop = FTL_NEW_BLOCK(m_out, ("CreateDirectArguments loop body"));
            LBasicBlock end = FTL_NEW_BLOCK(m_out, ("CreateDirectArguments loop end"));

            ValueFromBlock originalLength;
            if (minCapacity) {
                LValue capacity = m_out.select(
                    m_out.aboveOrEqual(length.value, m_out.constInt32(minCapacity)),
                    length.value,
                    m_out.constInt32(minCapacity));
                originalLength = m_out.anchor(m_out.zeroExtPtr(capacity));
                m_out.jump(loop);
            } else {
                originalLength = m_out.anchor(m_out.zeroExtPtr(length.value));
                m_out.branch(m_out.isNull(originalLength.value()), unsure(end), unsure(loop));
            }

            lastNext = m_out.appendTo(loop, end);
            LValue previousIndex