2 * Copyright (C) 2013-2016 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "FTLLowerDFGToB3.h"
31 #include "AirGenerationContext.h"
32 #include "AllowMacroScratchRegisterUsage.h"
33 #include "B3CheckValue.h"
34 #include "B3FenceValue.h"
35 #include "B3PatchpointValue.h"
36 #include "B3SlotBaseValue.h"
37 #include "B3StackmapGenerationParams.h"
38 #include "B3ValueInlines.h"
39 #include "CallFrameShuffler.h"
40 #include "CodeBlockWithJITType.h"
41 #include "DFGAbstractInterpreterInlines.h"
42 #include "DFGCapabilities.h"
43 #include "DFGDominators.h"
44 #include "DFGInPlaceAbstractState.h"
45 #include "DFGOSRAvailabilityAnalysisPhase.h"
46 #include "DFGOSRExitFuzz.h"
47 #include "DOMJITPatchpoint.h"
48 #include "DirectArguments.h"
49 #include "FTLAbstractHeapRepository.h"
50 #include "FTLAvailableRecovery.h"
51 #include "FTLDOMJITPatchpointParams.h"
52 #include "FTLExceptionTarget.h"
53 #include "FTLForOSREntryJITCode.h"
54 #include "FTLFormattedValue.h"
55 #include "FTLLazySlowPathCall.h"
56 #include "FTLLoweredNodeValue.h"
57 #include "FTLOperations.h"
58 #include "FTLOutput.h"
59 #include "FTLPatchpointExceptionHandle.h"
60 #include "FTLThunks.h"
61 #include "FTLWeightedTarget.h"
62 #include "JITAddGenerator.h"
63 #include "JITBitAndGenerator.h"
64 #include "JITBitOrGenerator.h"
65 #include "JITBitXorGenerator.h"
66 #include "JITDivGenerator.h"
67 #include "JITInlineCacheGenerator.h"
68 #include "JITLeftShiftGenerator.h"
69 #include "JITMathIC.h"
70 #include "JITMulGenerator.h"
71 #include "JITRightShiftGenerator.h"
72 #include "JITSubGenerator.h"
73 #include "JSCInlines.h"
74 #include "JSGeneratorFunction.h"
75 #include "JSLexicalEnvironment.h"
77 #include "OperandsInlines.h"
78 #include "ScopedArguments.h"
79 #include "ScopedArgumentsTable.h"
80 #include "ScratchRegisterAllocator.h"
81 #include "SetupVarargsFrame.h"
82 #include "ShadowChicken.h"
83 #include "StructureStubInfo.h"
84 #include "VirtualRegister.h"
90 #include <unordered_set>
92 #include <wtf/ProcessID.h>
94 namespace JSC { namespace FTL {
101 std::atomic<int> compileCounter;
104 NO_RETURN_DUE_TO_CRASH static void ftlUnreachable(
105 CodeBlock* codeBlock, BlockIndex blockIndex, unsigned nodeIndex)
107 dataLog("Crashing in thought-to-be-unreachable FTL-generated code for ", pointerDump(codeBlock), " at basic block #", blockIndex);
108 if (nodeIndex != UINT_MAX)
109 dataLog(", node @", nodeIndex);
115 // Using this instead of typeCheck() helps to reduce the load on B3, by creating
116 // significantly less dead code.
117 #define FTL_TYPE_CHECK_WITH_EXIT_KIND(exitKind, lowValue, highValue, typesPassedThrough, failCondition) do { \
118 FormattedValue _ftc_lowValue = (lowValue); \
119 Edge _ftc_highValue = (highValue); \
120 SpeculatedType _ftc_typesPassedThrough = (typesPassedThrough); \
121 if (!m_interpreter.needsTypeCheck(_ftc_highValue, _ftc_typesPassedThrough)) \
123 typeCheck(_ftc_lowValue, _ftc_highValue, _ftc_typesPassedThrough, (failCondition), exitKind); \
126 #define FTL_TYPE_CHECK(lowValue, highValue, typesPassedThrough, failCondition) \
127 FTL_TYPE_CHECK_WITH_EXIT_KIND(BadType, lowValue, highValue, typesPassedThrough, failCondition)
130 WTF_MAKE_NONCOPYABLE(LowerDFGToB3);
132 LowerDFGToB3(State& state)
133 : m_graph(state.graph)
136 , m_proc(*state.proc)
137 , m_availabilityCalculator(m_graph)
138 , m_state(state.graph)
139 , m_interpreter(state.graph, m_state)
145 State* state = &m_ftlState;
148 if (verboseCompilationEnabled()) {
150 "jsBody_", ++compileCounter, "_", codeBlock()->inferredName(),
151 "_", codeBlock()->hash());
155 m_graph.ensureDominators();
157 if (verboseCompilationEnabled())
158 dataLog("Function ready, beginning lowering.\n");
160 m_out.initialize(m_heaps);
162 // We use prologue frequency for all of the initialization code.
163 m_out.setFrequency(1);
165 m_prologue = m_out.newBlock();
166 m_handleExceptions = m_out.newBlock();
168 for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
169 m_highBlock = m_graph.block(blockIndex);
172 m_out.setFrequency(m_highBlock->executionCount);
173 m_blocks.add(m_highBlock, m_out.newBlock());
176 // Back to prologue frequency for any bocks that get sneakily created in the initialization code.
177 m_out.setFrequency(1);
179 m_out.appendTo(m_prologue, m_handleExceptions);
180 m_out.initializeConstants(m_proc, m_prologue);
181 createPhiVariables();
183 size_t sizeOfCaptured = sizeof(JSValue) * m_graph.m_nextMachineLocal;
184 B3::SlotBaseValue* capturedBase = m_out.lockedStackSlot(sizeOfCaptured);
185 m_captured = m_out.add(capturedBase, m_out.constIntPtr(sizeOfCaptured));
186 state->capturedValue = capturedBase->slot();
188 auto preOrder = m_graph.blocksInPreOrder();
190 m_callFrame = m_out.framePointer();
191 m_tagTypeNumber = m_out.constInt64(TagTypeNumber);
192 m_tagMask = m_out.constInt64(TagMask);
194 // Make sure that B3 knows that we really care about the mask registers. This forces the
195 // constants to be materialized in registers.
196 m_proc.addFastConstant(m_tagTypeNumber->key());
197 m_proc.addFastConstant(m_tagMask->key());
199 // Store out callee and argument count for possible OSR exit.
200 m_out.store64(m_out.argumentRegister(argumentRegisterForCallee()), addressFor(CallFrameSlot::callee));
201 m_out.store32(m_out.argumentRegisterInt32(argumentRegisterForArgumentCount()), payloadFor(CallFrameSlot::argumentCount));
203 m_out.storePtr(m_out.constIntPtr(codeBlock()), addressFor(CallFrameSlot::codeBlock));
205 // Stack Overflow Check.
206 unsigned exitFrameSize = m_graph.requiredRegisterCountForExit() * sizeof(Register);
207 MacroAssembler::AbsoluteAddress addressOfStackLimit(vm().addressOfSoftStackLimit());
208 PatchpointValue* stackOverflowHandler = m_out.patchpoint(Void);
209 CallSiteIndex callSiteIndex = callSiteIndexForCodeOrigin(m_ftlState, CodeOrigin(0));
210 stackOverflowHandler->appendSomeRegister(m_callFrame);
211 stackOverflowHandler->clobber(RegisterSet::macroScratchRegisters());
212 stackOverflowHandler->numGPScratchRegisters = 1;
213 stackOverflowHandler->setGenerator(
214 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
215 AllowMacroScratchRegisterUsage allowScratch(jit);
216 GPRReg fp = params[0].gpr();
217 GPRReg scratch = params.gpScratch(0);
219 unsigned ftlFrameSize = params.proc().frameSize();
221 jit.addPtr(MacroAssembler::TrustedImm32(-std::max(exitFrameSize, ftlFrameSize)), fp, scratch);
222 MacroAssembler::Jump stackOverflow = jit.branchPtr(MacroAssembler::Above, addressOfStackLimit, scratch);
224 params.addLatePath([=] (CCallHelpers& jit) {
225 AllowMacroScratchRegisterUsage allowScratch(jit);
227 stackOverflow.link(&jit);
229 MacroAssembler::TrustedImm32(callSiteIndex.bits()),
230 CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
231 jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
233 jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
234 jit.move(CCallHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::argumentGPR1);
235 CCallHelpers::Call throwCall = jit.call();
237 jit.move(CCallHelpers::TrustedImmPtr(jit.vm()), GPRInfo::argumentGPR0);
238 jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
239 CCallHelpers::Call lookupExceptionHandlerCall = jit.call();
240 jit.jumpToExceptionHandler();
243 [=] (LinkBuffer& linkBuffer) {
244 linkBuffer.link(throwCall, FunctionPtr(operationThrowStackOverflowError));
245 linkBuffer.link(lookupExceptionHandlerCall, FunctionPtr(lookupExceptionHandlerFromCallerFrame));
250 LBasicBlock firstDFGBasicBlock = lowBlock(m_graph.block(0));
252 availabilityMap().clear();
253 availabilityMap().m_locals = Operands<Availability>(codeBlock()->numParameters(), 0);
255 Vector<Node*, 8> argumentNodes;
256 Vector<LValue, 8> argumentValues;
258 argumentNodes.resize(codeBlock()->numParameters());
259 argumentValues.resize(codeBlock()->numParameters());
261 m_highBlock = m_graph.block(0);
263 for (unsigned i = codeBlock()->numParameters(); i--;) {
264 Node* node = m_graph.m_argumentsForChecking[i];
265 VirtualRegister operand = virtualRegisterForArgument(i);
267 LValue jsValue = nullptr;
270 if (i < NUMBER_OF_JS_FUNCTION_ARGUMENT_REGISTERS) {
271 availabilityMap().m_locals.argument(i) = Availability(node);
272 jsValue = m_out.argumentRegister(GPRInfo::toArgumentRegister(node->argumentRegisterIndex()));
274 setJSValue(node, jsValue);
276 availabilityMap().m_locals.argument(i) =
277 Availability(FlushedAt(FlushedJSValue, operand));
278 jsValue = m_out.load64(addressFor(virtualRegisterForArgument(i)));
281 DFG_ASSERT(m_graph, node, node->hasArgumentRegisterIndex() || operand == node->stackAccessData()->machineLocal);
283 // This is a hack, but it's an effective one. It allows us to do CSE on the
284 // primordial load of arguments. This assumes that the GetLocal that got put in
285 // place of the original SetArgument doesn't have any effects before it. This
287 m_loadedArgumentValues.add(node, jsValue);
290 argumentNodes[i] = node;
291 argumentValues[i] = jsValue;
295 m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
296 for (unsigned i = codeBlock()->numParameters(); i--;) {
297 Node* node = argumentNodes[i];
302 LValue jsValue = argumentValues[i];
304 switch (m_graph.m_argumentFormats[i]) {
306 speculate(BadType, jsValueValue(jsValue), node, isNotInt32(jsValue));
309 speculate(BadType, jsValueValue(jsValue), node, isNotBoolean(jsValue));
312 speculate(BadType, jsValueValue(jsValue), node, isNotCell(jsValue));
317 DFG_CRASH(m_graph, node, "Bad flush format for argument");
321 m_out.jump(firstDFGBasicBlock);
323 m_out.appendTo(m_handleExceptions, firstDFGBasicBlock);
324 Box<CCallHelpers::Label> exceptionHandler = state->exceptionHandler;
325 m_out.patchpoint(Void)->setGenerator(
326 [=] (CCallHelpers& jit, const StackmapGenerationParams&) {
327 CCallHelpers::Jump jump = jit.jump();
329 [=] (LinkBuffer& linkBuffer) {
330 linkBuffer.link(jump, linkBuffer.locationOf(*exceptionHandler));
335 for (DFG::BasicBlock* block : preOrder)
338 // Make sure everything is decorated. This does a bunch of deferred decorating. This has
339 // to happen last because our abstract heaps are generated lazily. They have to be
340 // generated lazily because we have an infiniten number of numbered, indexed, and
341 // absolute heaps. We only become aware of the ones we actually mention while lowering.
342 m_heaps.computeRangesAndDecorateInstructions();
344 // We create all Phi's up front, but we may then decide not to compile the basic block
345 // that would have contained one of them. So this creates orphans, which triggers B3
346 // validation failures. Calling this fixes the issue.
348 // Note that you should avoid the temptation to make this call conditional upon
349 // validation being enabled. B3 makes no guarantees of any kind of correctness when
350 // dealing with IR that would have failed validation. For example, it would be valid to
351 // write a B3 phase that so aggressively assumes the lack of orphans that it would crash
352 // if any orphans were around. We might even have such phases already.
353 m_proc.deleteOrphans();
355 // We put the blocks into the B3 procedure in a super weird order. Now we reorder them.
356 m_out.applyBlockOrder();
361 void createPhiVariables()
363 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
364 DFG::BasicBlock* block = m_graph.block(blockIndex);
367 for (unsigned nodeIndex = block->size(); nodeIndex--;) {
368 Node* node = block->at(nodeIndex);
369 if (node->op() != DFG::Phi)
372 switch (node->flags() & NodeResultMask) {
373 case NodeResultDouble:
376 case NodeResultInt32:
379 case NodeResultInt52:
382 case NodeResultBoolean:
389 DFG_CRASH(m_graph, node, "Bad Phi node result type");
392 m_phis.add(node, m_proc.add<Value>(B3::Phi, type, Origin(node)));
397 void compileBlock(DFG::BasicBlock* block)
402 if (verboseCompilationEnabled())
403 dataLog("Compiling block ", *block, "\n");
407 // Make sure that any blocks created while lowering code in the high block have the frequency of
408 // the high block. This is appropriate because B3 doesn't need precise frequencies. It just needs
409 // something roughly approximate for things like register allocation.
410 m_out.setFrequency(m_highBlock->executionCount);
412 LBasicBlock lowBlock = m_blocks.get(m_highBlock);
415 for (BlockIndex nextBlockIndex = m_highBlock->index + 1; nextBlockIndex < m_graph.numBlocks(); ++nextBlockIndex) {
416 m_nextHighBlock = m_graph.block(nextBlockIndex);
420 m_nextLowBlock = m_nextHighBlock ? m_blocks.get(m_nextHighBlock) : 0;
422 // All of this effort to find the next block gives us the ability to keep the
423 // generated IR in roughly program order. This ought not affect the performance
424 // of the generated code (since we expect B3 to reorder things) but it will
425 // make IR dumps easier to read.
426 m_out.appendTo(lowBlock, m_nextLowBlock);
428 if (Options::ftlCrashes())
431 if (!m_highBlock->cfaHasVisited) {
432 if (verboseCompilationEnabled())
433 dataLog("Bailing because CFA didn't reach.\n");
434 crash(m_highBlock, nullptr);
438 m_availabilityCalculator.beginBlock(m_highBlock);
441 m_state.beginBasicBlock(m_highBlock);
443 for (m_nodeIndex = 0; m_nodeIndex < m_highBlock->size(); ++m_nodeIndex) {
444 if (!compileNode(m_nodeIndex))
449 void safelyInvalidateAfterTermination()
451 if (verboseCompilationEnabled())
452 dataLog("Bailing.\n");
455 // Invalidate dominated blocks. Under normal circumstances we would expect
456 // them to be invalidated already. But you can have the CFA become more
457 // precise over time because the structures of objects change on the main
458 // thread. Failing to do this would result in weird crashes due to a value
459 // being used but not defined. Race conditions FTW!
460 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
461 DFG::BasicBlock* target = m_graph.block(blockIndex);
464 if (m_graph.m_dominators->dominates(m_highBlock, target)) {
465 if (verboseCompilationEnabled())
466 dataLog("Block ", *target, " will bail also.\n");
467 target->cfaHasVisited = false;
472 bool compileNode(unsigned nodeIndex)
474 if (!m_state.isValid()) {
475 safelyInvalidateAfterTermination();
479 m_node = m_highBlock->at(nodeIndex);
480 m_origin = m_node->origin;
481 m_out.setOrigin(m_node);
483 if (verboseCompilationEnabled())
484 dataLog("Lowering ", m_node, "\n");
486 m_availableRecoveries.resize(0);
488 m_interpreter.startExecuting();
489 m_interpreter.executeKnownEdgeTypes(m_node);
491 switch (m_node->op()) {
501 compileDoubleConstant();
504 compileInt52Constant();
507 compileLazyJSConstant();
513 compileDoubleAsInt32();
522 compileValueToInt32();
524 case BooleanToNumber:
525 compileBooleanToNumber();
527 case ExtractOSREntryLocal:
528 compileExtractOSREntryLocal();
539 case CallObjectConstructor:
540 compileCallObjectConstructor();
553 compileArithAddOrSub();
569 compileArithMinOrMax();
587 compileArithRandom();
608 compileArithFRound();
611 compileArithNegate();
632 compileUInt32ToNumber();
635 compileCheckStructure();
641 compileCheckNotEmpty();
644 compileCheckBadCell();
646 case CheckStringIdent:
647 compileCheckStringIdent();
650 compileGetExecutable();
652 case ArrayifyToStructure:
653 compileArrayifyToStructure();
656 compilePutStructure();
659 compileGetById(AccessType::GetPure);
663 compileGetById(AccessType::Get);
665 case GetByIdWithThis:
666 compileGetByIdWithThis();
672 compileHasOwnProperty();
679 case PutByIdWithThis:
680 compilePutByIdWithThis();
684 compilePutAccessorById();
686 case PutGetterSetterById:
687 compilePutGetterSetterById();
691 compilePutAccessorByVal();
694 compileGetButterfly();
696 case ConstantStoragePointer:
697 compileConstantStoragePointer();
699 case GetIndexedPropertyStorage:
700 compileGetIndexedPropertyStorage();
706 compileGetArrayLength();
709 compileCheckInBounds();
714 case GetMyArgumentByVal:
715 case GetMyArgumentByValOutOfBounds:
716 compileGetMyArgumentByVal();
718 case GetByValWithThis:
719 compileGetByValWithThis();
726 case PutByValWithThis:
727 compilePutByValWithThis();
729 case DefineDataProperty:
730 compileDefineDataProperty();
732 case DefineAccessorProperty:
733 compileDefineAccessorProperty();
741 case CreateActivation:
742 compileCreateActivation();
745 case NewGeneratorFunction:
746 case NewAsyncFunction:
747 compileNewFunction();
749 case CreateDirectArguments:
750 compileCreateDirectArguments();
752 case CreateScopedArguments:
753 compileCreateScopedArguments();
755 case CreateClonedArguments:
756 compileCreateClonedArguments();
764 case NewArrayWithSpread:
765 compileNewArrayWithSpread();
771 compileNewArrayBuffer();
773 case NewArrayWithSize:
774 compileNewArrayWithSize();
777 compileNewTypedArray();
779 case GetTypedArrayByteOffset:
780 compileGetTypedArrayByteOffset();
782 case AllocatePropertyStorage:
783 compileAllocatePropertyStorage();
785 case ReallocatePropertyStorage:
786 compileReallocatePropertyStorage();
788 case NukeStructureAndSetButterfly:
789 compileNukeStructureAndSetButterfly();
795 case CallStringConstructor:
796 compileToStringOrCallStringConstructor();
799 compileToPrimitive();
805 compileStringCharAt();
807 case StringCharCodeAt:
808 compileStringCharCodeAt();
810 case StringFromCharCode:
811 compileStringFromCharCode();
814 case GetGetterSetterByOffset:
815 compileGetByOffset();
823 case MultiGetByOffset:
824 compileMultiGetByOffset();
827 compilePutByOffset();
829 case MultiPutByOffset:
830 compileMultiPutByOffset();
833 case GetGlobalLexicalVariable:
834 compileGetGlobalVariable();
836 case PutGlobalVariable:
837 compilePutGlobalVariable();
840 compileNotifyWrite();
845 case GetArgumentCountIncludingThis:
846 compileGetArgumentCountIncludingThis();
848 case GetArgumentRegister:
849 compileGetArgumentRegister();
857 case GetGlobalObject:
858 compileGetGlobalObject();
861 compileGetClosureVar();
864 compilePutClosureVar();
866 case GetFromArguments:
867 compileGetFromArguments();
870 compilePutToArguments();
873 compileGetArgument();
878 case CompareStrictEq:
879 compileCompareStrictEq();
882 compileCompareLess();
885 compileCompareLessEq();
888 compileCompareGreater();
890 case CompareGreaterEq:
891 compileCompareGreaterEq();
894 compileCompareEqPtr();
900 case TailCallInlinedCaller:
902 compileCallOrConstruct();
905 case DirectTailCallInlinedCaller:
906 case DirectConstruct:
908 compileDirectCallOrConstruct();
914 case CallForwardVarargs:
915 case TailCallVarargs:
916 case TailCallVarargsInlinedCaller:
917 case TailCallForwardVarargs:
918 case TailCallForwardVarargsInlinedCaller:
919 case ConstructVarargs:
920 case ConstructForwardVarargs:
921 compileCallOrConstructVarargs();
927 compileLoadVarargs();
930 compileForwardVarargs();
945 compileForceOSRExit();
948 case ThrowStaticError:
951 case InvalidationPoint:
952 compileInvalidationPoint();
958 compileIsUndefined();
967 compileIsCellWithType();
973 compileGetMapBucket();
975 case LoadFromJSMapBucket:
976 compileLoadFromJSMapBucket();
978 case IsNonEmptyMapBucket:
979 compileIsNonEmptyMapBucket();
985 compileIsObjectOrNull();
990 case IsTypedArrayView:
991 compileIsTypedArrayView();
996 case CheckTypeInfoFlags:
997 compileCheckTypeInfoFlags();
999 case OverridesHasInstance:
1000 compileOverridesHasInstance();
1003 compileInstanceOf();
1005 case InstanceOfCustom:
1006 compileInstanceOfCustom();
1008 case CountExecution:
1009 compileCountExecution();
1012 case FencedStoreBarrier:
1013 compileStoreBarrier();
1015 case HasIndexedProperty:
1016 compileHasIndexedProperty();
1018 case HasGenericProperty:
1019 compileHasGenericProperty();
1021 case HasStructureProperty:
1022 compileHasStructureProperty();
1024 case GetDirectPname:
1025 compileGetDirectPname();
1027 case GetEnumerableLength:
1028 compileGetEnumerableLength();
1030 case GetPropertyEnumerator:
1031 compileGetPropertyEnumerator();
1033 case GetEnumeratorStructurePname:
1034 compileGetEnumeratorStructurePname();
1036 case GetEnumeratorGenericPname:
1037 compileGetEnumeratorGenericPname();
1040 compileToIndexString();
1042 case CheckStructureImmediate:
1043 compileCheckStructureImmediate();
1045 case MaterializeNewObject:
1046 compileMaterializeNewObject();
1048 case MaterializeCreateActivation:
1049 compileMaterializeCreateActivation();
1051 case CheckWatchdogTimer:
1052 compileCheckWatchdogTimer();
1055 compileCreateRest();
1058 compileGetRestLength();
1061 compileRegExpExec();
1064 compileRegExpTest();
1069 case SetFunctionName:
1070 compileSetFunctionName();
1073 case StringReplaceRegExp:
1074 compileStringReplace();
1076 case GetRegExpObjectLastIndex:
1077 compileGetRegExpObjectLastIndex();
1079 case SetRegExpObjectLastIndex:
1080 compileSetRegExpObjectLastIndex();
1082 case LogShadowChickenPrologue:
1083 compileLogShadowChickenPrologue();
1085 case LogShadowChickenTail:
1086 compileLogShadowChickenTail();
1088 case RecordRegExpCachedResult:
1089 compileRecordRegExpCachedResult();
1092 compileResolveScope();
1095 compileGetDynamicVar();
1098 compilePutDynamicVar();
1101 compileUnreachable();
1104 compileToLowerCase();
1113 compileCallDOMGetter();
1121 case PhantomNewObject:
1122 case PhantomNewFunction:
1123 case PhantomNewGeneratorFunction:
1124 case PhantomNewAsyncFunction:
1125 case PhantomCreateActivation:
1126 case PhantomDirectArguments:
1127 case PhantomCreateRest:
1129 case PhantomNewArrayWithSpread:
1130 case PhantomClonedArguments:
1136 DFG_CRASH(m_graph, m_node, "Unrecognized node in FTL backend");
1140 if (m_node->isTerminal())
1143 if (!m_state.isValid()) {
1144 safelyInvalidateAfterTermination();
1148 m_availabilityCalculator.executeNode(m_node);
1149 m_interpreter.executeEffects(nodeIndex);
1154 void compileUpsilon()
1156 LValue upsilonValue = nullptr;
1157 switch (m_node->child1().useKind()) {
1159 upsilonValue = lowDouble(m_node->child1());
1163 upsilonValue = lowInt32(m_node->child1());
1166 upsilonValue = lowInt52(m_node->child1());
1169 case KnownBooleanUse:
1170 upsilonValue = lowBoolean(m_node->child1());
1174 upsilonValue = lowCell(m_node->child1());
1177 upsilonValue = lowJSValue(m_node->child1());
1180 DFG_CRASH(m_graph, m_node, "Bad use kind");
1183 ValueFromBlock upsilon = m_out.anchor(upsilonValue);
1184 LValue phiNode = m_phis.get(m_node->phi());
1185 m_out.addIncomingToPhi(phiNode, upsilon);
1190 LValue phi = m_phis.get(m_node);
1191 m_out.m_block->append(phi);
1193 switch (m_node->flags() & NodeResultMask) {
1194 case NodeResultDouble:
1197 case NodeResultInt32:
1200 case NodeResultInt52:
1203 case NodeResultBoolean:
1210 DFG_CRASH(m_graph, m_node, "Bad use kind");
1215 void compileDoubleConstant()
1217 setDouble(m_out.constDouble(m_node->asNumber()));
1220 void compileInt52Constant()
1222 int64_t value = m_node->asAnyInt();
1224 setInt52(m_out.constInt64(value << JSValue::int52ShiftAmount));
1225 setStrictInt52(m_out.constInt64(value));
1228 void compileLazyJSConstant()
1230 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
1231 LazyJSValue value = m_node->lazyJSValue();
1232 patchpoint->setGenerator(
1233 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
1234 value.emit(jit, JSValueRegs(params[0].gpr()));
1236 patchpoint->effects = Effects::none();
1237 setJSValue(patchpoint);
1240 void compileDoubleRep()
1242 switch (m_node->child1().useKind()) {
1243 case RealNumberUse: {
1244 LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);
1246 LValue doubleValue = unboxDouble(value);
1248 LBasicBlock intCase = m_out.newBlock();
1249 LBasicBlock continuation = m_out.newBlock();
1251 ValueFromBlock fastResult = m_out.anchor(doubleValue);
1253 m_out.doubleEqual(doubleValue, doubleValue),
1254 usually(continuation), rarely(intCase));
1256 LBasicBlock lastNext = m_out.appendTo(intCase, continuation);
1259 jsValueValue(value), m_node->child1(), SpecBytecodeRealNumber,
1260 isNotInt32(value, provenType(m_node->child1()) & ~SpecDoubleReal));
1261 ValueFromBlock slowResult = m_out.anchor(m_out.intToDouble(unboxInt32(value)));
1262 m_out.jump(continuation);
1264 m_out.appendTo(continuation, lastNext);
1266 setDouble(m_out.phi(Double, fastResult, slowResult));
1272 bool shouldConvertNonNumber = m_node->child1().useKind() == NotCellUse;
1274 LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);
1276 LBasicBlock intCase = m_out.newBlock();
1277 LBasicBlock doubleTesting = m_out.newBlock();
1278 LBasicBlock doubleCase = m_out.newBlock();
1279 LBasicBlock nonDoubleCase = m_out.newBlock();
1280 LBasicBlock continuation = m_out.newBlock();
1283 isNotInt32(value, provenType(m_node->child1())),
1284 unsure(doubleTesting), unsure(intCase));
1286 LBasicBlock lastNext = m_out.appendTo(intCase, doubleTesting);
1288 ValueFromBlock intToDouble = m_out.anchor(
1289 m_out.intToDouble(unboxInt32(value)));
1290 m_out.jump(continuation);
1292 m_out.appendTo(doubleTesting, doubleCase);
1293 LValue valueIsNumber = isNumber(value, provenType(m_node->child1()));
1294 m_out.branch(valueIsNumber, usually(doubleCase), rarely(nonDoubleCase));
1296 m_out.appendTo(doubleCase, nonDoubleCase);
1297 ValueFromBlock unboxedDouble = m_out.anchor(unboxDouble(value));
1298 m_out.jump(continuation);
1300 if (shouldConvertNonNumber) {
1301 LBasicBlock undefinedCase = m_out.newBlock();
1302 LBasicBlock testNullCase = m_out.newBlock();
1303 LBasicBlock nullCase = m_out.newBlock();
1304 LBasicBlock testBooleanTrueCase = m_out.newBlock();
1305 LBasicBlock convertBooleanTrueCase = m_out.newBlock();
1306 LBasicBlock convertBooleanFalseCase = m_out.newBlock();
1308 m_out.appendTo(nonDoubleCase, undefinedCase);
1309 LValue valueIsUndefined = m_out.equal(value, m_out.constInt64(ValueUndefined));
1310 m_out.branch(valueIsUndefined, unsure(undefinedCase), unsure(testNullCase));
1312 m_out.appendTo(undefinedCase, testNullCase);
1313 ValueFromBlock convertedUndefined = m_out.anchor(m_out.constDouble(PNaN));
1314 m_out.jump(continuation);
1316 m_out.appendTo(testNullCase, nullCase);
1317 LValue valueIsNull = m_out.equal(value, m_out.constInt64(ValueNull));
1318 m_out.branch(valueIsNull, unsure(nullCase), unsure(testBooleanTrueCase));
1320 m_out.appendTo(nullCase, testBooleanTrueCase);
1321 ValueFromBlock convertedNull = m_out.anchor(m_out.constDouble(0));
1322 m_out.jump(continuation);
1324 m_out.appendTo(testBooleanTrueCase, convertBooleanTrueCase);
1325 LValue valueIsBooleanTrue = m_out.equal(value, m_out.constInt64(ValueTrue));
1326 m_out.branch(valueIsBooleanTrue, unsure(convertBooleanTrueCase), unsure(convertBooleanFalseCase));
1328 m_out.appendTo(convertBooleanTrueCase, convertBooleanFalseCase);
1329 ValueFromBlock convertedTrue = m_out.anchor(m_out.constDouble(1));
1330 m_out.jump(continuation);
1332 m_out.appendTo(convertBooleanFalseCase, continuation);
1334 LValue valueIsNotBooleanFalse = m_out.notEqual(value, m_out.constInt64(ValueFalse));
1335 FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), ~SpecCell, valueIsNotBooleanFalse);
1336 ValueFromBlock convertedFalse = m_out.anchor(m_out.constDouble(0));
1337 m_out.jump(continuation);
1339 m_out.appendTo(continuation, lastNext);
1340 setDouble(m_out.phi(Double, intToDouble, unboxedDouble, convertedUndefined, convertedNull, convertedTrue, convertedFalse));
1343 m_out.appendTo(nonDoubleCase, continuation);
1344 FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecBytecodeNumber, m_out.booleanTrue);
1345 m_out.unreachable();
1347 m_out.appendTo(continuation, lastNext);
1349 setDouble(m_out.phi(Double, intToDouble, unboxedDouble));
1354 setDouble(strictInt52ToDouble(lowStrictInt52(m_node->child1())));
1359 DFG_CRASH(m_graph, m_node, "Bad use kind");
1363 void compileDoubleAsInt32()
1365 LValue integerValue = convertDoubleToInt32(lowDouble(m_node->child1()), shouldCheckNegativeZero(m_node->arithMode()));
1366 setInt32(integerValue);
1369 void compileValueRep()
1371 switch (m_node->child1().useKind()) {
1372 case DoubleRepUse: {
1373 LValue value = lowDouble(m_node->child1());
1375 if (m_interpreter.needsTypeCheck(m_node->child1(), ~SpecDoubleImpureNaN)) {
1376 value = m_out.select(
1377 m_out.doubleEqual(value, value), value, m_out.constDouble(PNaN));
1380 setJSValue(boxDouble(value));
1385 setJSValue(strictInt52ToJSValue(lowStrictInt52(m_node->child1())));
1390 DFG_CRASH(m_graph, m_node, "Bad use kind");
1394 void compileInt52Rep()
1396 switch (m_node->child1().useKind()) {
1398 setStrictInt52(m_out.signExt32To64(lowInt32(m_node->child1())));
1403 jsValueToStrictInt52(
1404 m_node->child1(), lowJSValue(m_node->child1(), ManualOperandSpeculation)));
1407 case DoubleRepAnyIntUse:
1409 doubleToStrictInt52(
1410 m_node->child1(), lowDouble(m_node->child1())));
1414 RELEASE_ASSERT_NOT_REACHED();
1418 void compileValueToInt32()
1420 switch (m_node->child1().useKind()) {
1422 setInt32(m_out.castToInt32(lowStrictInt52(m_node->child1())));
1426 setInt32(doubleToInt32(lowDouble(m_node->child1())));
1431 LoweredNodeValue value = m_int32Values.get(m_node->child1().node());
1432 if (isValid(value)) {
1433 setInt32(value.value());
1437 value = m_jsValueValues.get(m_node->child1().node());
1438 if (isValid(value)) {
1439 setInt32(numberOrNotCellToInt32(m_node->child1(), value.value()));
1443 // We'll basically just get here for constants. But it's good to have this
1444 // catch-all since we often add new representations into the mix.
1446 numberOrNotCellToInt32(
1448 lowJSValue(m_node->child1(), ManualOperandSpeculation)));
1453 DFG_CRASH(m_graph, m_node, "Bad use kind");
1458 void compileBooleanToNumber()
1460 switch (m_node->child1().useKind()) {
1462 setInt32(m_out.zeroExt(lowBoolean(m_node->child1()), Int32));
1467 LValue value = lowJSValue(m_node->child1());
1469 if (!m_interpreter.needsTypeCheck(m_node->child1(), SpecBoolInt32 | SpecBoolean)) {
1470 setInt32(m_out.bitAnd(m_out.castToInt32(value), m_out.int32One));
1474 LBasicBlock booleanCase = m_out.newBlock();
1475 LBasicBlock continuation = m_out.newBlock();
1477 ValueFromBlock notBooleanResult = m_out.anchor(value);
1479 isBoolean(value, provenType(m_node->child1())),
1480 unsure(booleanCase), unsure(continuation));
1482 LBasicBlock lastNext = m_out.appendTo(booleanCase, continuation);
1483 ValueFromBlock booleanResult = m_out.anchor(m_out.bitOr(
1484 m_out.zeroExt(unboxBoolean(value), Int64), m_tagTypeNumber));
1485 m_out.jump(continuation);
1487 m_out.appendTo(continuation, lastNext);
1488 setJSValue(m_out.phi(Int64, booleanResult, notBooleanResult));
1493 RELEASE_ASSERT_NOT_REACHED();
1498 void compileExtractOSREntryLocal()
1500 EncodedJSValue* buffer = static_cast<EncodedJSValue*>(
1501 m_ftlState.jitCode->ftlForOSREntry()->entryBuffer()->dataBuffer());
1502 setJSValue(m_out.load64(m_out.absolute(buffer + m_node->unlinkedLocal().toLocal())));
1505 void compileGetStack()
1507 // GetLocals arise only for captured variables and arguments. For arguments, we might have
1508 // already loaded it.
1509 if (LValue value = m_loadedArgumentValues.get(m_node)) {
1514 StackAccessData* data = m_node->stackAccessData();
1515 AbstractValue& value = m_state.variables().operand(data->local);
1517 DFG_ASSERT(m_graph, m_node, isConcrete(data->format));
1518 DFG_ASSERT(m_graph, m_node, data->format != FlushedDouble); // This just happens to not arise for GetStacks, right now. It would be trivial to support.
1520 if (isInt32Speculation(value.m_type))
1521 setInt32(m_out.load32(payloadFor(data->machineLocal)));
1523 setJSValue(m_out.load64(addressFor(data->machineLocal)));
1526 void compilePutStack()
1528 StackAccessData* data = m_node->stackAccessData();
1529 switch (data->format) {
1530 case FlushedJSValue: {
1531 LValue value = lowJSValue(m_node->child1());
1532 m_out.store64(value, addressFor(data->machineLocal));
1536 case FlushedDouble: {
1537 LValue value = lowDouble(m_node->child1());
1538 m_out.storeDouble(value, addressFor(data->machineLocal));
1542 case FlushedInt32: {
1543 LValue value = lowInt32(m_node->child1());
1544 m_out.store32(value, payloadFor(data->machineLocal));
1548 case FlushedInt52: {
1549 LValue value = lowInt52(m_node->child1());
1550 m_out.store64(value, addressFor(data->machineLocal));
1555 LValue value = lowCell(m_node->child1());
1556 m_out.store64(value, addressFor(data->machineLocal));
1560 case FlushedBoolean: {
1561 speculateBoolean(m_node->child1());
1563 lowJSValue(m_node->child1(), ManualOperandSpeculation),
1564 addressFor(data->machineLocal));
1569 DFG_CRASH(m_graph, m_node, "Bad flush format");
1576 DFG_NODE_DO_TO_CHILDREN(m_graph, m_node, speculate);
1579 void compileCallObjectConstructor()
1581 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
1582 LValue value = lowJSValue(m_node->child1());
1584 LBasicBlock isCellCase = m_out.newBlock();
1585 LBasicBlock slowCase = m_out.newBlock();
1586 LBasicBlock continuation = m_out.newBlock();
1588 m_out.branch(isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));
1590 LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
1591 ValueFromBlock fastResult = m_out.anchor(value);
1592 m_out.branch(isObject(value), usually(continuation), rarely(slowCase));
1594 m_out.appendTo(slowCase, continuation);
1595 ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationObjectConstructor), m_callFrame, m_out.constIntPtr(globalObject), value));
1596 m_out.jump(continuation);
1598 m_out.appendTo(continuation, lastNext);
1599 setJSValue(m_out.phi(Int64, fastResult, slowResult));
1602 void compileToThis()
1604 LValue value = lowJSValue(m_node->child1());
1606 LBasicBlock isCellCase = m_out.newBlock();
1607 LBasicBlock slowCase = m_out.newBlock();
1608 LBasicBlock continuation = m_out.newBlock();
1611 isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));
1613 LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
1614 ValueFromBlock fastResult = m_out.anchor(value);
1617 m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoFlags),
1618 m_out.constInt32(OverridesToThis)),
1619 usually(continuation), rarely(slowCase));
1621 m_out.appendTo(slowCase, continuation);
1622 J_JITOperation_EJ function;
1623 if (m_graph.isStrictModeFor(m_node->origin.semantic))
1624 function = operationToThisStrict;
1626 function = operationToThis;
1627 ValueFromBlock slowResult = m_out.anchor(
1628 vmCall(Int64, m_out.operation(function), m_callFrame, value));
1629 m_out.jump(continuation);
1631 m_out.appendTo(continuation, lastNext);
1632 setJSValue(m_out.phi(Int64, fastResult, slowResult));
1635 void compileValueAdd()
1637 ArithProfile* arithProfile = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic)->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
1638 JITAddIC* addIC = codeBlock()->addJITAddIC(arithProfile);
1639 auto repatchingFunction = operationValueAddOptimize;
1640 auto nonRepatchingFunction = operationValueAdd;
1641 compileMathIC(addIC, repatchingFunction, nonRepatchingFunction);
1644 template <typename Generator>
1645 void compileMathIC(JITUnaryMathIC<Generator>* mathIC, FunctionPtr repatchingFunction, FunctionPtr nonRepatchingFunction)
1647 Node* node = m_node;
1649 LValue operand = lowJSValue(node->child1());
1651 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
1652 patchpoint->appendSomeRegister(operand);
1653 patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
1654 patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
1655 RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
1656 patchpoint->numGPScratchRegisters = 1;
1657 patchpoint->clobber(RegisterSet::macroScratchRegisters());
1658 State* state = &m_ftlState;
1659 patchpoint->setGenerator(
1660 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
1661 AllowMacroScratchRegisterUsage allowScratch(jit);
1663 Box<CCallHelpers::JumpList> exceptions =
1664 exceptionHandle->scheduleExitCreation(params)->jumps(jit);
1666 #if ENABLE(MATH_IC_STATS)
1667 auto inlineStart = jit.label();
1670 Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
1671 mathIC->m_generator = Generator(JSValueRegs(params[0].gpr()), JSValueRegs(params[1].gpr()), params.gpScratch(0));
1673 bool shouldEmitProfiling = false;
1674 bool generatedInline = mathIC->generateInline(jit, *mathICGenerationState, shouldEmitProfiling);
1676 if (generatedInline) {
1677 ASSERT(!mathICGenerationState->slowPathJumps.empty());
1678 auto done = jit.label();
1679 params.addLatePath([=] (CCallHelpers& jit) {
1680 AllowMacroScratchRegisterUsage allowScratch(jit);
1681 mathICGenerationState->slowPathJumps.link(&jit);
1682 mathICGenerationState->slowPathStart = jit.label();
1683 #if ENABLE(MATH_IC_STATS)
1684 auto slowPathStart = jit.label();
1687 if (mathICGenerationState->shouldSlowPathRepatch) {
1688 SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
1689 repatchingFunction, params[0].gpr(), params[1].gpr(), CCallHelpers::TrustedImmPtr(mathIC));
1690 mathICGenerationState->slowPathCall = call.call();
1692 SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic,
1693 exceptions.get(), nonRepatchingFunction, params[0].gpr(), params[1].gpr());
1694 mathICGenerationState->slowPathCall = call.call();
1696 jit.jump().linkTo(done, &jit);
1698 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
1699 mathIC->finalizeInlineCode(*mathICGenerationState, linkBuffer);
1702 #if ENABLE(MATH_IC_STATS)
1703 auto slowPathEnd = jit.label();
1704 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
1705 size_t size = static_cast<char*>(linkBuffer.locationOf(slowPathEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(slowPathStart).executableAddress());
1706 mathIC->m_generatedCodeSize += size;
1712 *state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
1713 nonRepatchingFunction, params[0].gpr(), params[1].gpr());
1716 #if ENABLE(MATH_IC_STATS)
1717 auto inlineEnd = jit.label();
1718 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
1719 size_t size = static_cast<char*>(linkBuffer.locationOf(inlineEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(inlineStart).executableAddress());
1720 mathIC->m_generatedCodeSize += size;
1725 setJSValue(patchpoint);
1728 template <typename Generator>
1729 void compileMathIC(JITBinaryMathIC<Generator>* mathIC, FunctionPtr repatchingFunction, FunctionPtr nonRepatchingFunction)
1731 Node* node = m_node;
1733 LValue left = lowJSValue(node->child1());
1734 LValue right = lowJSValue(node->child2());
1736 SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
1737 SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());
1739 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
1740 patchpoint->appendSomeRegister(left);
1741 patchpoint->appendSomeRegister(right);
1742 patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
1743 patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
1744 RefPtr<PatchpointExceptionHandle> exceptionHandle =
1745 preparePatchpointForExceptions(patchpoint);
1746 patchpoint->numGPScratchRegisters = 1;
1747 patchpoint->numFPScratchRegisters = 2;
1748 patchpoint->clobber(RegisterSet::macroScratchRegisters());
1749 State* state = &m_ftlState;
1750 patchpoint->setGenerator(
1751 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
1752 AllowMacroScratchRegisterUsage allowScratch(jit);
1754 Box<CCallHelpers::JumpList> exceptions =
1755 exceptionHandle->scheduleExitCreation(params)->jumps(jit);
1757 #if ENABLE(MATH_IC_STATS)
1758 auto inlineStart = jit.label();
1761 Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
1762 mathIC->m_generator = Generator(leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
1763 JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), params.fpScratch(0),
1764 params.fpScratch(1), params.gpScratch(0), InvalidFPRReg);
1766 bool shouldEmitProfiling = false;
1767 bool generatedInline = mathIC->generateInline(jit, *mathICGenerationState, shouldEmitProfiling);
1769 if (generatedInline) {
1770 ASSERT(!mathICGenerationState->slowPathJumps.empty());
1771 auto done = jit.label();
1772 params.addLatePath([=] (CCallHelpers& jit) {
1773 AllowMacroScratchRegisterUsage allowScratch(jit);
1774 mathICGenerationState->slowPathJumps.link(&jit);
1775 mathICGenerationState->slowPathStart = jit.label();
1776 #if ENABLE(MATH_IC_STATS)
1777 auto slowPathStart = jit.label();
1780 if (mathICGenerationState->shouldSlowPathRepatch) {
1781 SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
1782 repatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr(), CCallHelpers::TrustedImmPtr(mathIC));
1783 mathICGenerationState->slowPathCall = call.call();
1785 SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic,
1786 exceptions.get(), nonRepatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr());
1787 mathICGenerationState->slowPathCall = call.call();
1789 jit.jump().linkTo(done, &jit);
1791 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
1792 mathIC->finalizeInlineCode(*mathICGenerationState, linkBuffer);
1795 #if ENABLE(MATH_IC_STATS)
1796 auto slowPathEnd = jit.label();
1797 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
1798 size_t size = static_cast<char*>(linkBuffer.locationOf(slowPathEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(slowPathStart).executableAddress());
1799 mathIC->m_generatedCodeSize += size;
1805 *state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
1806 nonRepatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr());
1809 #if ENABLE(MATH_IC_STATS)
1810 auto inlineEnd = jit.label();
1811 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
1812 size_t size = static_cast<char*>(linkBuffer.locationOf(inlineEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(inlineStart).executableAddress());
1813 mathIC->m_generatedCodeSize += size;
1818 setJSValue(patchpoint);
1821 void compileStrCat()
1824 if (m_node->child3()) {
1826 Int64, m_out.operation(operationStrCat3), m_callFrame,
1827 lowJSValue(m_node->child1(), ManualOperandSpeculation),
1828 lowJSValue(m_node->child2(), ManualOperandSpeculation),
1829 lowJSValue(m_node->child3(), ManualOperandSpeculation));
1832 Int64, m_out.operation(operationStrCat2), m_callFrame,
1833 lowJSValue(m_node->child1(), ManualOperandSpeculation),
1834 lowJSValue(m_node->child2(), ManualOperandSpeculation));
1839 void compileArithAddOrSub()
1841 bool isSub = m_node->op() == ArithSub;
1842 switch (m_node->binaryUseKind()) {
1844 LValue left = lowInt32(m_node->child1());
1845 LValue right = lowInt32(m_node->child2());
1847 if (!shouldCheckOverflow(m_node->arithMode())) {
1848 setInt32(isSub ? m_out.sub(left, right) : m_out.add(left, right));
1852 CheckValue* result =
1853 isSub ? m_out.speculateSub(left, right) : m_out.speculateAdd(left, right);
1854 blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
1860 if (!abstractValue(m_node->child1()).couldBeType(SpecInt52Only)
1861 && !abstractValue(m_node->child2()).couldBeType(SpecInt52Only)) {
1863 LValue left = lowWhicheverInt52(m_node->child1(), kind);
1864 LValue right = lowInt52(m_node->child2(), kind);
1865 setInt52(isSub ? m_out.sub(left, right) : m_out.add(left, right), kind);
1869 LValue left = lowInt52(m_node->child1());
1870 LValue right = lowInt52(m_node->child2());
1871 CheckValue* result =
1872 isSub ? m_out.speculateSub(left, right) : m_out.speculateAdd(left, right);
1873 blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
1878 case DoubleRepUse: {
1879 LValue C1 = lowDouble(m_node->child1());
1880 LValue C2 = lowDouble(m_node->child2());
1882 setDouble(isSub ? m_out.doubleSub(C1, C2) : m_out.doubleAdd(C1, C2));
1888 DFG_CRASH(m_graph, m_node, "Bad use kind");
1892 ArithProfile* arithProfile = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic)->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
1893 JITSubIC* subIC = codeBlock()->addJITSubIC(arithProfile);
1894 auto repatchingFunction = operationValueSubOptimize;
1895 auto nonRepatchingFunction = operationValueSub;
1896 compileMathIC(subIC, repatchingFunction, nonRepatchingFunction);
1901 DFG_CRASH(m_graph, m_node, "Bad use kind");
1906 void compileArithClz32()
1908 if (m_node->child1().useKind() == Int32Use || m_node->child1().useKind() == KnownInt32Use) {
1909 LValue operand = lowInt32(m_node->child1());
1910 setInt32(m_out.ctlz32(operand));
1913 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
1914 LValue argument = lowJSValue(m_node->child1());
1915 LValue result = vmCall(Int32, m_out.operation(operationArithClz32), m_callFrame, argument);
1919 void compileArithMul()
1921 switch (m_node->binaryUseKind()) {
1923 LValue left = lowInt32(m_node->child1());
1924 LValue right = lowInt32(m_node->child2());
1928 if (!shouldCheckOverflow(m_node->arithMode()))
1929 result = m_out.mul(left, right);
1931 CheckValue* speculation = m_out.speculateMul(left, right);
1932 blessSpeculation(speculation, Overflow, noValue(), nullptr, m_origin);
1933 result = speculation;
1936 if (shouldCheckNegativeZero(m_node->arithMode())) {
1937 LBasicBlock slowCase = m_out.newBlock();
1938 LBasicBlock continuation = m_out.newBlock();
1941 m_out.notZero32(result), usually(continuation), rarely(slowCase));
1943 LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
1944 speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int32Zero));
1945 speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int32Zero));
1946 m_out.jump(continuation);
1947 m_out.appendTo(continuation, lastNext);
1956 LValue left = lowWhicheverInt52(m_node->child1(), kind);
1957 LValue right = lowInt52(m_node->child2(), opposite(kind));
1959 CheckValue* result = m_out.speculateMul(left, right);
1960 blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
1962 if (shouldCheckNegativeZero(m_node->arithMode())) {
1963 LBasicBlock slowCase = m_out.newBlock();
1964 LBasicBlock continuation = m_out.newBlock();
1967 m_out.notZero64(result), usually(continuation), rarely(slowCase));
1969 LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
1970 speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int64Zero));
1971 speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int64Zero));
1972 m_out.jump(continuation);
1973 m_out.appendTo(continuation, lastNext);
1980 case DoubleRepUse: {
1982 m_out.doubleMul(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
1987 ArithProfile* arithProfile = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic)->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
1988 JITMulIC* mulIC = codeBlock()->addJITMulIC(arithProfile);
1989 auto repatchingFunction = operationValueMulOptimize;
1990 auto nonRepatchingFunction = operationValueMul;
1991 compileMathIC(mulIC, repatchingFunction, nonRepatchingFunction);
1996 DFG_CRASH(m_graph, m_node, "Bad use kind");
2001 void compileArithDiv()
2003 switch (m_node->binaryUseKind()) {
2005 LValue numerator = lowInt32(m_node->child1());
2006 LValue denominator = lowInt32(m_node->child2());
2008 if (shouldCheckNegativeZero(m_node->arithMode())) {
2009 LBasicBlock zeroNumerator = m_out.newBlock();
2010 LBasicBlock numeratorContinuation = m_out.newBlock();
2013 m_out.isZero32(numerator),
2014 rarely(zeroNumerator), usually(numeratorContinuation));
2016 LBasicBlock innerLastNext = m_out.appendTo(zeroNumerator, numeratorContinuation);
2019 NegativeZero, noValue(), 0, m_out.lessThan(denominator, m_out.int32Zero));
2021 m_out.jump(numeratorContinuation);
2023 m_out.appendTo(numeratorContinuation, innerLastNext);
2026 if (shouldCheckOverflow(m_node->arithMode())) {
2027 LBasicBlock unsafeDenominator = m_out.newBlock();
2028 LBasicBlock continuation = m_out.newBlock();
2030 LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
2032 m_out.above(adjustedDenominator, m_out.int32One),
2033 usually(continuation), rarely(unsafeDenominator));
2035 LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
2036 LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
2037 speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator));
2038 speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31));
2039 m_out.jump(continuation);
2041 m_out.appendTo(continuation, lastNext);
2042 LValue result = m_out.div(numerator, denominator);
2044 Overflow, noValue(), 0,
2045 m_out.notEqual(m_out.mul(result, denominator), numerator));
2048 setInt32(m_out.chillDiv(numerator, denominator));
2053 case DoubleRepUse: {
2054 setDouble(m_out.doubleDiv(
2055 lowDouble(m_node->child1()), lowDouble(m_node->child2())));
2060 emitBinarySnippet<JITDivGenerator, NeedScratchFPR>(operationValueDiv);
2065 DFG_CRASH(m_graph, m_node, "Bad use kind");
2070 void compileArithMod()
2072 switch (m_node->binaryUseKind()) {
2074 LValue numerator = lowInt32(m_node->child1());
2075 LValue denominator = lowInt32(m_node->child2());
2078 if (shouldCheckOverflow(m_node->arithMode())) {
2079 LBasicBlock unsafeDenominator = m_out.newBlock();
2080 LBasicBlock continuation = m_out.newBlock();
2082 LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
2084 m_out.above(adjustedDenominator, m_out.int32One),
2085 usually(continuation), rarely(unsafeDenominator));
2087 LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
2088 LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
2089 speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator));
2090 speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31));
2091 m_out.jump(continuation);
2093 m_out.appendTo(continuation, lastNext);
2094 LValue result = m_out.mod(numerator, denominator);
2097 remainder = m_out.chillMod(numerator, denominator);
2099 if (shouldCheckNegativeZero(m_node->arithMode())) {
2100 LBasicBlock negativeNumerator = m_out.newBlock();
2101 LBasicBlock numeratorContinuation = m_out.newBlock();
2104 m_out.lessThan(numerator, m_out.int32Zero),
2105 unsure(negativeNumerator), unsure(numeratorContinuation));
2107 LBasicBlock innerLastNext = m_out.appendTo(negativeNumerator, numeratorContinuation);
2109 speculate(NegativeZero, noValue(), 0, m_out.isZero32(remainder));
2111 m_out.jump(numeratorContinuation);
2113 m_out.appendTo(numeratorContinuation, innerLastNext);
2116 setInt32(remainder);
2120 case DoubleRepUse: {
2122 m_out.doubleMod(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
2127 DFG_CRASH(m_graph, m_node, "Bad use kind");
2132 void compileArithMinOrMax()
2134 switch (m_node->binaryUseKind()) {
2136 LValue left = lowInt32(m_node->child1());
2137 LValue right = lowInt32(m_node->child2());
2141 m_node->op() == ArithMin
2142 ? m_out.lessThan(left, right)
2143 : m_out.lessThan(right, left),
2148 case DoubleRepUse: {
2149 LValue left = lowDouble(m_node->child1());
2150 LValue right = lowDouble(m_node->child2());
2152 LBasicBlock notLessThan = m_out.newBlock();
2153 LBasicBlock continuation = m_out.newBlock();
2155 Vector<ValueFromBlock, 2> results;
2157 results.append(m_out.anchor(left));
2159 m_node->op() == ArithMin
2160 ? m_out.doubleLessThan(left, right)
2161 : m_out.doubleGreaterThan(left, right),
2162 unsure(continuation), unsure(notLessThan));
2164 LBasicBlock lastNext = m_out.appendTo(notLessThan, continuation);
2165 results.append(m_out.anchor(m_out.select(
2166 m_node->op() == ArithMin
2167 ? m_out.doubleGreaterThanOrEqual(left, right)
2168 : m_out.doubleLessThanOrEqual(left, right),
2169 right, m_out.constDouble(PNaN))));
2170 m_out.jump(continuation);
2172 m_out.appendTo(continuation, lastNext);
2173 setDouble(m_out.phi(Double, results));
2178 DFG_CRASH(m_graph, m_node, "Bad use kind");
2183 void compileArithAbs()
2185 switch (m_node->child1().useKind()) {
2187 LValue value = lowInt32(m_node->child1());
2189 LValue mask = m_out.aShr(value, m_out.constInt32(31));
2190 LValue result = m_out.bitXor(mask, m_out.add(mask, value));
2192 if (shouldCheckOverflow(m_node->arithMode()))
2193 speculate(Overflow, noValue(), 0, m_out.lessThan(result, m_out.int32Zero));
2199 case DoubleRepUse: {
2200 setDouble(m_out.doubleAbs(lowDouble(m_node->child1())));
2205 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2206 LValue argument = lowJSValue(m_node->child1());
2207 LValue result = vmCall(Double, m_out.operation(operationArithAbs), m_callFrame, argument);
2214 void compileArithSin()
2216 if (m_node->child1().useKind() == DoubleRepUse) {
2217 setDouble(m_out.doubleSin(lowDouble(m_node->child1())));
2220 LValue argument = lowJSValue(m_node->child1());
2221 LValue result = vmCall(Double, m_out.operation(operationArithSin), m_callFrame, argument);
2225 void compileArithCos()
2227 if (m_node->child1().useKind() == DoubleRepUse) {
2228 setDouble(m_out.doubleCos(lowDouble(m_node->child1())));
2231 LValue argument = lowJSValue(m_node->child1());
2232 LValue result = vmCall(Double, m_out.operation(operationArithCos), m_callFrame, argument);
2236 void compileArithTan()
2238 if (m_node->child1().useKind() == DoubleRepUse) {
2239 setDouble(m_out.doubleTan(lowDouble(m_node->child1())));
2242 LValue argument = lowJSValue(m_node->child1());
2243 LValue result = vmCall(Double, m_out.operation(operationArithTan), m_callFrame, argument);
2247 void compileArithPow()
2249 if (m_node->child2().useKind() == Int32Use)
2250 setDouble(m_out.doublePowi(lowDouble(m_node->child1()), lowInt32(m_node->child2())));
2252 LValue base = lowDouble(m_node->child1());
2253 LValue exponent = lowDouble(m_node->child2());
2255 LBasicBlock integerExponentIsSmallBlock = m_out.newBlock();
2256 LBasicBlock integerExponentPowBlock = m_out.newBlock();
2257 LBasicBlock doubleExponentPowBlockEntry = m_out.newBlock();
2258 LBasicBlock nanExceptionBaseIsOne = m_out.newBlock();
2259 LBasicBlock nanExceptionExponentIsInfinity = m_out.newBlock();
2260 LBasicBlock testExponentIsOneHalf = m_out.newBlock();
2261 LBasicBlock handleBaseZeroExponentIsOneHalf = m_out.newBlock();
2262 LBasicBlock handleInfinityForExponentIsOneHalf = m_out.newBlock();
2263 LBasicBlock exponentIsOneHalfNormal = m_out.newBlock();
2264 LBasicBlock exponentIsOneHalfInfinity = m_out.newBlock();
2265 LBasicBlock testExponentIsNegativeOneHalf = m_out.newBlock();
2266 LBasicBlock testBaseZeroExponentIsNegativeOneHalf = m_out.newBlock();
2267 LBasicBlock handleBaseZeroExponentIsNegativeOneHalf = m_out.newBlock();
2268 LBasicBlock handleInfinityForExponentIsNegativeOneHalf = m_out.newBlock();
2269 LBasicBlock exponentIsNegativeOneHalfNormal = m_out.newBlock();
2270 LBasicBlock exponentIsNegativeOneHalfInfinity = m_out.newBlock();
2271 LBasicBlock powBlock = m_out.newBlock();
2272 LBasicBlock nanExceptionResultIsNaN = m_out.newBlock();
2273 LBasicBlock continuation = m_out.newBlock();
2275 LValue integerExponent = m_out.doubleToInt(exponent);
2276 LValue integerExponentConvertedToDouble = m_out.intToDouble(integerExponent);
2277 LValue exponentIsInteger = m_out.doubleEqual(exponent, integerExponentConvertedToDouble);
2278 m_out.branch(exponentIsInteger, unsure(integerExponentIsSmallBlock), unsure(doubleExponentPowBlockEntry));
2280 LBasicBlock lastNext = m_out.appendTo(integerExponentIsSmallBlock, integerExponentPowBlock);
2281 LValue integerExponentBelowMax = m_out.belowOrEqual(integerExponent, m_out.constInt32(maxExponentForIntegerMathPow));
2282 m_out.branch(integerExponentBelowMax, usually(integerExponentPowBlock), rarely(doubleExponentPowBlockEntry));
2284 m_out.appendTo(integerExponentPowBlock, doubleExponentPowBlockEntry);
2285 ValueFromBlock powDoubleIntResult = m_out.anchor(m_out.doublePowi(base, integerExponent));
2286 m_out.jump(continuation);
2288 // If y is NaN, the result is NaN.
2289 m_out.appendTo(doubleExponentPowBlockEntry, nanExceptionBaseIsOne);
2290 LValue exponentIsNaN;
2291 if (provenType(m_node->child2()) & SpecDoubleNaN)
2292 exponentIsNaN = m_out.doubleNotEqualOrUnordered(exponent, exponent);
2294 exponentIsNaN = m_out.booleanFalse;
2295 m_out.branch(exponentIsNaN, rarely(nanExceptionResultIsNaN), usually(nanExceptionBaseIsOne));
2297 // If abs(x) is 1 and y is +infinity, the result is NaN.
2298 // If abs(x) is 1 and y is -infinity, the result is NaN.
2300 // Test if abs(base) == 1.
2301 m_out.appendTo(nanExceptionBaseIsOne, nanExceptionExponentIsInfinity);
2302 LValue absoluteBase = m_out.doubleAbs(base);
2303 LValue absoluteBaseIsOne = m_out.doubleEqual(absoluteBase, m_out.constDouble(1));
2304 m_out.branch(absoluteBaseIsOne, rarely(nanExceptionExponentIsInfinity), usually(testExponentIsOneHalf));
2306 // Test if abs(y) == Infinity.
2307 m_out.appendTo(nanExceptionExponentIsInfinity, testExponentIsOneHalf);
2308 LValue absoluteExponent = m_out.doubleAbs(exponent);
2309 LValue absoluteExponentIsInfinity = m_out.doubleEqual(absoluteExponent, m_out.constDouble(std::numeric_limits<double>::infinity()));
2310 m_out.branch(absoluteExponentIsInfinity, rarely(nanExceptionResultIsNaN), usually(testExponentIsOneHalf));
2312 // If y == 0.5 or y == -0.5, handle it through SQRT.
2313 // We have to be careful with -0 and -Infinity.
2316 m_out.appendTo(testExponentIsOneHalf, handleBaseZeroExponentIsOneHalf);
2317 LValue exponentIsOneHalf = m_out.doubleEqual(exponent, m_out.constDouble(0.5));
2318 m_out.branch(exponentIsOneHalf, rarely(handleBaseZeroExponentIsOneHalf), usually(testExponentIsNegativeOneHalf));
2321 m_out.appendTo(handleBaseZeroExponentIsOneHalf, handleInfinityForExponentIsOneHalf);
2322 LValue baseIsZeroExponentIsOneHalf = m_out.doubleEqual(base, m_out.doubleZero);
2323 ValueFromBlock zeroResultExponentIsOneHalf = m_out.anchor(m_out.doubleZero);
2324 m_out.branch(baseIsZeroExponentIsOneHalf, rarely(continuation), usually(handleInfinityForExponentIsOneHalf));
2326 // Test if abs(x) == Infinity.
2327 m_out.appendTo(handleInfinityForExponentIsOneHalf, exponentIsOneHalfNormal);
2328 LValue absoluteBaseIsInfinityOneHalf = m_out.doubleEqual(absoluteBase, m_out.constDouble(std::numeric_limits<double>::infinity()));
2329 m_out.branch(absoluteBaseIsInfinityOneHalf, rarely(exponentIsOneHalfInfinity), usually(exponentIsOneHalfNormal));
2331 // The exponent is 0.5 and the base is finite or NaN, so we can use SQRT.
2332 m_out.appendTo(exponentIsOneHalfNormal, exponentIsOneHalfInfinity);
2333 ValueFromBlock sqrtResult = m_out.anchor(m_out.doubleSqrt(base));
2334 m_out.jump(continuation);
2336 // The exponent is 0.5 and the base is infinite, so the result is always infinite.
2337 m_out.appendTo(exponentIsOneHalfInfinity, testExponentIsNegativeOneHalf);
2338 ValueFromBlock sqrtInfinityResult = m_out.anchor(m_out.constDouble(std::numeric_limits<double>::infinity()));
2339 m_out.jump(continuation);
2341 // Test if y == -0.5
2342 m_out.appendTo(testExponentIsNegativeOneHalf, testBaseZeroExponentIsNegativeOneHalf);
2343 LValue exponentIsNegativeOneHalf = m_out.doubleEqual(exponent, m_out.constDouble(-0.5));
2344 m_out.branch(exponentIsNegativeOneHalf, rarely(testBaseZeroExponentIsNegativeOneHalf), usually(powBlock));
2347 m_out.appendTo(testBaseZeroExponentIsNegativeOneHalf, handleBaseZeroExponentIsNegativeOneHalf);
2348 LValue baseIsZeroExponentIsNegativeOneHalf = m_out.doubleEqual(base, m_out.doubleZero);
2349 m_out.branch(baseIsZeroExponentIsNegativeOneHalf, rarely(handleBaseZeroExponentIsNegativeOneHalf), usually(handleInfinityForExponentIsNegativeOneHalf));
2351 m_out.appendTo(handleBaseZeroExponentIsNegativeOneHalf, handleInfinityForExponentIsNegativeOneHalf);
2352 ValueFromBlock oneOverSqrtZeroResult = m_out.anchor(m_out.constDouble(std::numeric_limits<double>::infinity()));
2353 m_out.jump(continuation);
2355 // Test if abs(x) == Infinity.
2356 m_out.appendTo(handleInfinityForExponentIsNegativeOneHalf, exponentIsNegativeOneHalfNormal);
2357 LValue absoluteBaseIsInfinityNegativeOneHalf = m_out.doubleEqual(absoluteBase, m_out.constDouble(std::numeric_limits<double>::infinity()));
2358 m_out.branch(absoluteBaseIsInfinityNegativeOneHalf, rarely(exponentIsNegativeOneHalfInfinity), usually(exponentIsNegativeOneHalfNormal));
2360 // The exponent is -0.5 and the base is finite or NaN, so we can use 1/SQRT.
2361 m_out.appendTo(exponentIsNegativeOneHalfNormal, exponentIsNegativeOneHalfInfinity);
2362 LValue sqrtBase = m_out.doubleSqrt(base);
2363 ValueFromBlock oneOverSqrtResult = m_out.anchor(m_out.div(m_out.constDouble(1.), sqrtBase));
2364 m_out.jump(continuation);
2366 // The exponent is -0.5 and the base is infinite, so the result is always zero.
2367 m_out.appendTo(exponentIsNegativeOneHalfInfinity, powBlock);
2368 ValueFromBlock oneOverSqrtInfinityResult = m_out.anchor(m_out.doubleZero);
2369 m_out.jump(continuation);
2371 m_out.appendTo(powBlock, nanExceptionResultIsNaN);
2372 ValueFromBlock powResult = m_out.anchor(m_out.doublePow(base, exponent));
2373 m_out.jump(continuation);
2375 m_out.appendTo(nanExceptionResultIsNaN, continuation);
2376 ValueFromBlock pureNan = m_out.anchor(m_out.constDouble(PNaN));
2377 m_out.jump(continuation);
2379 m_out.appendTo(continuation, lastNext);
2380 setDouble(m_out.phi(Double, powDoubleIntResult, zeroResultExponentIsOneHalf, sqrtResult, sqrtInfinityResult, oneOverSqrtZeroResult, oneOverSqrtResult, oneOverSqrtInfinityResult, powResult, pureNan));
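// Taken together, the blocks above compute roughly the following (a reference sketch only;
// "powi" stands for the doublePowi fast path and the constants are the ones used above):
//     if (y is an integer and unsigned(y) <= maxExponentForIntegerMathPow) return powi(x, int(y));
//     if (y is NaN)                        return NaN;
//     if (abs(x) == 1 and abs(y) == Inf)   return NaN;
//     if (y == 0.5)   return x == 0 ? 0   : (abs(x) == Inf ? Inf : sqrt(x));
//     if (y == -0.5)  return x == 0 ? Inf : (abs(x) == Inf ? 0   : 1 / sqrt(x));
//     return pow(x, y);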
2384 void compileArithRandom()
2386 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
2388 // Inlined WeakRandom::advance().
2389 // uint64_t x = m_low;
2390 void* lowAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset();
2391 LValue low = m_out.load64(m_out.absolute(lowAddress));
2392 // uint64_t y = m_high;
2393 void* highAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset();
2394 LValue high = m_out.load64(m_out.absolute(highAddress));
2396 m_out.store64(high, m_out.absolute(lowAddress));
2399 LValue phase1 = m_out.bitXor(m_out.shl(low, m_out.constInt64(23)), low);
2402 LValue phase2 = m_out.bitXor(m_out.lShr(phase1, m_out.constInt64(17)), phase1);
2404 // x ^= y ^ (y >> 26);
2405 LValue phase3 = m_out.bitXor(m_out.bitXor(high, m_out.lShr(high, m_out.constInt64(26))), phase2);
2408 m_out.store64(phase3, m_out.absolute(highAddress));
2411 LValue random64 = m_out.add(phase3, high);
2413 // Extract the low 53 bits: integers of up to 53 bits are exactly representable in a double.
2414 LValue random53 = m_out.bitAnd(random64, m_out.constInt64((1ULL << 53) - 1));
2416 LValue double53Integer = m_out.intToDouble(random53);
2418 // Convert `(53-bit integer value) / (1 << 53)` into `(53-bit integer value) * (1.0 / (1 << 53))`.
2419 // The constant `1.0 / (1 << 53)` is exactly 2^-53, a double with mantissa = 0 and biased exponent = 970.
2420 static const double scale = 1.0 / (1ULL << 53);
2422 // Multiplying the 53-bit integer value by 2^-53 does not change its mantissa;
2423 // it only reduces its exponent.
2424 // (0.0 is the exception: it is handled specially and its exponent simply stays 0.)
2425 // The result is a random double with 53 bits of precision in [0, 1).
2426 LValue result = m_out.doubleMul(double53Integer, m_out.constDouble(scale));
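// Sanity check of the range (illustrative): random53 == 1 yields 2^-53, the smallest nonzero
// result, and random53 == 2^53 - 1 yields 1 - 2^-53, the largest value strictly below 1.0,
// so the result always lies in [0, 1).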
2431 void compileArithRound()
2433 if (m_node->child1().useKind() == DoubleRepUse) {
2434 LValue result = nullptr;
2435 if (producesInteger(m_node->arithRoundingMode()) && !shouldCheckNegativeZero(m_node->arithRoundingMode())) {
2436 LValue value = lowDouble(m_node->child1());
2437 result = m_out.doubleFloor(m_out.doubleAdd(value, m_out.constDouble(0.5)));
2439 LBasicBlock realPartIsMoreThanHalf = m_out.newBlock();
2440 LBasicBlock continuation = m_out.newBlock();
2442 LValue value = lowDouble(m_node->child1());
2443 LValue integerValue = m_out.doubleCeil(value);
2444 ValueFromBlock integerValueResult = m_out.anchor(integerValue);
2446 LValue realPart = m_out.doubleSub(integerValue, value);
2448 m_out.branch(m_out.doubleGreaterThanOrUnordered(realPart, m_out.constDouble(0.5)), unsure(realPartIsMoreThanHalf), unsure(continuation));
2450 LBasicBlock lastNext = m_out.appendTo(realPartIsMoreThanHalf, continuation);
2451 LValue integerValueRoundedDown = m_out.doubleSub(integerValue, m_out.constDouble(1));
2452 ValueFromBlock integerValueRoundedDownResult = m_out.anchor(integerValueRoundedDown);
2453 m_out.jump(continuation);
2454 m_out.appendTo(continuation, lastNext);
2456 result = m_out.phi(Double, integerValueResult, integerValueRoundedDownResult);
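// Scalar form of the two rounding strategies above (illustrative only):
//     without -0 checks:  result = floor(value + 0.5);
//     with -0 checks:     ceiled = ceil(value);
//                         result = (ceiled - value > 0.5) ? ceiled - 1 : ceiled;
// The second form is used when negative zero matters because it preserves the sign of zero,
// e.g. for value == -0.5 it produces -0.0 where floor(-0.5 + 0.5) would produce +0.0.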
2459 if (producesInteger(m_node->arithRoundingMode())) {
2460 LValue integerValue = convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode()));
2461 setInt32(integerValue);
2467 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2468 LValue argument = lowJSValue(m_node->child1());
2469 setJSValue(vmCall(Int64, m_out.operation(operationArithRound), m_callFrame, argument));
2472 void compileArithFloor()
2474 if (m_node->child1().useKind() == DoubleRepUse) {
2475 LValue value = lowDouble(m_node->child1());
2476 LValue integerValue = m_out.doubleFloor(value);
2477 if (producesInteger(m_node->arithRoundingMode()))
2478 setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode())));
2480 setDouble(integerValue);
2483 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2484 LValue argument = lowJSValue(m_node->child1());
2485 setJSValue(vmCall(Int64, m_out.operation(operationArithFloor), m_callFrame, argument));
2488 void compileArithCeil()
2490 if (m_node->child1().useKind() == DoubleRepUse) {
2491 LValue value = lowDouble(m_node->child1());
2492 LValue integerValue = m_out.doubleCeil(value);
2493 if (producesInteger(m_node->arithRoundingMode()))
2494 setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode())));
2496 setDouble(integerValue);
2499 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2500 LValue argument = lowJSValue(m_node->child1());
2501 setJSValue(vmCall(Int64, m_out.operation(operationArithCeil), m_callFrame, argument));
2504 void compileArithTrunc()
2506 if (m_node->child1().useKind() == DoubleRepUse) {
2507 LValue value = lowDouble(m_node->child1());
2508 LValue result = m_out.doubleTrunc(value);
2509 if (producesInteger(m_node->arithRoundingMode()))
2510 setInt32(convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode())));
2515 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2516 LValue argument = lowJSValue(m_node->child1());
2517 setJSValue(vmCall(Int64, m_out.operation(operationArithTrunc), m_callFrame, argument));
2520 void compileArithSqrt()
2522 if (m_node->child1().useKind() == DoubleRepUse) {
2523 setDouble(m_out.doubleSqrt(lowDouble(m_node->child1())));
2526 LValue argument = lowJSValue(m_node->child1());
2527 LValue result = vmCall(Double, m_out.operation(operationArithSqrt), m_callFrame, argument);
2531 void compileArithLog()
2533 if (m_node->child1().useKind() == DoubleRepUse) {
2534 setDouble(m_out.doubleLog(lowDouble(m_node->child1())));
2537 LValue argument = lowJSValue(m_node->child1());
2538 LValue result = vmCall(Double, m_out.operation(operationArithLog), m_callFrame, argument);
2542 void compileArithFRound()
2544 if (m_node->child1().useKind() == DoubleRepUse) {
2545 setDouble(m_out.fround(lowDouble(m_node->child1())));
2548 LValue argument = lowJSValue(m_node->child1());
2549 LValue result = vmCall(Double, m_out.operation(operationArithFRound), m_callFrame, argument);
2553 void compileArithNegate()
2555 switch (m_node->child1().useKind()) {
2557 LValue value = lowInt32(m_node->child1());
2560 if (!shouldCheckOverflow(m_node->arithMode()))
2561 result = m_out.neg(value);
2562 else if (!shouldCheckNegativeZero(m_node->arithMode())) {
2563 CheckValue* check = m_out.speculateSub(m_out.int32Zero, value);
2564 blessSpeculation(check, Overflow, noValue(), nullptr, m_origin);
2567 speculate(Overflow, noValue(), 0, m_out.testIsZero32(value, m_out.constInt32(0x7fffffff)));
2568 result = m_out.neg(value);
2576 if (!abstractValue(m_node->child1()).couldBeType(SpecInt52Only)) {
2578 LValue value = lowWhicheverInt52(m_node->child1(), kind);
2579 LValue result = m_out.neg(value);
2580 if (shouldCheckNegativeZero(m_node->arithMode()))
2581 speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
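// -0 is not representable as an integer, so if the result is zero (meaning the input was zero)
// and the arith mode cares about negative zero, we OSR-exit and let the slower tiers produce
// the -0 double.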
2582 setInt52(result, kind);
2586 LValue value = lowInt52(m_node->child1());
2587 CheckValue* result = m_out.speculateSub(m_out.int64Zero, value);
2588 blessSpeculation(result, Int52Overflow, noValue(), nullptr, m_origin);
2589 speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
2594 case DoubleRepUse: {
2595 setDouble(m_out.doubleNeg(lowDouble(m_node->child1())));
2600 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2601 ArithProfile* arithProfile = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic)->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
2602 JITNegIC* negIC = codeBlock()->addJITNegIC(arithProfile);
2603 auto repatchingFunction = operationArithNegateOptimize;
2604 auto nonRepatchingFunction = operationArithNegate;
2605 compileMathIC(negIC, repatchingFunction, nonRepatchingFunction);
2610 void compileBitAnd()
2612 if (m_node->isBinaryUseKind(UntypedUse)) {
2613 emitBinaryBitOpSnippet<JITBitAndGenerator>(operationValueBitAnd);
2616 setInt32(m_out.bitAnd(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
2621 if (m_node->isBinaryUseKind(UntypedUse)) {
2622 emitBinaryBitOpSnippet<JITBitOrGenerator>(operationValueBitOr);
2625 setInt32(m_out.bitOr(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
2628 void compileBitXor()
2630 if (m_node->isBinaryUseKind(UntypedUse)) {
2631 emitBinaryBitOpSnippet<JITBitXorGenerator>(operationValueBitXor);
2634 setInt32(m_out.bitXor(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
2637 void compileBitRShift()
2639 if (m_node->isBinaryUseKind(UntypedUse)) {
2640 emitRightShiftSnippet(JITRightShiftGenerator::SignedShift);
2643 setInt32(m_out.aShr(
2644 lowInt32(m_node->child1()),
2645 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
2648 void compileBitLShift()
2650 if (m_node->isBinaryUseKind(UntypedUse)) {
2651 emitBinaryBitOpSnippet<JITLeftShiftGenerator>(operationValueBitLShift);
2655 lowInt32(m_node->child1()),
2656 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
2659 void compileBitURShift()
2661 if (m_node->isBinaryUseKind(UntypedUse)) {
2662 emitRightShiftSnippet(JITRightShiftGenerator::UnsignedShift);
2665 setInt32(m_out.lShr(
2666 lowInt32(m_node->child1()),
2667 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
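// In this and the two shift nodes above, the shift count is masked with 31 because ECMAScript
// defines shift amounts modulo 32: only the low five bits of the right operand are used.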
2670 void compileUInt32ToNumber()
2672 LValue value = lowInt32(m_node->child1());
2674 if (doesOverflow(m_node->arithMode())) {
2675 setStrictInt52(m_out.zeroExtPtr(value));
2679 speculate(Overflow, noValue(), 0, m_out.lessThan(value, m_out.int32Zero));
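// When the arith mode tolerates overflow, the uint32 is zero-extended and represented as a
// strict Int52, which can hold any uint32. Otherwise a value with the sign bit set (>= 2^31)
// does not fit in an int32, so the speculation above exits on it.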
2683 void compileCheckStructure()
2686 if (m_node->child1()->hasConstant())
2687 exitKind = BadConstantCache;
2689 exitKind = BadCache;
2691 switch (m_node->child1().useKind()) {
2693 case KnownCellUse: {
2694 LValue cell = lowCell(m_node->child1());
2697 m_out.load32(cell, m_heaps.JSCell_structureID), jsValueValue(cell),
2698 exitKind, m_node->structureSet(),
2699 [&] (Structure* structure) {
2700 return weakStructureID(structure);
2705 case CellOrOtherUse: {
2706 LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);
2708 LBasicBlock cellCase = m_out.newBlock();
2709 LBasicBlock notCellCase = m_out.newBlock();
2710 LBasicBlock continuation = m_out.newBlock();
2713 isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase));
2715 LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
2717 m_out.load32(value, m_heaps.JSCell_structureID), jsValueValue(value),
2718 exitKind, m_node->structureSet(),
2719 [&] (Structure* structure) {
2720 return weakStructureID(structure);
2722 m_out.jump(continuation);
2724 m_out.appendTo(notCellCase, continuation);
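// In the not-cell case there is no structure to check; the value is only allowed to be "other"
// (null or undefined), which is what the type check below enforces.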
2725 FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecCell | SpecOther, isNotOther(value));
2726 m_out.jump(continuation);
2728 m_out.appendTo(continuation, lastNext);
2733 DFG_CRASH(m_graph, m_node, "Bad use kind");
2738 void compileCheckCell()
2740 LValue cell = lowCell(m_node->child1());
2743 BadCell, jsValueValue(cell), m_node->child1().node(),
2744 m_out.notEqual(cell, weakPointer(m_node->cellOperand()->cell())));
2747 void compileCheckBadCell()
2752 void compileCheckNotEmpty()
2754 speculate(TDZFailure, noValue(), nullptr, m_out.isZero64(lowJSValue(m_node->child1())));
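// The empty JSValue is encoded as all-zero bits, so a zero payload here means the binding is
// still in its temporal dead zone and we OSR-exit with TDZFailure.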
2757 void compileCheckStringIdent()
2759 UniquedStringImpl* uid = m_node->uidOperand();
2760 LValue stringImpl = lowStringIdent(m_node->child1());
2761 speculate(BadIdent, noValue(), nullptr, m_out.notEqual(stringImpl, m_out.constIntPtr(uid)));
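// Uniqued string impls are interned, so identity of the StringImpl pointer is equivalent to
// equality of the identifier; a single pointer comparison suffices here.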
2764 void compileGetExecutable()
2766 LValue cell = lowCell(m_node->child1());
2767 speculateFunction(m_node->child1(), cell);
2768 setJSValue(m_out.loadPtr(cell, m_heaps.JSFunction_executable));
2771 void compileArrayifyToStructure()
2773 LValue cell = lowCell(m_node->child1());
2774 LValue property = !!m_node->child2() ? lowInt32(m_node->child2()) : 0;
2776 LBasicBlock unexpectedStructure = m_out.newBlock();
2777 LBasicBlock continuation = m_out.newBlock();
2779 LValue structureID = m_out.load32(cell, m_heaps.JSCell_structureID);
2782 m_out.notEqual(structureID, weakStructureID(m_node->structure())),
2783 rarely(unexpectedStructure), usually(continuation));
2785 LBasicBlock lastNext = m_out.appendTo(unexpectedStructure, continuation);
2788 switch (m_node->arrayMode().type()) {
2791 case Array::Contiguous:
2793 Uncountable, noValue(), 0,
2794 m_out.aboveOrEqual(property, m_out.constInt32(MIN_SPARSE_ARRAY_INDEX)));
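// Indices at or above MIN_SPARSE_ARRAY_INDEX cannot be handled by the contiguous shapes being
// arrayified to (they would require sparse storage), so we exit rather than convert.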
2801 switch (m_node->arrayMode().type()) {
2803 vmCall(Void, m_out.operation(operationEnsureInt32), m_callFrame, cell);
2806 vmCall(Void, m_out.operation(operationEnsureDouble), m_callFrame, cell);
2808 case Array::Contiguous:
2809 vmCall(Void, m_out.operation(operationEnsureContiguous), m_callFrame, cell);
2811 case Array::ArrayStorage:
2812 case Array::SlowPutArrayStorage:
2813 vmCall(Void, m_out.operation(operationEnsureArrayStorage), m_callFrame, cell);
2816 DFG_CRASH(m_graph, m_node, "Bad array type");
2820 structureID = m_out.load32(cell, m_heaps.JSCell_structureID);
2822 BadIndexingType, jsValueValue(cell), 0,
2823 m_out.notEqual(structureID, weakStructureID(m_node->structure())));
2824 m_out.jump(continuation);
2826 m_out.appendTo(continuation, lastNext);
2829 void compilePutStructure()
2831 m_ftlState.jitCode->common.notifyCompilingStructureTransition(m_graph.m_plan, codeBlock(), m_node);
2833 Structure* oldStructure = m_node->transition()->previous;
2834 Structure* newStructure = m_node->transition()->next;
2835 ASSERT_UNUSED(oldStructure, oldStructure->indexingType() == newStructure->indexingType());
2836 ASSERT(oldStructure->typeInfo().inlineTypeFlags() == newStructure->typeInfo().inlineTypeFlags());
2837 ASSERT(oldStructure->typeInfo().type() == newStructure->typeInfo().type());
2839 LValue cell = lowCell(m_node->child1());
2841 weakStructureID(newStructure),
2842 cell, m_heaps.JSCell_structureID);
2845 void compileGetById(AccessType type)
2847 ASSERT(type == AccessType::Get || type == AccessType::GetPure);
2848 switch (m_node->child1().useKind()) {
2850 setJSValue(getById(lowCell(m_node->child1()), type));
2855 // This is pretty weird, since we duplicate the slow path both here and in the
2856 // code generated by the IC. We should investigate making this less bad.
2857 // https://bugs.webkit.org/show_bug.cgi?id=127830
2858 LValue value = lowJSValue(m_node->child1());
2860 LBasicBlock cellCase = m_out.newBlock();
2861 LBasicBlock notCellCase = m_out.newBlock();
2862 LBasicBlock continuation = m_out.newBlock();
2865 isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase));
2867 LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
2868 ValueFromBlock cellResult = m_out.anchor(getById(value, type));
2869 m_out.jump(continuation);
2871 J_JITOperation_EJI getByIdFunction;
2872 if (type == AccessType::Get)
2873 getByIdFunction = operationGetByIdGeneric;
2875 getByIdFunction = operationTryGetByIdGeneric;
2877 m_out.appendTo(notCellCase, continuation);
2878 ValueFromBlock notCellResult = m_out.anchor(vmCall(
2879 Int64, m_out.operation(getByIdFunction),
2881 m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
2882 m_out.jump(continuation);
2884 m_out.appendTo(continuation, lastNext);
2885 setJSValue(m_out.phi(Int64, cellResult, notCellResult));
2890 DFG_CRASH(m_graph, m_node, "Bad use kind");
2895 void compileGetByIdWithThis()
2897 LValue base = lowJSValue(m_node->child1());
2898 LValue thisValue = lowJSValue(m_node->child2());
2899 LValue result = vmCall(Int64, m_out.operation(operationGetByIdWithThis), m_callFrame, base, thisValue, m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()]));
2903 void compileGetByValWithThis()
2905 LValue base = lowJSValue(m_node->child1());
2906 LValue thisValue = lowJSValue(m_node->child2());
2907 LValue subscript = lowJSValue(m_node->child3());
2909 LValue result = vmCall(Int64, m_out.operation(operationGetByValWithThis), m_callFrame, base, thisValue, subscript);
2913 void compilePutByIdWithThis()
2915 LValue base = lowJSValue(m_node->child1());
2916 LValue thisValue = lowJSValue(m_node->child2());
2917 LValue value = lowJSValue(m_node->child3());
2919 vmCall(Void, m_out.operation(m_graph.isStrictModeFor(m_node->origin.semantic) ? operationPutByIdWithThisStrict : operationPutByIdWithThis),
2920 m_callFrame, base, thisValue, value, m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()]));
2923 void compilePutByValWithThis()
2925 LValue base = lowJSValue(m_graph.varArgChild(m_node, 0));
2926 LValue thisValue = lowJSValue(m_graph.varArgChild(m_node, 1));
2927 LValue property = lowJSValue(m_graph.varArgChild(m_node, 2));
2928 LValue value = lowJSValue(m_graph.varArgChild(m_node, 3));
2930 vmCall(Void, m_out.operation(m_graph.isStrictModeFor(m_node->origin.semantic) ? operationPutByValWithThisStrict : operationPutByValWithThis),
2931 m_callFrame, base, thisValue, property, value);
2934 void compileDefineDataProperty()
2936 LValue base = lowCell(m_graph.varArgChild(m_node, 0));
2937 LValue value = lowJSValue(m_graph.varArgChild(m_node, 2));
2938 LValue attributes = lowInt32(m_graph.varArgChild(m_node, 3));
2939 Edge& propertyEdge = m_graph.varArgChild(m_node, 1);
2940 switch (propertyEdge.useKind()) {
2942 LValue property = lowString(propertyEdge);
2943 vmCall(Void, m_out.operation(operationDefineDataPropertyString), m_callFrame, base, property, value, attributes);
2946 case StringIdentUse: {
2947 LValue property = lowStringIdent(propertyEdge);
2948 vmCall(Void, m_out.operation(operationDefineDataPropertyStringIdent), m_callFrame, base, property, value, attributes);
2952 LValue property = lowSymbol(propertyEdge);
2953 vmCall(Void, m_out.operation(operationDefineDataPropertySymbol), m_callFrame, base, property, value, attributes);
2957 LValue property = lowJSValue(propertyEdge);
2958 vmCall(Void, m_out.operation(operationDefineDataProperty), m_callFrame, base, property, value, attributes);
2962 RELEASE_ASSERT_NOT_REACHED();
2966 void compileDefineAccessorProperty()
2968 LValue base = lowCell(m_graph.varArgChild(m_node, 0));
2969 LValue getter = lowCell(m_graph.varArgChild(m_node, 2));
2970 LValue setter = lowCell(m_graph.varArgChild(m_node, 3));
2971 LValue attributes = lowInt32(m_graph.varArgChild(m_node, 4));
2972 Edge& propertyEdge = m_graph.varArgChild(m_node, 1);
2973 switch (propertyEdge.useKind()) {
2975 LValue property = lowString(propertyEdge);
2976 vmCall(Void, m_out.operation(operationDefineAccessorPropertyString), m_callFrame, base, property, getter, setter, attributes);
2979 case StringIdentUse: {
2980 LValue property = lowStringIdent(propertyEdge);
2981 vmCall(Void, m_out.operation(operationDefineAccessorPropertyStringIdent), m_callFrame, base, property, getter, setter, attributes);
2985 LValue property = lowSymbol(propertyEdge);
2986 vmCall(Void, m_out.operation(operationDefineAccessorPropertySymbol), m_callFrame, base, property, getter, setter, attributes);
2990 LValue property = lowJSValue(propertyEdge);
2991 vmCall(Void, m_out.operation(operationDefineAccessorProperty), m_callFrame, base, property, getter, setter, attributes);
2995 RELEASE_ASSERT_NOT_REACHED();
2999 void compilePutById()
3001 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == CellUse);
3003 Node* node = m_node;
3004 LValue base = lowCell(node->child1());
3005 LValue value = lowJSValue(node->child2());
3006 auto uid = m_graph.identifiers()[node->identifierNumber()];
3008 B3::PatchpointValue* patchpoint = m_out.patchpoint(Void);
3009 patchpoint->appendSomeRegister(base);
3010 patchpoint->appendSomeRegister(value);
3011 patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
3012 patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
3013 patchpoint->clobber(RegisterSet::macroScratchRegisters());
3015 // FIXME: If this is a PutByIdFlush, we might want to late-clobber volatile registers.
3016 // https://bugs.webkit.org/show_bug.cgi?id=152848
3018 RefPtr<PatchpointExceptionHandle> exceptionHandle =
3019 preparePatchpointForExceptions(patchpoint);
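// The patchpoint below becomes a PutById inline cache: the generator lambda emits the
// JITPutByIdGenerator fast path, adds a slow path that calls the generator's slow-path
// operation with the StructureStubInfo and uid, and registers a link task that finalizes
// the generator once the code is linked.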
3021 State* state = &m_ftlState;
3022 ECMAMode ecmaMode = m_graph.executableFor(node->origin.semantic)->ecmaMode();
3024 patchpoint->setGenerator(
3025 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
3026 AllowMacroScratchRegisterUsage allowScratch(jit);
3028 CallSiteIndex callSiteIndex =
3029 state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic);
3031 Box<CCallHelpers::JumpList> exceptions =
3032 exceptionHandle->scheduleExitCreation(params)->jumps(jit);
3034 // JS setter call ICs generated by the PutById IC will need this.
3035 exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);
3037 auto generator = Box<JITPutByIdGenerator>::create(
3038 jit.codeBlock(), node->origin.semantic, callSiteIndex,
3039 params.unavailableRegisters(), JSValueRegs(params[0].gpr()),
3040 JSValueRegs(params[1].gpr()), GPRInfo::patchpointScratchRegister, ecmaMode,
3041 node->op() == PutByIdDirect ? Direct : NotDirect);
3043 generator->generateFastPath(jit);
3044 CCallHelpers::Label done = jit.label();
3047 [=] (CCallHelpers& jit) {
3048 AllowMacroScratchRegisterUsage allowScratch(jit);
3050 generator->slowPathJump().link(&jit);
3051 CCallHelpers::Label slowPathBegin = jit.label();
3052 CCallHelpers::Call slowPathCall = callOperation(
3053 *state, params.unavailableRegisters(), jit, node->origin.semantic,
3054 exceptions.get(), generator->slowPathFunction(), InvalidGPRReg,
3055 CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(),
3056 params[0].gpr(), CCallHelpers::TrustedImmPtr(uid)).call();
3057 jit.jump().linkTo(done, &jit);
3059 generator->reportSlowPathCall(slowPathBegin, slowPathCall);
3062 [=] (LinkBuffer& linkBuffer) {
3063 generator->finalize(linkBuffer);
3069 void compileGetButterfly()
3071 setStorage(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSObject_butterfly));
3074 void compileConstantStoragePointer()
3076 setStorage(m_out.constIntPtr(m_node->storagePointer()));
3079 void compileGetIndexedPropertyStorage()
3081 LValue cell = lowCell(m_node->child1());
3083 if (m_node->arrayMode().type() == Array::String) {
3084 LBasicBlock slowPath = m_out.newBlock();
3085 LBasicBlock continuation = m_out.newBlock();
3087 LValue fastResultValue = m_out.loadPtr(cell, m_heaps.JSString_value);
3088 ValueFromBlock fastResult = m_out.anchor(fastResultValue);
3091 m_out.notNull(fastResultValue), usually(continuation), rarely(slowPath));
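// JSString caches its resolved StringImpl in the value field loaded above; a null pointer means
// the string is still a rope, so the slow path calls operationResolveRope to flatten it before
// the character data can be read.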
3093 LBasicBlock lastNext = m_out.appendTo(slowPath, continuation);
3095 ValueFromBlock slowResult = m_out.anchor(
3096 vmCall(pointerType(), m_out.operation(operationResolveRope), m_callFrame, cell));
3098 m_out.jump(continuation);
3100 m_out.appendTo(continuation, lastNext);
3102 setStorage(m_out.loadPtr(m_out.phi(pointerType(), fastResult, slowResult), m_heaps.StringImpl_data));
3106 DFG_ASSERT(m_graph, m_node, isTypedView(m_node->arrayMode().typedArrayType()));
3107 setStorage(m_out.loadPtr(cell, m_heaps.JSArrayBufferView_vector));
3110 void compileCheckArray()
3112 Edge edge = m_node->child1();
3113 LValue cell = lowCell(edge);
3115 if (m_node->arrayMode().alreadyChecked(m_graph, m_node, abstractValue(edge)))
3119 BadIndexingType, jsValueValue(cell), 0,
3120 m_out.logicalNot(isArrayType(cell, m_node->arrayMode())));
3123 void compileGetTypedArrayByteOffset()
3125 LValue basePtr = lowCell(m_node->child1());
3127 LBasicBlock simpleCase = m_out.newBlock();
3128 LBasicBlock wastefulCase = m_out.newBlock();
3129 LBasicBlock continuation = m_out.newBlock();
3131 LValue mode = m_out.load32(basePtr, m_heaps.JSArrayBufferView_mode);
3133 m_out.notEqual(mode, m_out.constInt32(WastefulTypedArray)),
3134 unsure(simpleCase), unsure(wastefulCase));
3136 LBasicBlock lastNext = m_out.appendTo(simpleCase, wastefulCase);
3138 ValueFromBlock simpleOut = m_out.anchor(m_out.constIntPtr(0));
3140 m_out.jump(continuation);
3142 m_out.appendTo(wastefulCase, continuation);
3144 LValue vectorPtr = m_out.loadPtr(basePtr, m_heaps.JSArrayBufferView_vector);
3145 LValue butterflyPtr = m_out.loadPtr(basePtr, m_heaps.JSObject_butterfly);
3146 LValue arrayBufferPtr = m_out.loadPtr(butterflyPtr, m_heaps.Butterfly_arrayBuffer);
3147 LValue dataPtr = m_out.loadPtr(arrayBufferPtr, m_heaps.ArrayBuffer_data);
3149 ValueFromBlock wastefulOut = m_out.anchor(m_out.sub(vectorPtr, dataPtr));
3151 m_out.jump(continuation);
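// For a wasteful typed array the byte offset is the distance from the ArrayBuffer's data
// pointer to the view's vector pointer, computed above as vectorPtr - dataPtr; fast
// (non-wasteful) views always start at offset 0, hence the constant in the simple case.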