2 * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "FTLLowerDFGToB3.h"
32 #include "AirGenerationContext.h"
33 #include "AllowMacroScratchRegisterUsage.h"
34 #include "AtomicsObject.h"
35 #include "B3CheckValue.h"
36 #include "B3FenceValue.h"
37 #include "B3PatchpointValue.h"
38 #include "B3SlotBaseValue.h"
39 #include "B3StackmapGenerationParams.h"
40 #include "B3ValueInlines.h"
41 #include "CallFrameShuffler.h"
42 #include "CodeBlockWithJITType.h"
43 #include "DFGAbstractInterpreterInlines.h"
44 #include "DFGCapabilities.h"
45 #include "DFGDominators.h"
46 #include "DFGInPlaceAbstractState.h"
47 #include "DFGOSRAvailabilityAnalysisPhase.h"
48 #include "DFGOSRExitFuzz.h"
49 #include "DirectArguments.h"
50 #include "FTLAbstractHeapRepository.h"
51 #include "FTLAvailableRecovery.h"
52 #include "FTLExceptionTarget.h"
53 #include "FTLForOSREntryJITCode.h"
54 #include "FTLFormattedValue.h"
55 #include "FTLLazySlowPathCall.h"
56 #include "FTLLoweredNodeValue.h"
57 #include "FTLOperations.h"
58 #include "FTLOutput.h"
59 #include "FTLPatchpointExceptionHandle.h"
60 #include "FTLSnippetParams.h"
61 #include "FTLThunks.h"
62 #include "FTLWeightedTarget.h"
63 #include "JITAddGenerator.h"
64 #include "JITBitAndGenerator.h"
65 #include "JITBitOrGenerator.h"
66 #include "JITBitXorGenerator.h"
67 #include "JITDivGenerator.h"
68 #include "JITInlineCacheGenerator.h"
69 #include "JITLeftShiftGenerator.h"
70 #include "JITMathIC.h"
71 #include "JITMulGenerator.h"
72 #include "JITRightShiftGenerator.h"
73 #include "JITSubGenerator.h"
74 #include "JSAsyncFunction.h"
75 #include "JSAsyncGeneratorFunction.h"
76 #include "JSCInlines.h"
77 #include "JSGeneratorFunction.h"
78 #include "JSLexicalEnvironment.h"
80 #include "OperandsInlines.h"
81 #include "ScopedArguments.h"
82 #include "ScopedArgumentsTable.h"
83 #include "ScratchRegisterAllocator.h"
84 #include "SetupVarargsFrame.h"
85 #include "ShadowChicken.h"
86 #include "StructureStubInfo.h"
87 #include "SuperSampler.h"
88 #include "ThunkGenerators.h"
89 #include "VirtualRegister.h"
92 #include <unordered_set>
94 #include <wtf/Gigacage.h>
96 namespace JSC { namespace FTL {
// Monotonically increasing id used to give each FTL compilation a unique
// label in verbose-compilation dumps ("jsBody_<n>_<name>_<hash>"); atomic
// because compilations can run on concurrent JIT worker threads.
103 std::atomic<int> compileCounter;
// Crash handler baked into FTL-generated code for paths the compiler proved
// unreachable: logs the CodeBlock, the basic block index, and (when known)
// the DFG node index before crashing, so a "thought-to-be-unreachable" hit
// can be diagnosed from the crash log.
// NOTE(review): this excerpt is truncated — the actual crash statement and
// the closing brace are outside the visible lines.
106 NO_RETURN_DUE_TO_CRASH static void ftlUnreachable(
107 CodeBlock* codeBlock, BlockIndex blockIndex, unsigned nodeIndex)
109 dataLog("Crashing in thought-to-be-unreachable FTL-generated code for ", pointerDump(codeBlock), " at basic block #", blockIndex);
// UINT_MAX is the sentinel for "no specific node" (block-level crash).
110 if (nodeIndex != UINT_MAX)
111 dataLog(", node @", nodeIndex);
117 // Using this instead of typeCheck() helps to reduce the load on B3, by creating
118 // significantly less dead code.
// Emits a speculation check only when the abstract interpreter says one is
// still needed for `highValue` given `typesPassedThrough`; otherwise the
// check is skipped entirely at lowering time. Arguments are evaluated once
// into _ftc_-prefixed locals to avoid double evaluation of macro operands.
// NOTE(review): the early-exit branch taken when needsTypeCheck() is false
// (original line 124) is not visible in this excerpt.
119 #define FTL_TYPE_CHECK_WITH_EXIT_KIND(exitKind, lowValue, highValue, typesPassedThrough, failCondition) do { \
120 FormattedValue _ftc_lowValue = (lowValue); \
121 Edge _ftc_highValue = (highValue); \
122 SpeculatedType _ftc_typesPassedThrough = (typesPassedThrough); \
123 if (!m_interpreter.needsTypeCheck(_ftc_highValue, _ftc_typesPassedThrough)) \
125 typeCheck(_ftc_lowValue, _ftc_highValue, _ftc_typesPassedThrough, (failCondition), exitKind); \
// Common case: a type check whose OSR-exit kind is BadType.
128 #define FTL_TYPE_CHECK(lowValue, highValue, typesPassedThrough, failCondition) \
129 FTL_TYPE_CHECK_WITH_EXIT_KIND(BadType, lowValue, highValue, typesPassedThrough, failCondition)
132 WTF_MAKE_NONCOPYABLE(LowerDFGToB3);
134 LowerDFGToB3(State& state)
135 : m_graph(state.graph)
138 , m_proc(*state.proc)
139 , m_availabilityCalculator(m_graph)
140 , m_state(state.graph)
141 , m_interpreter(state.graph, m_state)
147 State* state = &m_ftlState;
150 if (verboseCompilationEnabled()) {
152 "jsBody_", ++compileCounter, "_", codeBlock()->inferredName(),
153 "_", codeBlock()->hash());
158 m_proc.setNumEntrypoints(m_graph.m_numberOfEntrypoints);
159 CodeBlock* codeBlock = m_graph.m_codeBlock;
161 Ref<B3::Air::PrologueGenerator> catchPrologueGenerator = createSharedTask<B3::Air::PrologueGeneratorFunction>(
162 [codeBlock] (CCallHelpers& jit, B3::Air::Code& code) {
163 AllowMacroScratchRegisterUsage allowScratch(jit);
164 jit.addPtr(CCallHelpers::TrustedImm32(-code.frameSize()), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
165 jit.emitSave(code.calleeSaveRegisterAtOffsetList());
166 jit.emitPutToCallFrameHeader(codeBlock, CallFrameSlot::codeBlock);
169 for (unsigned catchEntrypointIndex : m_graph.m_entrypointIndexToCatchBytecodeOffset.keys()) {
170 RELEASE_ASSERT(catchEntrypointIndex != 0);
171 m_proc.code().setPrologueForEntrypoint(catchEntrypointIndex, catchPrologueGenerator.copyRef());
174 if (m_graph.m_maxLocalsForCatchOSREntry) {
175 uint32_t numberOfLiveLocals = std::max(*m_graph.m_maxLocalsForCatchOSREntry, 1u); // Make sure we always allocate a non-null catchOSREntryBuffer.
176 m_ftlState.jitCode->common.catchOSREntryBuffer = m_graph.m_vm.scratchBufferForSize(sizeof(JSValue) * numberOfLiveLocals);
180 m_graph.ensureSSADominators();
182 if (verboseCompilationEnabled())
183 dataLog("Function ready, beginning lowering.\n");
185 m_out.initialize(m_heaps);
187 // We use prologue frequency for all of the initialization code.
188 m_out.setFrequency(1);
190 LBasicBlock prologue = m_out.newBlock();
191 LBasicBlock callEntrypointArgumentSpeculations = m_out.newBlock();
192 m_handleExceptions = m_out.newBlock();
194 for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
195 m_highBlock = m_graph.block(blockIndex);
198 m_out.setFrequency(m_highBlock->executionCount);
199 m_blocks.add(m_highBlock, m_out.newBlock());
202 // Back to prologue frequency for any bocks that get sneakily created in the initialization code.
203 m_out.setFrequency(1);
205 m_out.appendTo(prologue, callEntrypointArgumentSpeculations);
206 m_out.initializeConstants(m_proc, prologue);
207 createPhiVariables();
209 size_t sizeOfCaptured = sizeof(JSValue) * m_graph.m_nextMachineLocal;
210 B3::SlotBaseValue* capturedBase = m_out.lockedStackSlot(sizeOfCaptured);
211 m_captured = m_out.add(capturedBase, m_out.constIntPtr(sizeOfCaptured));
212 state->capturedValue = capturedBase->slot();
214 auto preOrder = m_graph.blocksInPreOrder();
216 m_callFrame = m_out.framePointer();
217 m_tagTypeNumber = m_out.constInt64(TagTypeNumber);
218 m_tagMask = m_out.constInt64(TagMask);
220 // Make sure that B3 knows that we really care about the mask registers. This forces the
221 // constants to be materialized in registers.
222 m_proc.addFastConstant(m_tagTypeNumber->key());
223 m_proc.addFastConstant(m_tagMask->key());
225 // We don't want the CodeBlock to have a weak pointer to itself because
226 // that would cause it to always get collected.
227 m_out.storePtr(m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())), addressFor(CallFrameSlot::codeBlock));
229 VM* vm = &this->vm();
231 // Stack Overflow Check.
232 unsigned exitFrameSize = m_graph.requiredRegisterCountForExit() * sizeof(Register);
233 MacroAssembler::AbsoluteAddress addressOfStackLimit(vm->addressOfSoftStackLimit());
234 PatchpointValue* stackOverflowHandler = m_out.patchpoint(Void);
235 CallSiteIndex callSiteIndex = callSiteIndexForCodeOrigin(m_ftlState, CodeOrigin(0));
236 stackOverflowHandler->appendSomeRegister(m_callFrame);
237 stackOverflowHandler->clobber(RegisterSet::macroScratchRegisters());
238 stackOverflowHandler->numGPScratchRegisters = 1;
239 stackOverflowHandler->setGenerator(
240 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
241 AllowMacroScratchRegisterUsage allowScratch(jit);
242 GPRReg fp = params[0].gpr();
243 GPRReg scratch = params.gpScratch(0);
245 unsigned ftlFrameSize = params.proc().frameSize();
246 unsigned maxFrameSize = std::max(exitFrameSize, ftlFrameSize);
248 jit.addPtr(MacroAssembler::TrustedImm32(-maxFrameSize), fp, scratch);
249 MacroAssembler::JumpList stackOverflow;
250 if (UNLIKELY(maxFrameSize > Options::reservedZoneSize()))
251 stackOverflow.append(jit.branchPtr(MacroAssembler::Above, scratch, fp));
252 stackOverflow.append(jit.branchPtr(MacroAssembler::Above, addressOfStackLimit, scratch));
254 params.addLatePath([=] (CCallHelpers& jit) {
255 AllowMacroScratchRegisterUsage allowScratch(jit);
257 stackOverflow.link(&jit);
259 // FIXME: We would not have to do this if the stack check was part of the Air
260 // prologue. Then, we would know that there is no way for the callee-saves to
262 // https://bugs.webkit.org/show_bug.cgi?id=172456
263 jit.emitRestore(params.proc().calleeSaveRegisterAtOffsetList());
266 MacroAssembler::TrustedImm32(callSiteIndex.bits()),
267 CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
268 jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm->topEntryFrame);
270 jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
271 jit.move(CCallHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::argumentGPR1);
272 CCallHelpers::Call throwCall = jit.call();
274 jit.move(CCallHelpers::TrustedImmPtr(vm), GPRInfo::argumentGPR0);
275 jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
276 CCallHelpers::Call lookupExceptionHandlerCall = jit.call();
277 jit.jumpToExceptionHandler(*vm);
280 [=] (LinkBuffer& linkBuffer) {
281 linkBuffer.link(throwCall, FunctionPtr(operationThrowStackOverflowError));
282 linkBuffer.link(lookupExceptionHandlerCall, FunctionPtr(lookupExceptionHandlerFromCallerFrame));
287 LBasicBlock firstDFGBasicBlock = lowBlock(m_graph.block(0));
290 Vector<LBasicBlock> successors(m_graph.m_numberOfEntrypoints);
291 successors[0] = callEntrypointArgumentSpeculations;
292 for (unsigned i = 1; i < m_graph.m_numberOfEntrypoints; ++i) {
293 // Currently, the only other entrypoint is an op_catch entrypoint.
294 // We do OSR entry at op_catch, and we prove argument formats before
295 // jumping to FTL code, so we don't need to check argument types here
296 // for these entrypoints.
297 successors[i] = firstDFGBasicBlock;
300 m_out.entrySwitch(successors);
301 m_out.appendTo(callEntrypointArgumentSpeculations, m_handleExceptions);
304 m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
307 availabilityMap().clear();
308 availabilityMap().m_locals = Operands<Availability>(codeBlock()->numParameters(), 0);
309 for (unsigned i = codeBlock()->numParameters(); i--;) {
310 availabilityMap().m_locals.argument(i) =
311 Availability(FlushedAt(FlushedJSValue, virtualRegisterForArgument(i)));
314 for (unsigned i = codeBlock()->numParameters(); i--;) {
315 MethodOfGettingAValueProfile profile(&m_graph.m_profiledBlock->valueProfileForArgument(i));
316 VirtualRegister operand = virtualRegisterForArgument(i);
317 LValue jsValue = m_out.load64(addressFor(operand));
319 switch (m_graph.m_argumentFormats[0][i]) {
321 speculate(BadType, jsValueValue(jsValue), profile, isNotInt32(jsValue));
324 speculate(BadType, jsValueValue(jsValue), profile, isNotBoolean(jsValue));
327 speculate(BadType, jsValueValue(jsValue), profile, isNotCell(jsValue));
332 DFG_CRASH(m_graph, nullptr, "Bad flush format for argument");
336 m_out.jump(firstDFGBasicBlock);
340 m_out.appendTo(m_handleExceptions, firstDFGBasicBlock);
341 Box<CCallHelpers::Label> exceptionHandler = state->exceptionHandler;
342 m_out.patchpoint(Void)->setGenerator(
343 [=] (CCallHelpers& jit, const StackmapGenerationParams&) {
344 CCallHelpers::Jump jump = jit.jump();
346 [=] (LinkBuffer& linkBuffer) {
347 linkBuffer.link(jump, linkBuffer.locationOf(*exceptionHandler));
352 for (DFG::BasicBlock* block : preOrder)
355 // Make sure everything is decorated. This does a bunch of deferred decorating. This has
356 // to happen last because our abstract heaps are generated lazily. They have to be
357 // generated lazily because we have an infinite number of numbered, indexed, and
358 // absolute heaps. We only become aware of the ones we actually mention while lowering.
359 m_heaps.computeRangesAndDecorateInstructions();
361 // We create all Phi's up front, but we may then decide not to compile the basic block
362 // that would have contained one of them. So this creates orphans, which triggers B3
363 // validation failures. Calling this fixes the issue.
365 // Note that you should avoid the temptation to make this call conditional upon
366 // validation being enabled. B3 makes no guarantees of any kind of correctness when
367 // dealing with IR that would have failed validation. For example, it would be valid to
368 // write a B3 phase that so aggressively assumes the lack of orphans that it would crash
369 // if any orphans were around. We might even have such phases already.
370 m_proc.deleteOrphans();
372 // We put the blocks into the B3 procedure in a super weird order. Now we reorder them.
373 m_out.applyBlockOrder();
// Pre-creates one B3 Phi value for every DFG Phi node in the graph and
// records it in m_phis (node -> B3 value), so Upsilons lowered later can
// attach incoming values to an already-existing Phi. The B3 type is chosen
// from the DFG node's NodeResultMask flags.
// NOTE(review): the per-case `type = ...` assignments (Double/Int32/Int52/
// Boolean/JS) are not visible in this excerpt — confirm against upstream.
378 void createPhiVariables()
// Iterate blocks and nodes in reverse; order is irrelevant here since we
// only allocate values, we don't wire them up yet.
380 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
381 DFG::BasicBlock* block = m_graph.block(blockIndex);
384 for (unsigned nodeIndex = block->size(); nodeIndex--;) {
385 Node* node = block->at(nodeIndex);
// Only DFG Phi nodes get a B3 Phi; everything else is skipped.
386 if (node->op() != DFG::Phi)
389 switch (node->flags() & NodeResultMask) {
390 case NodeResultDouble:
393 case NodeResultInt32:
396 case NodeResultInt52:
399 case NodeResultBoolean:
406 DFG_CRASH(m_graph, node, "Bad Phi node result type");
// The B3 Phi is created up front but not yet appended to any block;
// see also m_proc.deleteOrphans() in lower() for Phis whose block is
// never compiled.
409 m_phis.add(node, m_proc.add<Value>(B3::Phi, type, Origin(node)));
// Lowers one DFG basic block into its corresponding B3 block: sets the
// output frequency, finds the next block (to keep emitted IR in roughly
// program order), bails early if CFA never reached this block, then runs
// the availability calculator / abstract state and lowers each node in turn.
// NOTE(review): several statements (e.g. the loop that skips killed next
// blocks, the terminal `break`) are not visible in this excerpt.
414 void compileBlock(DFG::BasicBlock* block)
419 if (verboseCompilationEnabled())
420 dataLog("Compiling block ", *block, "\n");
424 // Make sure that any blocks created while lowering code in the high block have the frequency of
425 // the high block. This is appropriate because B3 doesn't need precise frequencies. It just needs
426 // something roughly approximate for things like register allocation.
427 m_out.setFrequency(m_highBlock->executionCount);
429 LBasicBlock lowBlock = m_blocks.get(m_highBlock);
// Scan forward for the next DFG block so we can append our B3 block just
// before its lowered counterpart.
432 for (BlockIndex nextBlockIndex = m_highBlock->index + 1; nextBlockIndex < m_graph.numBlocks(); ++nextBlockIndex) {
433 m_nextHighBlock = m_graph.block(nextBlockIndex);
437 m_nextLowBlock = m_nextHighBlock ? m_blocks.get(m_nextHighBlock) : 0;
439 // All of this effort to find the next block gives us the ability to keep the
440 // generated IR in roughly program order. This ought not affect the performance
441 // of the generated code (since we expect B3 to reorder things) but it will
442 // make IR dumps easier to read.
443 m_out.appendTo(lowBlock, m_nextLowBlock);
// Optional fuzzing hook: deliberately crash in generated code.
445 if (Options::ftlCrashes())
// CFA can become *less* permissive over time (structures change on the
// main thread), so a block the DFG saw as live may now be unreachable.
448 if (!m_highBlock->cfaHasVisited) {
449 if (verboseCompilationEnabled())
450 dataLog("Bailing because CFA didn't reach.\n");
451 crash(m_highBlock, nullptr);
455 m_availabilityCalculator.beginBlock(m_highBlock);
458 m_state.beginBasicBlock(m_highBlock);
// compileNode() returns false when lowering must stop early (terminal or
// state became invalid).
460 for (m_nodeIndex = 0; m_nodeIndex < m_highBlock->size(); ++m_nodeIndex) {
461 if (!compileNode(m_nodeIndex))
// Called when the abstract state becomes invalid mid-block: marks every
// block dominated by the current one as not-CFA-visited so that when we
// reach those blocks we bail (crash) instead of lowering code that would
// consume values this block never defined.
466 void safelyInvalidateAfterTermination()
468 if (verboseCompilationEnabled())
469 dataLog("Bailing.\n");
472 // Invalidate dominated blocks. Under normal circumstances we would expect
473 // them to be invalidated already. But you can have the CFA become more
474 // precise over time because the structures of objects change on the main
475 // thread. Failing to do this would result in weird crashes due to a value
476 // being used but not defined. Race conditions FTW!
477 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
478 DFG::BasicBlock* target = m_graph.block(blockIndex);
// Uses SSA dominators: any block dominated by m_highBlock can only be
// entered through it, so it must bail as well.
481 if (m_graph.m_ssaDominators->dominates(m_highBlock, target)) {
482 if (verboseCompilationEnabled())
483 dataLog("Block ", *target, " will bail also.\n");
484 target->cfaHasVisited = false;
489 bool compileNode(unsigned nodeIndex)
491 if (!m_state.isValid()) {
492 safelyInvalidateAfterTermination();
496 m_node = m_highBlock->at(nodeIndex);
497 m_origin = m_node->origin;
498 m_out.setOrigin(m_node);
500 if (verboseCompilationEnabled())
501 dataLog("Lowering ", m_node, "\n");
503 m_availableRecoveries.shrink(0);
505 m_interpreter.startExecuting();
506 m_interpreter.executeKnownEdgeTypes(m_node);
508 switch (m_node->op()) {
518 compileDoubleConstant();
521 compileInt52Constant();
524 compileLazyJSConstant();
530 compileDoubleAsInt32();
539 compileValueToInt32();
541 case BooleanToNumber:
542 compileBooleanToNumber();
544 case ExtractOSREntryLocal:
545 compileExtractOSREntryLocal();
547 case ExtractCatchLocal:
548 compileExtractCatchLocal();
560 case CallObjectConstructor:
561 compileToObjectOrCallObjectConstructor();
574 compileArithAddOrSub();
590 compileArithMinOrMax();
599 compileArithRandom();
617 compileArithFRound();
620 compileArithNegate();
644 compileUInt32ToNumber();
647 compileCheckStructure();
649 case CheckStructureOrEmpty:
650 compileCheckStructureOrEmpty();
656 compileCheckNotEmpty();
659 compileCheckBadCell();
661 case CheckStringIdent:
662 compileCheckStringIdent();
665 compileGetExecutable();
668 case ArrayifyToStructure:
672 compilePutStructure();
675 compileGetById(AccessType::TryGet);
679 compileGetById(AccessType::Get);
681 case GetByIdWithThis:
682 compileGetByIdWithThis();
688 compileHasOwnProperty();
695 case PutByIdWithThis:
696 compilePutByIdWithThis();
700 compilePutAccessorById();
702 case PutGetterSetterById:
703 compilePutGetterSetterById();
707 compilePutAccessorByVal();
713 compileDeleteByVal();
716 compileGetButterfly();
718 case ConstantStoragePointer:
719 compileConstantStoragePointer();
721 case GetIndexedPropertyStorage:
722 compileGetIndexedPropertyStorage();
728 compileGetArrayLength();
730 case GetVectorLength:
731 compileGetVectorLength();
734 compileCheckInBounds();
739 case GetMyArgumentByVal:
740 case GetMyArgumentByValOutOfBounds:
741 compileGetMyArgumentByVal();
743 case GetByValWithThis:
744 compileGetByValWithThis();
751 case PutByValWithThis:
752 compilePutByValWithThis();
756 case AtomicsCompareExchange:
757 case AtomicsExchange:
763 compileAtomicsReadModifyWrite();
765 case AtomicsIsLockFree:
766 compileAtomicsIsLockFree();
768 case DefineDataProperty:
769 compileDefineDataProperty();
771 case DefineAccessorProperty:
772 compileDefineAccessorProperty();
784 compileArrayIndexOf();
786 case CreateActivation:
787 compileCreateActivation();
790 compilePushWithScope();
793 case NewGeneratorFunction:
794 case NewAsyncGeneratorFunction:
795 case NewAsyncFunction:
796 compileNewFunction();
798 case CreateDirectArguments:
799 compileCreateDirectArguments();
801 case CreateScopedArguments:
802 compileCreateScopedArguments();
804 case CreateClonedArguments:
805 compileCreateClonedArguments();
810 case NewStringObject:
811 compileNewStringObject();
816 case NewArrayWithSpread:
817 compileNewArrayWithSpread();
823 compileNewArrayBuffer();
825 case NewArrayWithSize:
826 compileNewArrayWithSize();
829 compileNewTypedArray();
831 case GetTypedArrayByteOffset:
832 compileGetTypedArrayByteOffset();
835 compileGetPrototypeOf();
837 case AllocatePropertyStorage:
838 compileAllocatePropertyStorage();
840 case ReallocatePropertyStorage:
841 compileReallocatePropertyStorage();
843 case NukeStructureAndSetButterfly:
844 compileNukeStructureAndSetButterfly();
850 case CallStringConstructor:
851 compileToStringOrCallStringConstructor();
854 compileToPrimitive();
860 compileStringCharAt();
862 case StringCharCodeAt:
863 compileStringCharCodeAt();
865 case StringFromCharCode:
866 compileStringFromCharCode();
869 case GetGetterSetterByOffset:
870 compileGetByOffset();
878 case MultiGetByOffset:
879 compileMultiGetByOffset();
882 compilePutByOffset();
884 case MultiPutByOffset:
885 compileMultiPutByOffset();
888 case GetGlobalLexicalVariable:
889 compileGetGlobalVariable();
891 case PutGlobalVariable:
892 compilePutGlobalVariable();
895 compileNotifyWrite();
900 case GetArgumentCountIncludingThis:
901 compileGetArgumentCountIncludingThis();
909 case GetGlobalObject:
910 compileGetGlobalObject();
913 compileGetGlobalThis();
916 compileGetClosureVar();
919 compilePutClosureVar();
921 case GetFromArguments:
922 compileGetFromArguments();
925 compilePutToArguments();
928 compileGetArgument();
933 case CompareStrictEq:
934 compileCompareStrictEq();
937 compileCompareLess();
940 compileCompareLessEq();
943 compileCompareGreater();
945 case CompareGreaterEq:
946 compileCompareGreaterEq();
949 compileCompareBelow();
952 compileCompareBelowEq();
955 compileCompareEqPtr();
961 case TailCallInlinedCaller:
963 compileCallOrConstruct();
966 case DirectTailCallInlinedCaller:
967 case DirectConstruct:
969 compileDirectCallOrConstruct();
975 case CallForwardVarargs:
976 case TailCallVarargs:
977 case TailCallVarargsInlinedCaller:
978 case TailCallForwardVarargs:
979 case TailCallForwardVarargsInlinedCaller:
980 case ConstructVarargs:
981 case ConstructForwardVarargs:
982 compileCallOrConstructVarargs();
988 compileLoadVarargs();
991 compileForwardVarargs();
1002 case DFG::EntrySwitch:
1003 compileEntrySwitch();
1009 compileForceOSRExit();
1013 compileCPUIntrinsic();
1015 RELEASE_ASSERT_NOT_REACHED();
1021 case ThrowStaticError:
1022 compileThrowStaticError();
1024 case InvalidationPoint:
1025 compileInvalidationPoint();
1031 compileIsUndefined();
1039 case IsCellWithType:
1040 compileIsCellWithType();
1045 case NormalizeMapKey:
1046 compileNormalizeMapKey();
1049 compileGetMapBucket();
1051 case GetMapBucketHead:
1052 compileGetMapBucketHead();
1054 case GetMapBucketNext:
1055 compileGetMapBucketNext();
1057 case LoadKeyFromMapBucket:
1058 compileLoadKeyFromMapBucket();
1060 case LoadValueFromMapBucket:
1061 compileLoadValueFromMapBucket();
1070 compileWeakMapGet();
1075 case IsObjectOrNull:
1076 compileIsObjectOrNull();
1079 compileIsFunction();
1081 case IsTypedArrayView:
1082 compileIsTypedArrayView();
1090 case CheckTypeInfoFlags:
1091 compileCheckTypeInfoFlags();
1093 case OverridesHasInstance:
1094 compileOverridesHasInstance();
1097 compileInstanceOf();
1099 case InstanceOfCustom:
1100 compileInstanceOfCustom();
1102 case CountExecution:
1103 compileCountExecution();
1105 case SuperSamplerBegin:
1106 compileSuperSamplerBegin();
1108 case SuperSamplerEnd:
1109 compileSuperSamplerEnd();
1112 case FencedStoreBarrier:
1113 compileStoreBarrier();
1115 case HasIndexedProperty:
1116 compileHasIndexedProperty();
1118 case HasGenericProperty:
1119 compileHasGenericProperty();
1121 case HasStructureProperty:
1122 compileHasStructureProperty();
1124 case GetDirectPname:
1125 compileGetDirectPname();
1127 case GetEnumerableLength:
1128 compileGetEnumerableLength();
1130 case GetPropertyEnumerator:
1131 compileGetPropertyEnumerator();
1133 case GetEnumeratorStructurePname:
1134 compileGetEnumeratorStructurePname();
1136 case GetEnumeratorGenericPname:
1137 compileGetEnumeratorGenericPname();
1140 compileToIndexString();
1142 case CheckStructureImmediate:
1143 compileCheckStructureImmediate();
1145 case MaterializeNewObject:
1146 compileMaterializeNewObject();
1148 case MaterializeCreateActivation:
1149 compileMaterializeCreateActivation();
1152 if (Options::usePollingTraps())
1153 compileCheckTraps();
1156 compileCreateRest();
1159 compileGetRestLength();
1162 compileRegExpExec();
1165 compileRegExpTest();
1170 case SetFunctionName:
1171 compileSetFunctionName();
1174 case StringReplaceRegExp:
1175 compileStringReplace();
1177 case GetRegExpObjectLastIndex:
1178 compileGetRegExpObjectLastIndex();
1180 case SetRegExpObjectLastIndex:
1181 compileSetRegExpObjectLastIndex();
1183 case LogShadowChickenPrologue:
1184 compileLogShadowChickenPrologue();
1186 case LogShadowChickenTail:
1187 compileLogShadowChickenTail();
1189 case RecordRegExpCachedResult:
1190 compileRecordRegExpCachedResult();
1192 case ResolveScopeForHoistingFuncDeclInEval:
1193 compileResolveScopeForHoistingFuncDeclInEval();
1196 compileResolveScope();
1199 compileGetDynamicVar();
1202 compilePutDynamicVar();
1205 compileUnreachable();
1208 compileStringSlice();
1211 compileToLowerCase();
1213 case NumberToStringWithRadix:
1214 compileNumberToStringWithRadix();
1216 case NumberToStringWithValidRadixConstant:
1217 compileNumberToStringWithValidRadixConstant();
1220 compileCheckSubClass();
1226 compileCallDOMGetter();
1234 case PhantomNewObject:
1235 case PhantomNewFunction:
1236 case PhantomNewGeneratorFunction:
1237 case PhantomNewAsyncGeneratorFunction:
1238 case PhantomNewAsyncFunction:
1239 case PhantomCreateActivation:
1240 case PhantomDirectArguments:
1241 case PhantomCreateRest:
1243 case PhantomNewArrayWithSpread:
1244 case PhantomClonedArguments:
1248 case InitializeEntrypointArguments:
1251 DFG_CRASH(m_graph, m_node, "Unrecognized node in FTL backend");
1255 if (m_node->isTerminal())
1258 if (!m_state.isValid()) {
1259 safelyInvalidateAfterTermination();
1263 m_availabilityCalculator.executeNode(m_node);
1264 m_interpreter.executeEffects(nodeIndex);
// Lowers a DFG Upsilon: picks the lowered value of child1 in the
// representation dictated by the edge's use kind, anchors it in the current
// block, and wires it as an incoming value of the B3 Phi that was
// pre-created for the target DFG Phi node (see createPhiVariables()).
// NOTE(review): most of the `case ...UseKind:` labels (Double/Int32/Int52/
// Cell/JSValue, etc.) are not visible in this excerpt.
1269 void compileUpsilon()
1271 LValue upsilonValue = nullptr;
1272 switch (m_node->child1().useKind()) {
1274 upsilonValue = lowDouble(m_node->child1());
1278 upsilonValue = lowInt32(m_node->child1());
1281 upsilonValue = lowInt52(m_node->child1());
1284 case KnownBooleanUse:
1285 upsilonValue = lowBoolean(m_node->child1());
1289 upsilonValue = lowCell(m_node->child1());
1292 upsilonValue = lowJSValue(m_node->child1());
1295 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Anchor the value in this block, then register it as an incoming edge of
// the target Phi.
1298 ValueFromBlock upsilon = m_out.anchor(upsilonValue);
1299 LValue phiNode = m_phis.get(m_node->phi());
1300 m_out.addIncomingToPhi(phiNode, upsilon);
// NOTE(review): the enclosing function header is outside the visible lines;
// from context this appears to be the body of compilePhi() — confirm. It
// fetches the pre-created B3 Phi for the current node, appends it to the
// current block, and records it under the representation implied by the
// node's result flags (the per-case set*() calls are not visible here).
1305 LValue phi = m_phis.get(m_node);
1306 m_out.m_block->append(phi);
1308 switch (m_node->flags() & NodeResultMask) {
1309 case NodeResultDouble:
1312 case NodeResultInt32:
1315 case NodeResultInt52:
1318 case NodeResultBoolean:
1325 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers a DFG DoubleConstant: materializes the node's numeric value as a
// B3 double constant and records it as this node's Double representation.
1330 void compileDoubleConstant()
1332 setDouble(m_out.constDouble(m_node->asNumber()));
// Lowers a DFG Int52Constant: records both machine forms of the constant —
// the shifted Int52 representation (value << int52ShiftAmount) and the
// unshifted "strict" Int52 — so later users can pick whichever they need
// without re-converting.
1335 void compileInt52Constant()
1337 int64_t value = m_node->asAnyInt();
1339 setInt52(m_out.constInt64(value << JSValue::int52ShiftAmount));
1340 setStrictInt52(m_out.constInt64(value));
// Lowers a DFG LazyJSConstant: emits an Int64 patchpoint whose generator
// asks the LazyJSValue to materialize itself into the result GPR at code
// generation time (the value may not exist yet at lowering time). Marked
// effect-free so B3 can move/CSE it like any other constant.
1343 void compileLazyJSConstant()
1345 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
1346 LazyJSValue value = m_node->lazyJSValue();
1347 patchpoint->setGenerator(
// `value` is captured by copy; params[0] is the patchpoint's result.
1348 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
1349 value.emit(jit, JSValueRegs(params[0].gpr()));
1351 patchpoint->effects = Effects::none();
1352 setJSValue(patchpoint);
1355 void compileDoubleRep()
1357 switch (m_node->child1().useKind()) {
1358 case RealNumberUse: {
1359 LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);
1361 LValue doubleValue = unboxDouble(value);
1363 LBasicBlock intCase = m_out.newBlock();
1364 LBasicBlock continuation = m_out.newBlock();
1366 ValueFromBlock fastResult = m_out.anchor(doubleValue);
1368 m_out.doubleEqual(doubleValue, doubleValue),
1369 usually(continuation), rarely(intCase));
1371 LBasicBlock lastNext = m_out.appendTo(intCase, continuation);
1374 jsValueValue(value), m_node->child1(), SpecBytecodeRealNumber,
1375 isNotInt32(value, provenType(m_node->child1()) & ~SpecDoubleReal));
1376 ValueFromBlock slowResult = m_out.anchor(m_out.intToDouble(unboxInt32(value)));
1377 m_out.jump(continuation);
1379 m_out.appendTo(continuation, lastNext);
1381 setDouble(m_out.phi(Double, fastResult, slowResult));
1387 bool shouldConvertNonNumber = m_node->child1().useKind() == NotCellUse;
1389 LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);
1391 LBasicBlock intCase = m_out.newBlock();
1392 LBasicBlock doubleTesting = m_out.newBlock();
1393 LBasicBlock doubleCase = m_out.newBlock();
1394 LBasicBlock nonDoubleCase = m_out.newBlock();
1395 LBasicBlock continuation = m_out.newBlock();
1398 isNotInt32(value, provenType(m_node->child1())),
1399 unsure(doubleTesting), unsure(intCase));
1401 LBasicBlock lastNext = m_out.appendTo(intCase, doubleTesting);
1403 ValueFromBlock intToDouble = m_out.anchor(
1404 m_out.intToDouble(unboxInt32(value)));
1405 m_out.jump(continuation);
1407 m_out.appendTo(doubleTesting, doubleCase);
1408 LValue valueIsNumber = isNumber(value, provenType(m_node->child1()));
1409 m_out.branch(valueIsNumber, usually(doubleCase), rarely(nonDoubleCase));
1411 m_out.appendTo(doubleCase, nonDoubleCase);
1412 ValueFromBlock unboxedDouble = m_out.anchor(unboxDouble(value));
1413 m_out.jump(continuation);
1415 if (shouldConvertNonNumber) {
1416 LBasicBlock undefinedCase = m_out.newBlock();
1417 LBasicBlock testNullCase = m_out.newBlock();
1418 LBasicBlock nullCase = m_out.newBlock();
1419 LBasicBlock testBooleanTrueCase = m_out.newBlock();
1420 LBasicBlock convertBooleanTrueCase = m_out.newBlock();
1421 LBasicBlock convertBooleanFalseCase = m_out.newBlock();
1423 m_out.appendTo(nonDoubleCase, undefinedCase);
1424 LValue valueIsUndefined = m_out.equal(value, m_out.constInt64(ValueUndefined));
1425 m_out.branch(valueIsUndefined, unsure(undefinedCase), unsure(testNullCase));
1427 m_out.appendTo(undefinedCase, testNullCase);
1428 ValueFromBlock convertedUndefined = m_out.anchor(m_out.constDouble(PNaN));
1429 m_out.jump(continuation);
1431 m_out.appendTo(testNullCase, nullCase);
1432 LValue valueIsNull = m_out.equal(value, m_out.constInt64(ValueNull));
1433 m_out.branch(valueIsNull, unsure(nullCase), unsure(testBooleanTrueCase));
1435 m_out.appendTo(nullCase, testBooleanTrueCase);
1436 ValueFromBlock convertedNull = m_out.anchor(m_out.constDouble(0));
1437 m_out.jump(continuation);
1439 m_out.appendTo(testBooleanTrueCase, convertBooleanTrueCase);
1440 LValue valueIsBooleanTrue = m_out.equal(value, m_out.constInt64(ValueTrue));
1441 m_out.branch(valueIsBooleanTrue, unsure(convertBooleanTrueCase), unsure(convertBooleanFalseCase));
1443 m_out.appendTo(convertBooleanTrueCase, convertBooleanFalseCase);
1444 ValueFromBlock convertedTrue = m_out.anchor(m_out.constDouble(1));
1445 m_out.jump(continuation);
1447 m_out.appendTo(convertBooleanFalseCase, continuation);
1449 LValue valueIsNotBooleanFalse = m_out.notEqual(value, m_out.constInt64(ValueFalse));
1450 FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), ~SpecCellCheck, valueIsNotBooleanFalse);
1451 ValueFromBlock convertedFalse = m_out.anchor(m_out.constDouble(0));
1452 m_out.jump(continuation);
1454 m_out.appendTo(continuation, lastNext);
1455 setDouble(m_out.phi(Double, intToDouble, unboxedDouble, convertedUndefined, convertedNull, convertedTrue, convertedFalse));
1458 m_out.appendTo(nonDoubleCase, continuation);
1459 FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecBytecodeNumber, m_out.booleanTrue);
1460 m_out.unreachable();
1462 m_out.appendTo(continuation, lastNext);
1464 setDouble(m_out.phi(Double, intToDouble, unboxedDouble));
1469 setDouble(strictInt52ToDouble(lowStrictInt52(m_node->child1())));
1474 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers DFG DoubleAsInt32: converts the lowered double of child1 to an
// int32, with the negative-zero check enabled or not according to the
// node's arithmetic mode.
1478 void compileDoubleAsInt32()
1480 LValue integerValue = convertDoubleToInt32(lowDouble(m_node->child1()), shouldCheckNegativeZero(m_node->arithMode()));
1481 setInt32(integerValue);
// Lowers DFG ValueRep: converts a machine representation (double or strict
// Int52) back into a boxed JSValue.
1484 void compileValueRep()
1486 switch (m_node->child1().useKind()) {
1487 case DoubleRepUse: {
1488 LValue value = lowDouble(m_node->child1());
// NaN purification: select(value == value, value, PNaN) replaces any NaN
// bit pattern with the canonical PNaN before boxing. Presumably this is
// needed because impure NaN payloads could alias the tagged-value
// encoding — confirm against JSValue boxing docs.
1490 if (m_interpreter.needsTypeCheck(m_node->child1(), ~SpecDoubleImpureNaN)) {
1491 value = m_out.select(
1492 m_out.doubleEqual(value, value), value, m_out.constDouble(PNaN));
1495 setJSValue(boxDouble(value));
// (Int52 case — label not visible in this excerpt.)
1500 setJSValue(strictInt52ToJSValue(lowStrictInt52(m_node->child1())));
1505 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers Int52Rep: produces a strict Int52 from an int32 (sign extension),
// a JSValue (speculated conversion), or a DoubleRepAnyInt.
1509 void compileInt52Rep()
1511 switch (m_node->child1().useKind()) {
1513 setStrictInt52(m_out.signExt32To64(lowInt32(m_node->child1())));
1518 jsValueToStrictInt52(
1519 m_node->child1(), lowJSValue(m_node->child1(), ManualOperandSpeculation)));
1522 case DoubleRepAnyIntUse:
1524 doubleToStrictInt52(
1525 m_node->child1(), lowDouble(m_node->child1())));
1529 RELEASE_ASSERT_NOT_REACHED();
// Lowers ValueToInt32: truncates the child to int32, preferring any
// representation of the child that has already been lowered in this block.
1533 void compileValueToInt32()
1535 switch (m_node->child1().useKind()) {
1537 setInt32(m_out.castToInt32(lowStrictInt52(m_node->child1())));
1541 setInt32(doubleToInt32(lowDouble(m_node->child1())));
// Fast path: reuse an already-available int32 lowering of the child.
1546 LoweredNodeValue value = m_int32Values.get(m_node->child1().node());
1547 if (isValid(value)) {
1548 setInt32(value.value());
// Next best: reuse an already-available boxed JSValue lowering.
1552 value = m_jsValueValues.get(m_node->child1().node());
1553 if (isValid(value)) {
1554 setInt32(numberOrNotCellToInt32(m_node->child1(), value.value()));
1558 // We'll basically just get here for constants. But it's good to have this
1559 // catch-all since we often add new representations into the mix.
1561 numberOrNotCellToInt32(
1563 lowJSValue(m_node->child1(), ManualOperandSpeculation)));
1568 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers BooleanToNumber: converts a boolean to the number 0 or 1. For the
// untyped case, non-boolean inputs pass through unchanged.
1573 void compileBooleanToNumber()
1575 switch (m_node->child1().useKind()) {
1577 setInt32(m_out.zeroExt(lowBoolean(m_node->child1()), Int32));
1582 LValue value = lowJSValue(m_node->child1());
1584 if (!m_interpreter.needsTypeCheck(m_node->child1(), SpecBoolInt32 | SpecBoolean)) {
// Proven boolean-or-bool-int32: the low bit is the 0/1 payload.
1585 setInt32(m_out.bitAnd(m_out.castToInt32(value), m_out.int32One));
1589 LBasicBlock booleanCase = m_out.newBlock();
1590 LBasicBlock continuation = m_out.newBlock();
// Non-boolean values are returned as-is.
1592 ValueFromBlock notBooleanResult = m_out.anchor(value);
1594 isBoolean(value, provenType(m_node->child1())),
1595 unsure(booleanCase), unsure(continuation));
// Boolean case: zero-extend the unboxed 0/1 and re-box it as an int32
// JSValue by or-ing in the number tag.
1597 LBasicBlock lastNext = m_out.appendTo(booleanCase, continuation);
1598 ValueFromBlock booleanResult = m_out.anchor(m_out.bitOr(
1599 m_out.zeroExt(unboxBoolean(value), Int64), m_tagTypeNumber));
1600 m_out.jump(continuation);
1602 m_out.appendTo(continuation, lastNext);
1603 setJSValue(m_out.phi(Int64, booleanResult, notBooleanResult));
1608 RELEASE_ASSERT_NOT_REACHED();
// Lowers ExtractOSREntryLocal: loads a local's value from the FTL-for-OSR-entry
// scratch buffer at the slot for the node's unlinked local.
1613 void compileExtractOSREntryLocal()
1615 EncodedJSValue* buffer = static_cast<EncodedJSValue*>(
1616 m_ftlState.jitCode->ftlForOSREntry()->entryBuffer()->dataBuffer());
1617 setJSValue(m_out.load64(m_out.absolute(buffer + m_node->unlinkedLocal().toLocal())));
// Lowers ExtractCatchLocal: loads a value out of the catch OSR entry buffer
// at the node's catch-entry index.
1620 void compileExtractCatchLocal()
1622 EncodedJSValue* buffer = static_cast<EncodedJSValue*>(m_ftlState.jitCode->common.catchOSREntryBuffer->dataBuffer());
1623 setJSValue(m_out.load64(m_out.absolute(buffer + m_node->catchOSREntryIndex())));
// Lowers GetStack: reads a flushed local from the stack. Loads 32 bits when
// the abstract value proves int32, otherwise the full 64-bit JSValue.
1626 void compileGetStack()
1628 StackAccessData* data = m_node->stackAccessData();
1629 AbstractValue& value = m_state.variables().operand(data->local);
1631 DFG_ASSERT(m_graph, m_node, isConcrete(data->format));
1632 DFG_ASSERT(m_graph, m_node, data->format != FlushedDouble); // This just happens to not arise for GetStacks, right now. It would be trivial to support.
1634 if (isInt32Speculation(value.m_type))
1635 setInt32(m_out.load32(payloadFor(data->machineLocal)));
1637 setJSValue(m_out.load64(addressFor(data->machineLocal)));
// Lowers PutStack: stores the child's value to its machine stack slot, using
// the store width/representation dictated by the flush format.
1640 void compilePutStack()
1642 StackAccessData* data = m_node->stackAccessData();
1643 switch (data->format) {
1644 case FlushedJSValue: {
1645 LValue value = lowJSValue(m_node->child1());
1646 m_out.store64(value, addressFor(data->machineLocal));
1650 case FlushedDouble: {
1651 LValue value = lowDouble(m_node->child1());
1652 m_out.storeDouble(value, addressFor(data->machineLocal));
1656 case FlushedInt32: {
1657 LValue value = lowInt32(m_node->child1());
// Int32 only needs the payload half of the slot.
1658 m_out.store32(value, payloadFor(data->machineLocal));
1662 case FlushedInt52: {
1663 LValue value = lowInt52(m_node->child1());
1664 m_out.store64(value, addressFor(data->machineLocal));
1669 LValue value = lowCell(m_node->child1());
1670 m_out.store64(value, addressFor(data->machineLocal));
1674 case FlushedBoolean: {
// Speculate first, then store the already-boxed value unmodified.
1675 speculateBoolean(m_node->child1());
1677 lowJSValue(m_node->child1(), ManualOperandSpeculation),
1678 addressFor(data->machineLocal));
1683 DFG_CRASH(m_graph, m_node, "Bad flush format");
1690 DFG_NODE_DO_TO_CHILDREN(m_graph, m_node, speculate);
// Lowers ToObject / CallObjectConstructor: values that are already objects
// pass through on the fast path; everything else goes to a VM call
// (operationToObject or operationCallObjectConstructor depending on the op).
1693 void compileToObjectOrCallObjectConstructor()
1695 LValue value = lowJSValue(m_node->child1());
1697 LBasicBlock isCellCase = m_out.newBlock();
1698 LBasicBlock slowCase = m_out.newBlock();
1699 LBasicBlock continuation = m_out.newBlock();
1701 m_out.branch(isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));
1703 LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
1704 ValueFromBlock fastResult = m_out.anchor(value);
1705 m_out.branch(isObject(value), usually(continuation), rarely(slowCase));
1707 m_out.appendTo(slowCase, continuation);
1709 ValueFromBlock slowResult;
1710 if (m_node->op() == ToObject) {
// ToObject carries an identifier (for the TypeError message context).
1711 auto* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
1712 slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationToObject), m_callFrame, weakPointer(globalObject), value, m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
1714 slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationCallObjectConstructor), m_callFrame, frozenPointer(m_node->cellOperand()), value));
1715 m_out.jump(continuation);
1717 m_out.appendTo(continuation, lastNext);
1718 setJSValue(m_out.phi(Int64, fastResult, slowResult));
// Lowers ToThis: cells whose type-info flags do not have OverridesToThis are
// returned unchanged; otherwise we call operationToThis(Strict), selected by
// the strictness of the semantic origin.
1721 void compileToThis()
1723 LValue value = lowJSValue(m_node->child1());
1725 LBasicBlock isCellCase = m_out.newBlock();
1726 LBasicBlock slowCase = m_out.newBlock();
1727 LBasicBlock continuation = m_out.newBlock();
1730 isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));
1732 LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
1733 ValueFromBlock fastResult = m_out.anchor(value);
// Check the OverridesToThis bit in the cell's type-info flags.
1736 m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoFlags),
1737 m_out.constInt32(OverridesToThis)),
1738 usually(continuation), rarely(slowCase));
1740 m_out.appendTo(slowCase, continuation);
1741 J_JITOperation_EJ function;
1742 if (m_graph.isStrictModeFor(m_node->origin.semantic))
1743 function = operationToThisStrict;
1745 function = operationToThis;
1746 ValueFromBlock slowResult = m_out.anchor(
1747 vmCall(Int64, m_out.operation(function), m_callFrame, value));
1748 m_out.jump(continuation);
1750 m_out.appendTo(continuation, lastNext);
1751 setJSValue(m_out.phi(Int64, fastResult, slowResult));
// Lowers ValueAdd via a JITAddIC math inline cache, seeded with the baseline
// arith profile for this bytecode.
1754 void compileValueAdd()
1756 ArithProfile* arithProfile = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic)->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
1757 JITAddIC* addIC = codeBlock()->addJITAddIC(arithProfile);
1758 auto repatchingFunction = operationValueAddOptimize;
1759 auto nonRepatchingFunction = operationValueAdd;
1760 compileMathIC(addIC, repatchingFunction, nonRepatchingFunction);
// Emits a unary math inline cache (IC) through a B3 patchpoint. If the IC's
// fast path can be generated inline, its slow-path jumps are linked to a late
// path that calls the repatching or non-repatching operation; otherwise the
// operation is called directly. The patchpoint result is the boxed JSValue.
1763 template <typename Generator>
1764 void compileMathIC(JITUnaryMathIC<Generator>* mathIC, FunctionPtr repatchingFunction, FunctionPtr nonRepatchingFunction)
1766 Node* node = m_node;
1768 LValue operand = lowJSValue(node->child1());
1770 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
1771 patchpoint->appendSomeRegister(operand);
// The tag registers must hold their canonical values across the IC.
1772 patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
1773 patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
1774 RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
1775 patchpoint->numGPScratchRegisters = 1;
1776 patchpoint->clobber(RegisterSet::macroScratchRegisters());
1777 State* state = &m_ftlState;
1778 patchpoint->setGenerator(
1779 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
1780 AllowMacroScratchRegisterUsage allowScratch(jit);
1782 Box<CCallHelpers::JumpList> exceptions =
1783 exceptionHandle->scheduleExitCreation(params)->jumps(jit);
1785 #if ENABLE(MATH_IC_STATS)
1786 auto inlineStart = jit.label();
1789 Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
1790 mathIC->m_generator = Generator(JSValueRegs(params[0].gpr()), JSValueRegs(params[1].gpr()), params.gpScratch(0));
1792 bool shouldEmitProfiling = false;
1793 bool generatedInline = mathIC->generateInline(jit, *mathICGenerationState, shouldEmitProfiling);
1795 if (generatedInline) {
1796 ASSERT(!mathICGenerationState->slowPathJumps.empty());
1797 auto done = jit.label();
// Slow path lives on a late path: it calls out and jumps back to 'done'.
1798 params.addLatePath([=] (CCallHelpers& jit) {
1799 AllowMacroScratchRegisterUsage allowScratch(jit);
1800 mathICGenerationState->slowPathJumps.link(&jit);
1801 mathICGenerationState->slowPathStart = jit.label();
1802 #if ENABLE(MATH_IC_STATS)
1803 auto slowPathStart = jit.label();
1806 if (mathICGenerationState->shouldSlowPathRepatch) {
1807 SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
1808 repatchingFunction, params[0].gpr(), params[1].gpr(), CCallHelpers::TrustedImmPtr(mathIC));
1809 mathICGenerationState->slowPathCall = call.call();
1811 SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic,
1812 exceptions.get(), nonRepatchingFunction, params[0].gpr(), params[1].gpr());
1813 mathICGenerationState->slowPathCall = call.call();
1815 jit.jump().linkTo(done, &jit);
1817 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
1818 mathIC->finalizeInlineCode(*mathICGenerationState, linkBuffer);
1821 #if ENABLE(MATH_IC_STATS)
1822 auto slowPathEnd = jit.label();
1823 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
1824 size_t size = static_cast<char*>(linkBuffer.locationOf(slowPathEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(slowPathStart).executableAddress());
1825 mathIC->m_generatedCodeSize += size;
// Inline generation failed: call the non-repatching operation directly.
1831 *state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
1832 nonRepatchingFunction, params[0].gpr(), params[1].gpr());
1835 #if ENABLE(MATH_IC_STATS)
1836 auto inlineEnd = jit.label();
1837 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
1838 size_t size = static_cast<char*>(linkBuffer.locationOf(inlineEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(inlineStart).executableAddress());
1839 mathIC->m_generatedCodeSize += size;
1844 setJSValue(patchpoint);
// Binary counterpart of the unary math-IC lowering above: emits a binary math
// inline cache through a B3 patchpoint. The generator gets snippet operands
// built from the abstract result types of both children, plus one GP and two
// FP scratch registers. Slow-path/late-path structure mirrors the unary case.
1847 template <typename Generator>
1848 void compileMathIC(JITBinaryMathIC<Generator>* mathIC, FunctionPtr repatchingFunction, FunctionPtr nonRepatchingFunction)
1850 Node* node = m_node;
1852 LValue left = lowJSValue(node->child1());
1853 LValue right = lowJSValue(node->child2());
1855 SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
1856 SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());
1858 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
1859 patchpoint->appendSomeRegister(left);
1860 patchpoint->appendSomeRegister(right);
// The tag registers must hold their canonical values across the IC.
1861 patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
1862 patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
1863 RefPtr<PatchpointExceptionHandle> exceptionHandle =
1864 preparePatchpointForExceptions(patchpoint);
1865 patchpoint->numGPScratchRegisters = 1;
1866 patchpoint->numFPScratchRegisters = 2;
1867 patchpoint->clobber(RegisterSet::macroScratchRegisters());
1868 State* state = &m_ftlState;
1869 patchpoint->setGenerator(
1870 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
1871 AllowMacroScratchRegisterUsage allowScratch(jit);
1873 Box<CCallHelpers::JumpList> exceptions =
1874 exceptionHandle->scheduleExitCreation(params)->jumps(jit);
1876 #if ENABLE(MATH_IC_STATS)
1877 auto inlineStart = jit.label();
1880 Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
1881 mathIC->m_generator = Generator(leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
1882 JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), params.fpScratch(0),
1883 params.fpScratch(1), params.gpScratch(0), InvalidFPRReg);
1885 bool shouldEmitProfiling = false;
1886 bool generatedInline = mathIC->generateInline(jit, *mathICGenerationState, shouldEmitProfiling);
1888 if (generatedInline) {
1889 ASSERT(!mathICGenerationState->slowPathJumps.empty());
1890 auto done = jit.label();
// Slow path lives on a late path: it calls out and jumps back to 'done'.
1891 params.addLatePath([=] (CCallHelpers& jit) {
1892 AllowMacroScratchRegisterUsage allowScratch(jit);
1893 mathICGenerationState->slowPathJumps.link(&jit);
1894 mathICGenerationState->slowPathStart = jit.label();
1895 #if ENABLE(MATH_IC_STATS)
1896 auto slowPathStart = jit.label();
1899 if (mathICGenerationState->shouldSlowPathRepatch) {
1900 SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
1901 repatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr(), CCallHelpers::TrustedImmPtr(mathIC));
1902 mathICGenerationState->slowPathCall = call.call();
1904 SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic,
1905 exceptions.get(), nonRepatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr());
1906 mathICGenerationState->slowPathCall = call.call();
1908 jit.jump().linkTo(done, &jit);
1910 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
1911 mathIC->finalizeInlineCode(*mathICGenerationState, linkBuffer);
1914 #if ENABLE(MATH_IC_STATS)
1915 auto slowPathEnd = jit.label();
1916 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
1917 size_t size = static_cast<char*>(linkBuffer.locationOf(slowPathEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(slowPathStart).executableAddress());
1918 mathIC->m_generatedCodeSize += size;
// Inline generation failed: call the non-repatching operation directly.
1924 *state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
1925 nonRepatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr());
1928 #if ENABLE(MATH_IC_STATS)
1929 auto inlineEnd = jit.label();
1930 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
1931 size_t size = static_cast<char*>(linkBuffer.locationOf(inlineEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(inlineStart).executableAddress());
1932 mathIC->m_generatedCodeSize += size;
1937 setJSValue(patchpoint);
// Lowers StrCat: calls operationStrCat3 when a third child is present,
// otherwise operationStrCat2. Operands are lowered with manual speculation.
1940 void compileStrCat()
1943 if (m_node->child3()) {
1945 Int64, m_out.operation(operationStrCat3), m_callFrame,
1946 lowJSValue(m_node->child1(), ManualOperandSpeculation),
1947 lowJSValue(m_node->child2(), ManualOperandSpeculation),
1948 lowJSValue(m_node->child3(), ManualOperandSpeculation));
1951 Int64, m_out.operation(operationStrCat2), m_callFrame,
1952 lowJSValue(m_node->child1(), ManualOperandSpeculation),
1953 lowJSValue(m_node->child2(), ManualOperandSpeculation));
// Lowers ArithAdd/ArithSub for Int32, Int52, double, and untyped (math IC)
// use kinds. Overflow checks are emitted only when the arith mode asks.
1958 void compileArithAddOrSub()
1960 bool isSub = m_node->op() == ArithSub;
1961 switch (m_node->binaryUseKind()) {
1963 LValue left = lowInt32(m_node->child1());
1964 LValue right = lowInt32(m_node->child2());
1966 if (!shouldCheckOverflow(m_node->arithMode())) {
1967 setInt32(isSub ? m_out.sub(left, right) : m_out.add(left, right));
1971 CheckValue* result =
1972 isSub ? m_out.speculateSub(left, right) : m_out.speculateAdd(left, right);
1973 blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
// Int52 fast path: if neither child can be a full Int52-only value, the
// operation cannot overflow the Int52 range, so no check is needed.
1979 if (!abstractValue(m_node->child1()).couldBeType(SpecInt52Only)
1980 && !abstractValue(m_node->child2()).couldBeType(SpecInt52Only)) {
1982 LValue left = lowWhicheverInt52(m_node->child1(), kind);
1983 LValue right = lowInt52(m_node->child2(), kind);
1984 setInt52(isSub ? m_out.sub(left, right) : m_out.add(left, right), kind);
1988 LValue left = lowInt52(m_node->child1());
1989 LValue right = lowInt52(m_node->child2());
1990 CheckValue* result =
1991 isSub ? m_out.speculateSub(left, right) : m_out.speculateAdd(left, right);
1992 blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
1997 case DoubleRepUse: {
1998 LValue C1 = lowDouble(m_node->child1());
1999 LValue C2 = lowDouble(m_node->child2());
2001 setDouble(isSub ? m_out.doubleSub(C1, C2) : m_out.doubleAdd(C1, C2));
2007 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Untyped: go through a JITSubIC math inline cache.
2011 ArithProfile* arithProfile = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic)->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
2012 JITSubIC* subIC = codeBlock()->addJITSubIC(arithProfile);
2013 auto repatchingFunction = operationValueSubOptimize;
2014 auto nonRepatchingFunction = operationValueSub;
2015 compileMathIC(subIC, repatchingFunction, nonRepatchingFunction);
2020 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers ArithClz32: count-leading-zeros on an int32 directly, or a VM call
// (operationArithClz32) for the untyped case.
2025 void compileArithClz32()
2027 if (m_node->child1().useKind() == Int32Use || m_node->child1().useKind() == KnownInt32Use) {
2028 LValue operand = lowInt32(m_node->child1());
2029 setInt32(m_out.ctlz32(operand));
2032 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2033 LValue argument = lowJSValue(m_node->child1());
2034 LValue result = vmCall(Int32, m_out.operation(operationArithClz32), m_callFrame, argument);
// Lowers ArithMul for Int32, Int52, double, and untyped (math IC) use kinds.
// Overflow and negative-zero checks depend on the node's arith mode.
2038 void compileArithMul()
2040 switch (m_node->binaryUseKind()) {
2042 LValue left = lowInt32(m_node->child1());
2043 LValue right = lowInt32(m_node->child2());
2047 if (!shouldCheckOverflow(m_node->arithMode()))
2048 result = m_out.mul(left, right);
2050 CheckValue* speculation = m_out.speculateMul(left, right);
2051 blessSpeculation(speculation, Overflow, noValue(), nullptr, m_origin);
2052 result = speculation;
// A zero product is -0 iff either operand was negative; OSR exit then.
2055 if (shouldCheckNegativeZero(m_node->arithMode())) {
2056 LBasicBlock slowCase = m_out.newBlock();
2057 LBasicBlock continuation = m_out.newBlock();
2060 m_out.notZero32(result), usually(continuation), rarely(slowCase));
2062 LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
2063 speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int32Zero));
2064 speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int32Zero));
2065 m_out.jump(continuation);
2066 m_out.appendTo(continuation, lastNext);
2075 LValue left = lowWhicheverInt52(m_node->child1(), kind);
2076 LValue right = lowInt52(m_node->child2(), opposite(kind));
2078 CheckValue* result = m_out.speculateMul(left, right);
2079 blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
// Same -0 detection for the Int52 case, using 64-bit comparisons.
2081 if (shouldCheckNegativeZero(m_node->arithMode())) {
2082 LBasicBlock slowCase = m_out.newBlock();
2083 LBasicBlock continuation = m_out.newBlock();
2086 m_out.notZero64(result), usually(continuation), rarely(slowCase));
2088 LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
2089 speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int64Zero));
2090 speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int64Zero));
2091 m_out.jump(continuation);
2092 m_out.appendTo(continuation, lastNext);
2099 case DoubleRepUse: {
2101 m_out.doubleMul(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
// Untyped: go through a JITMulIC math inline cache.
2106 ArithProfile* arithProfile = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic)->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
2107 JITMulIC* mulIC = codeBlock()->addJITMulIC(arithProfile);
2108 auto repatchingFunction = operationValueMulOptimize;
2109 auto nonRepatchingFunction = operationValueMul;
2110 compileMathIC(mulIC, repatchingFunction, nonRepatchingFunction);
2115 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers ArithDiv. The Int32 path guards against -0 results (0 / negative),
// division by zero, and INT_MIN / -1 overflow before using hardware divide;
// when overflow checks are off it uses a "chill" div that tolerates those.
2120 void compileArithDiv()
2122 switch (m_node->binaryUseKind()) {
2124 LValue numerator = lowInt32(m_node->child1());
2125 LValue denominator = lowInt32(m_node->child2());
// 0 / negative would produce -0, which is not representable as int32.
2127 if (shouldCheckNegativeZero(m_node->arithMode())) {
2128 LBasicBlock zeroNumerator = m_out.newBlock();
2129 LBasicBlock numeratorContinuation = m_out.newBlock();
2132 m_out.isZero32(numerator),
2133 rarely(zeroNumerator), usually(numeratorContinuation));
2135 LBasicBlock innerLastNext = m_out.appendTo(zeroNumerator, numeratorContinuation);
2138 NegativeZero, noValue(), 0, m_out.lessThan(denominator, m_out.int32Zero));
2140 m_out.jump(numeratorContinuation);
2142 m_out.appendTo(numeratorContinuation, innerLastNext);
2145 if (shouldCheckOverflow(m_node->arithMode())) {
2146 LBasicBlock unsafeDenominator = m_out.newBlock();
2147 LBasicBlock continuation = m_out.newBlock();
// denominator+1 > 1 (unsigned) is true except for 0 and -1, the two
// denominators that can fault or overflow the hardware divide.
2149 LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
2151 m_out.above(adjustedDenominator, m_out.int32One),
2152 usually(continuation), rarely(unsafeDenominator));
2154 LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
2155 LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
2156 speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator));
2157 speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31));
2158 m_out.jump(continuation);
2160 m_out.appendTo(continuation, lastNext);
2161 LValue result = m_out.div(numerator, denominator);
// An inexact quotient means the true result is not an int32; OSR exit.
2163 Overflow, noValue(), 0,
2164 m_out.notEqual(m_out.mul(result, denominator), numerator));
2167 setInt32(m_out.chillDiv(numerator, denominator));
2172 case DoubleRepUse: {
2173 setDouble(m_out.doubleDiv(
2174 lowDouble(m_node->child1()), lowDouble(m_node->child2())));
2179 emitBinarySnippet<JITDivGenerator, NeedScratchFPR>(operationValueDiv);
2184 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers ArithMod. Mirrors ArithDiv's Int32 guards: checked mode excludes the
// 0 and -1 denominators (and INT_MIN numerator) before hardware mod; chill
// mode tolerates them. Negative-zero mode exits when a negative numerator
// yields a zero remainder (which would be -0 as a double).
2189 void compileArithMod()
2191 switch (m_node->binaryUseKind()) {
2193 LValue numerator = lowInt32(m_node->child1());
2194 LValue denominator = lowInt32(m_node->child2());
2197 if (shouldCheckOverflow(m_node->arithMode())) {
2198 LBasicBlock unsafeDenominator = m_out.newBlock();
2199 LBasicBlock continuation = m_out.newBlock();
// denominator+1 > 1 (unsigned) filters out 0 and -1 denominators.
2201 LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
2203 m_out.above(adjustedDenominator, m_out.int32One),
2204 usually(continuation), rarely(unsafeDenominator));
2206 LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
2207 LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
2208 speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator));
2209 speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31));
2210 m_out.jump(continuation);
2212 m_out.appendTo(continuation, lastNext);
2213 LValue result = m_out.mod(numerator, denominator);
2216 remainder = m_out.chillMod(numerator, denominator);
2218 if (shouldCheckNegativeZero(m_node->arithMode())) {
2219 LBasicBlock negativeNumerator = m_out.newBlock();
2220 LBasicBlock numeratorContinuation = m_out.newBlock();
2223 m_out.lessThan(numerator, m_out.int32Zero),
2224 unsure(negativeNumerator), unsure(numeratorContinuation));
2226 LBasicBlock innerLastNext = m_out.appendTo(negativeNumerator, numeratorContinuation);
2228 speculate(NegativeZero, noValue(), 0, m_out.isZero32(remainder));
2230 m_out.jump(numeratorContinuation);
2232 m_out.appendTo(numeratorContinuation, innerLastNext);
2235 setInt32(remainder);
2239 case DoubleRepUse: {
2241 m_out.doubleMod(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
2246 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers ArithMin/ArithMax. The double path is built so that if neither
// ordered comparison holds (i.e. an operand is NaN), the result is PNaN.
2251 void compileArithMinOrMax()
2253 switch (m_node->binaryUseKind()) {
2255 LValue left = lowInt32(m_node->child1());
2256 LValue right = lowInt32(m_node->child2());
2260 m_node->op() == ArithMin
2261 ? m_out.lessThan(left, right)
2262 : m_out.lessThan(right, left),
2267 case DoubleRepUse: {
2268 LValue left = lowDouble(m_node->child1());
2269 LValue right = lowDouble(m_node->child2());
2271 LBasicBlock notLessThan = m_out.newBlock();
2272 LBasicBlock continuation = m_out.newBlock();
2274 Vector<ValueFromBlock, 2> results;
// Fast case: 'left' already is the min (or max); take it directly.
2276 results.append(m_out.anchor(left));
2278 m_node->op() == ArithMin
2279 ? m_out.doubleLessThan(left, right)
2280 : m_out.doubleGreaterThan(left, right),
2281 unsure(continuation), unsure(notLessThan));
// Otherwise pick 'right' if the reverse ordered comparison holds;
// if it doesn't (NaN involved), produce PNaN.
2283 LBasicBlock lastNext = m_out.appendTo(notLessThan, continuation);
2284 results.append(m_out.anchor(m_out.select(
2285 m_node->op() == ArithMin
2286 ? m_out.doubleGreaterThanOrEqual(left, right)
2287 : m_out.doubleLessThanOrEqual(left, right),
2288 right, m_out.constDouble(PNaN))));
2289 m_out.jump(continuation);
2291 m_out.appendTo(continuation, lastNext);
2292 setDouble(m_out.phi(Double, results));
2297 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers ArithAbs. The Int32 path uses the branchless sign-mask trick; the
// overflow check catches abs(INT_MIN), whose result is still negative.
2302 void compileArithAbs()
2304 switch (m_node->child1().useKind()) {
2306 LValue value = lowInt32(m_node->child1());
// mask = value >> 31 (all ones if negative, zero otherwise);
// (value + mask) ^ mask == abs(value) for all non-INT_MIN inputs.
2308 LValue mask = m_out.aShr(value, m_out.constInt32(31));
2309 LValue result = m_out.bitXor(mask, m_out.add(mask, value));
2311 if (shouldCheckOverflow(m_node->arithMode()))
2312 speculate(Overflow, noValue(), 0, m_out.lessThan(result, m_out.int32Zero));
2318 case DoubleRepUse: {
2319 setDouble(m_out.doubleAbs(lowDouble(m_node->child1())));
2324 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2325 LValue argument = lowJSValue(m_node->child1());
2326 LValue result = vmCall(Double, m_out.operation(operationArithAbs), m_callFrame, argument);
// Lowers ArithUnary (sin/cos/etc. keyed by arithUnaryType): a direct double
// unary op for DoubleRepUse, otherwise a VM call to the matching operation.
2333 void compileArithUnary()
2335 if (m_node->child1().useKind() == DoubleRepUse) {
2336 setDouble(m_out.doubleUnary(m_node->arithUnaryType(), lowDouble(m_node->child1())));
2339 LValue argument = lowJSValue(m_node->child1());
2340 LValue result = vmCall(Double, m_out.operation(DFG::arithUnaryOperation(m_node->arithUnaryType())), m_callFrame, argument);
// Lowers ArithPow. Small non-negative integer exponents use doublePowi; the
// special exponents 0.5 / -0.5 are routed through sqrt / 1/sqrt with explicit
// handling of the -0, +/-Infinity, and NaN corner cases that the spec
// (Math.pow semantics) requires; everything else falls back to doublePow.
2344 void compileArithPow()
2346 if (m_node->child2().useKind() == Int32Use)
2347 setDouble(m_out.doublePowi(lowDouble(m_node->child1()), lowInt32(m_node->child2())));
2349 LValue base = lowDouble(m_node->child1());
2350 LValue exponent = lowDouble(m_node->child2());
2352 LBasicBlock integerExponentIsSmallBlock = m_out.newBlock();
2353 LBasicBlock integerExponentPowBlock = m_out.newBlock();
2354 LBasicBlock doubleExponentPowBlockEntry = m_out.newBlock();
2355 LBasicBlock nanExceptionBaseIsOne = m_out.newBlock();
2356 LBasicBlock nanExceptionExponentIsInfinity = m_out.newBlock();
2357 LBasicBlock testExponentIsOneHalf = m_out.newBlock();
2358 LBasicBlock handleBaseZeroExponentIsOneHalf = m_out.newBlock();
2359 LBasicBlock handleInfinityForExponentIsOneHalf = m_out.newBlock();
2360 LBasicBlock exponentIsOneHalfNormal = m_out.newBlock();
2361 LBasicBlock exponentIsOneHalfInfinity = m_out.newBlock();
2362 LBasicBlock testExponentIsNegativeOneHalf = m_out.newBlock();
2363 LBasicBlock testBaseZeroExponentIsNegativeOneHalf = m_out.newBlock();
2364 LBasicBlock handleBaseZeroExponentIsNegativeOneHalf = m_out.newBlock();
2365 LBasicBlock handleInfinityForExponentIsNegativeOneHalf = m_out.newBlock();
2366 LBasicBlock exponentIsNegativeOneHalfNormal = m_out.newBlock();
2367 LBasicBlock exponentIsNegativeOneHalfInfinity = m_out.newBlock();
2368 LBasicBlock powBlock = m_out.newBlock();
2369 LBasicBlock nanExceptionResultIsNaN = m_out.newBlock();
2370 LBasicBlock continuation = m_out.newBlock();
// Integer-valued exponent? (round-trips through int conversion unchanged)
2372 LValue integerExponent = m_out.doubleToInt(exponent);
2373 LValue integerExponentConvertedToDouble = m_out.intToDouble(integerExponent);
2374 LValue exponentIsInteger = m_out.doubleEqual(exponent, integerExponentConvertedToDouble);
2375 m_out.branch(exponentIsInteger, unsure(integerExponentIsSmallBlock), unsure(doubleExponentPowBlockEntry));
2377 LBasicBlock lastNext = m_out.appendTo(integerExponentIsSmallBlock, integerExponentPowBlock);
2378 LValue integerExponentBelowMax = m_out.belowOrEqual(integerExponent, m_out.constInt32(maxExponentForIntegerMathPow));
2379 m_out.branch(integerExponentBelowMax, usually(integerExponentPowBlock), rarely(doubleExponentPowBlockEntry));
2381 m_out.appendTo(integerExponentPowBlock, doubleExponentPowBlockEntry);
2382 ValueFromBlock powDoubleIntResult = m_out.anchor(m_out.doublePowi(base, integerExponent));
2383 m_out.jump(continuation);
2385 // If y is NaN, the result is NaN.
2386 m_out.appendTo(doubleExponentPowBlockEntry, nanExceptionBaseIsOne);
2387 LValue exponentIsNaN;
2388 if (provenType(m_node->child2()) & SpecDoubleNaN)
2389 exponentIsNaN = m_out.doubleNotEqualOrUnordered(exponent, exponent);
2391 exponentIsNaN = m_out.booleanFalse;
2392 m_out.branch(exponentIsNaN, rarely(nanExceptionResultIsNaN), usually(nanExceptionBaseIsOne));
2394 // If abs(x) is 1 and y is +infinity, the result is NaN.
2395 // If abs(x) is 1 and y is -infinity, the result is NaN.
2397 // Test if base == 1.
2398 m_out.appendTo(nanExceptionBaseIsOne, nanExceptionExponentIsInfinity);
2399 LValue absoluteBase = m_out.doubleAbs(base);
2400 LValue absoluteBaseIsOne = m_out.doubleEqual(absoluteBase, m_out.constDouble(1));
2401 m_out.branch(absoluteBaseIsOne, rarely(nanExceptionExponentIsInfinity), usually(testExponentIsOneHalf));
2403 // Test if abs(y) == Infinity.
2404 m_out.appendTo(nanExceptionExponentIsInfinity, testExponentIsOneHalf);
2405 LValue absoluteExponent = m_out.doubleAbs(exponent);
2406 LValue absoluteExponentIsInfinity = m_out.doubleEqual(absoluteExponent, m_out.constDouble(std::numeric_limits<double>::infinity()));
2407 m_out.branch(absoluteExponentIsInfinity, rarely(nanExceptionResultIsNaN), usually(testExponentIsOneHalf));
2409 // If y == 0.5 or y == -0.5, handle it through SQRT.
2410 // We have to be careful with -0 and -Infinity.
2413 m_out.appendTo(testExponentIsOneHalf, handleBaseZeroExponentIsOneHalf);
2414 LValue exponentIsOneHalf = m_out.doubleEqual(exponent, m_out.constDouble(0.5));
2415 m_out.branch(exponentIsOneHalf, rarely(handleBaseZeroExponentIsOneHalf), usually(testExponentIsNegativeOneHalf));
// base == 0 (covers both +0 and -0): pow(±0, 0.5) is +0, not sqrt(-0) = -0.
2418 m_out.appendTo(handleBaseZeroExponentIsOneHalf, handleInfinityForExponentIsOneHalf);
2419 LValue baseIsZeroExponentIsOneHalf = m_out.doubleEqual(base, m_out.doubleZero);
2420 ValueFromBlock zeroResultExponentIsOneHalf = m_out.anchor(m_out.doubleZero);
2421 m_out.branch(baseIsZeroExponentIsOneHalf, rarely(continuation), usually(handleInfinityForExponentIsOneHalf));
2423 // Test if abs(x) == Infinity.
2424 m_out.appendTo(handleInfinityForExponentIsOneHalf, exponentIsOneHalfNormal);
2425 LValue absoluteBaseIsInfinityOneHalf = m_out.doubleEqual(absoluteBase, m_out.constDouble(std::numeric_limits<double>::infinity()));
2426 m_out.branch(absoluteBaseIsInfinityOneHalf, rarely(exponentIsOneHalfInfinity), usually(exponentIsOneHalfNormal));
2428 // The exponent is 0.5, the base is finite or NaN, we can use SQRT.
2429 m_out.appendTo(exponentIsOneHalfNormal, exponentIsOneHalfInfinity);
2430 ValueFromBlock sqrtResult = m_out.anchor(m_out.doubleSqrt(base));
2431 m_out.jump(continuation);
2433 // The exponent is 0.5, the base is infinite, the result is always infinite.
2434 m_out.appendTo(exponentIsOneHalfInfinity, testExponentIsNegativeOneHalf);
2435 ValueFromBlock sqrtInfinityResult = m_out.anchor(m_out.constDouble(std::numeric_limits<double>::infinity()));
2436 m_out.jump(continuation);
2438 // Test if y == -0.5
2439 m_out.appendTo(testExponentIsNegativeOneHalf, testBaseZeroExponentIsNegativeOneHalf);
2440 LValue exponentIsNegativeOneHalf = m_out.doubleEqual(exponent, m_out.constDouble(-0.5));
2441 m_out.branch(exponentIsNegativeOneHalf, rarely(testBaseZeroExponentIsNegativeOneHalf), usually(powBlock));
// base == 0: pow(±0, -0.5) is +Infinity.
2444 m_out.appendTo(testBaseZeroExponentIsNegativeOneHalf, handleBaseZeroExponentIsNegativeOneHalf);
2445 LValue baseIsZeroExponentIsNegativeOneHalf = m_out.doubleEqual(base, m_out.doubleZero);
2446 m_out.branch(baseIsZeroExponentIsNegativeOneHalf, rarely(handleBaseZeroExponentIsNegativeOneHalf), usually(handleInfinityForExponentIsNegativeOneHalf));
2448 m_out.appendTo(handleBaseZeroExponentIsNegativeOneHalf, handleInfinityForExponentIsNegativeOneHalf);
2449 ValueFromBlock oneOverSqrtZeroResult = m_out.anchor(m_out.constDouble(std::numeric_limits<double>::infinity()));
2450 m_out.jump(continuation);
2452 // Test if abs(x) == Infinity.
2453 m_out.appendTo(handleInfinityForExponentIsNegativeOneHalf, exponentIsNegativeOneHalfNormal);
2454 LValue absoluteBaseIsInfinityNegativeOneHalf = m_out.doubleEqual(absoluteBase, m_out.constDouble(std::numeric_limits<double>::infinity()));
2455 m_out.branch(absoluteBaseIsInfinityNegativeOneHalf, rarely(exponentIsNegativeOneHalfInfinity), usually(exponentIsNegativeOneHalfNormal));
2457 // The exponent is -0.5, the base is finite or NaN, we can use 1/SQRT.
2458 m_out.appendTo(exponentIsNegativeOneHalfNormal, exponentIsNegativeOneHalfInfinity);
2459 LValue sqrtBase = m_out.doubleSqrt(base);
2460 ValueFromBlock oneOverSqrtResult = m_out.anchor(m_out.div(m_out.constDouble(1.), sqrtBase));
2461 m_out.jump(continuation);
2463 // The exponent is -0.5, the base is infinite, the result is always zero.
2464 m_out.appendTo(exponentIsNegativeOneHalfInfinity, powBlock);
2465 ValueFromBlock oneOverSqrtInfinityResult = m_out.anchor(m_out.doubleZero);
2466 m_out.jump(continuation);
// General case: fall back to the full pow implementation.
2468 m_out.appendTo(powBlock, nanExceptionResultIsNaN);
2469 ValueFromBlock powResult = m_out.anchor(m_out.doublePow(base, exponent));
2470 m_out.jump(continuation);
2472 m_out.appendTo(nanExceptionResultIsNaN, continuation);
2473 ValueFromBlock pureNan = m_out.anchor(m_out.constDouble(PNaN));
2474 m_out.jump(continuation);
2476 m_out.appendTo(continuation, lastNext);
2477 setDouble(m_out.phi(Double, powDoubleIntResult, zeroResultExponentIsOneHalf, sqrtResult, sqrtInfinityResult, oneOverSqrtZeroResult, oneOverSqrtResult, oneOverSqrtInfinityResult, powResult, pureNan));
// Lowers ArithRandom (Math.random) by inlining the global object's WeakRandom
// state advance: an xorshift-style 64-bit update (x ^= x << 23; x ^= x >> 17;
// x ^= y ^ (y >> 26)), then converts the low 53 bits to a double in [0, 1).
// NOTE(review): relies on JSGlobalObject::weakRandomOffset() plus
// WeakRandom::lowOffset()/highOffset() matching WeakRandom's field layout —
// verify against WeakRandom.h if that class changes.
2481 void compileArithRandom()
2483 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
2485 // Inlined WeakRandom::advance().
2486 // uint64_t x = m_low;
2487 void* lowAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset();
2488 LValue low = m_out.load64(m_out.absolute(lowAddress));
2489 // uint64_t y = m_high;
2490 void* highAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset();
2491 LValue high = m_out.load64(m_out.absolute(highAddress));
// m_low = m_high; (the old high word becomes the new low word)
2493 m_out.store64(high, m_out.absolute(lowAddress));
// x ^= x << 23;
2496 LValue phase1 = m_out.bitXor(m_out.shl(low, m_out.constInt64(23)), low);
// x ^= x >> 17;
2499 LValue phase2 = m_out.bitXor(m_out.lShr(phase1, m_out.constInt64(17)), phase1);
2501 // x ^= y ^ (y >> 26);
2502 LValue phase3 = m_out.bitXor(m_out.bitXor(high, m_out.lShr(high, m_out.constInt64(26))), phase2);
// m_high = x; (persist the updated state back into the global object)
2505 m_out.store64(phase3, m_out.absolute(highAddress));
// The random 64-bit value is x + y.
2508 LValue random64 = m_out.add(phase3, high);
2510 // Extract random 53bit. [0, 53] bit is safe integer number ranges in double representation.
2511 LValue random53 = m_out.bitAnd(random64, m_out.constInt64((1ULL << 53) - 1));
2513 LValue double53Integer = m_out.intToDouble(random53);
2515 // Convert `(53bit double integer value) / (1 << 53)` to `(53bit double integer value) * (1.0 / (1 << 53))`.
2516 // In latter case, `1.0 / (1 << 53)` will become a double value represented as (mantissa = 0 & exp = 970, it means 1e-(2**54)).
2517 static const double scale = 1.0 / (1ULL << 53);
2519 // Multiplying 1e-(2**54) with the double integer does not change anything of the mantissa part of the double integer.
2520 // It just reduces the exp part of the given 53bit double integer.
2521 // (Except for 0.0. This is specially handled and in this case, exp just becomes 0.)
2522 // Now we get 53bit precision random double value in [0, 1).
2523 LValue result = m_out.doubleMul(double53Integer, m_out.constDouble(scale));
// Lowers ArithRound (Math.round). For DoubleRepUse:
//  - Fast path (integer result wanted, no negative-zero check): floor(x + 0.5).
//  - General path: take ceil(x); if ceil(x) - x > 0.5 (or is NaN/unordered),
//    round down by subtracting 1, so halfway cases round toward +infinity.
// For UntypedUse, falls back to the operationArithRound runtime call.
2528 void compileArithRound()
2530 if (m_node->child1().useKind() == DoubleRepUse) {
2531 LValue result = nullptr;
2532 if (producesInteger(m_node->arithRoundingMode()) && !shouldCheckNegativeZero(m_node->arithRoundingMode())) {
2533 LValue value = lowDouble(m_node->child1());
// floor(x + 0.5) is correct when the -0 distinction cannot be observed.
2534 result = m_out.doubleFloor(m_out.doubleAdd(value, m_out.constDouble(0.5)));
2536 LBasicBlock realPartIsMoreThanHalf = m_out.newBlock();
2537 LBasicBlock continuation = m_out.newBlock();
2539 LValue value = lowDouble(m_node->child1());
2540 LValue integerValue = m_out.doubleCeil(value);
2541 ValueFromBlock integerValueResult = m_out.anchor(integerValue);
// Fractional distance between ceil(x) and x; > 0.5 means ceil overshot.
2543 LValue realPart = m_out.doubleSub(integerValue, value);
// Unordered (NaN) also takes the rounded-down path.
2545 m_out.branch(m_out.doubleGreaterThanOrUnordered(realPart, m_out.constDouble(0.5)), unsure(realPartIsMoreThanHalf), unsure(continuation));
2547 LBasicBlock lastNext = m_out.appendTo(realPartIsMoreThanHalf, continuation);
2548 LValue integerValueRoundedDown = m_out.doubleSub(integerValue, m_out.constDouble(1));
2549 ValueFromBlock integerValueRoundedDownResult = m_out.anchor(integerValueRoundedDown);
2550 m_out.jump(continuation);
2551 m_out.appendTo(continuation, lastNext);
2553 result = m_out.phi(Double, integerValueResult, integerValueRoundedDownResult);
2556 if (producesInteger(m_node->arithRoundingMode())) {
// convertDoubleToInt32 OSR-exits on out-of-range (and -0 when checked).
2557 LValue integerValue = convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode()));
2558 setInt32(integerValue);
// Untyped fallback: call the runtime and get back a boxed JSValue.
2564 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2565 LValue argument = lowJSValue(m_node->child1());
2566 setJSValue(vmCall(Int64, m_out.operation(operationArithRound), m_callFrame, argument));
// Lowers ArithFloor (Math.floor): a single doubleFloor for DoubleRepUse,
// converted to Int32 when the rounding mode wants an integer; otherwise the
// operationArithFloor runtime call for untyped input.
2569 void compileArithFloor()
2571 if (m_node->child1().useKind() == DoubleRepUse) {
2572 LValue value = lowDouble(m_node->child1());
2573 LValue integerValue = m_out.doubleFloor(value);
2574 if (producesInteger(m_node->arithRoundingMode()))
2575 setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode())));
2577 setDouble(integerValue);
2580 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2581 LValue argument = lowJSValue(m_node->child1());
2582 setJSValue(vmCall(Int64, m_out.operation(operationArithFloor), m_callFrame, argument));
// Lowers ArithCeil (Math.ceil): mirrors compileArithFloor but uses doubleCeil.
2585 void compileArithCeil()
2587 if (m_node->child1().useKind() == DoubleRepUse) {
2588 LValue value = lowDouble(m_node->child1());
2589 LValue integerValue = m_out.doubleCeil(value);
2590 if (producesInteger(m_node->arithRoundingMode()))
2591 setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode())));
2593 setDouble(integerValue);
2596 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2597 LValue argument = lowJSValue(m_node->child1());
2598 setJSValue(vmCall(Int64, m_out.operation(operationArithCeil), m_callFrame, argument));
// Lowers ArithTrunc (Math.trunc): truncate toward zero via doubleTrunc for
// DoubleRepUse; operationArithTrunc runtime call for untyped input.
2601 void compileArithTrunc()
2603 if (m_node->child1().useKind() == DoubleRepUse) {
2604 LValue value = lowDouble(m_node->child1());
2605 LValue result = m_out.doubleTrunc(value);
2606 if (producesInteger(m_node->arithRoundingMode()))
2607 setInt32(convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode())));
2612 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2613 LValue argument = lowJSValue(m_node->child1());
2614 setJSValue(vmCall(Int64, m_out.operation(operationArithTrunc), m_callFrame, argument));
// Lowers ArithSqrt (Math.sqrt): hardware sqrt for DoubleRepUse, runtime call
// (operationArithSqrt) otherwise.
2617 void compileArithSqrt()
2619 if (m_node->child1().useKind() == DoubleRepUse) {
2620 setDouble(m_out.doubleSqrt(lowDouble(m_node->child1())));
2623 LValue argument = lowJSValue(m_node->child1());
2624 LValue result = vmCall(Double, m_out.operation(operationArithSqrt), m_callFrame, argument);
// Lowers ArithFRound (Math.fround): round-trip through float precision for
// DoubleRepUse, runtime call (operationArithFRound) otherwise.
2628 void compileArithFRound()
2630 if (m_node->child1().useKind() == DoubleRepUse) {
2631 setDouble(m_out.fround(lowDouble(m_node->child1())));
2634 LValue argument = lowJSValue(m_node->child1());
2635 LValue result = vmCall(Double, m_out.operation(operationArithFRound), m_callFrame, argument);
// Lowers ArithNegate per use kind:
//  - Int32: plain neg when unchecked; checked sub(0, x) when overflow matters;
//    when negative zero matters, first OSR-exit if x is 0 or INT_MIN (the
//    testIsZero32 against 0x7fffffff catches both), then neg is safe.
//  - Int52: plain neg with an optional negative-zero exit if the value fits in
//    51 bits (couldBeType(SpecInt52Only) false); otherwise checked 64-bit sub.
//  - DoubleRep: IEEE negation, no checks needed.
//  - Untyped: a JITNegIC (inline cache) driven by baseline arith profiling.
2639 void compileArithNegate()
2641 switch (m_node->child1().useKind()) {
2643 LValue value = lowInt32(m_node->child1());
2646 if (!shouldCheckOverflow(m_node->arithMode()))
2647 result = m_out.neg(value);
2648 else if (!shouldCheckNegativeZero(m_node->arithMode())) {
2649 CheckValue* check = m_out.speculateSub(m_out.int32Zero, value);
2650 blessSpeculation(check, Overflow, noValue(), nullptr, m_origin);
// Exit if value is 0 (would produce -0) or has no bits besides the sign
// bit's complement range, i.e. value & 0x7fffffff == 0 also matches INT_MIN.
2653 speculate(Overflow, noValue(), 0, m_out.testIsZero32(value, m_out.constInt32(0x7fffffff)));
2654 result = m_out.neg(value);
2662 if (!abstractValue(m_node->child1()).couldBeType(SpecInt52Only)) {
2664 LValue value = lowWhicheverInt52(m_node->child1(), kind);
2665 LValue result = m_out.neg(value);
2666 if (shouldCheckNegativeZero(m_node->arithMode()))
2667 speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
2668 setInt52(result, kind);
2672 LValue value = lowInt52(m_node->child1());
2673 CheckValue* result = m_out.speculateSub(m_out.int64Zero, value);
2674 blessSpeculation(result, Int52Overflow, noValue(), nullptr, m_origin);
2675 if (shouldCheckNegativeZero(m_node->arithMode()))
2676 speculate(NegativeZero, noValue(), 0, m_out.isZero64(result))2676 ;
2681 case DoubleRepUse: {
2682 setDouble(m_out.doubleNeg(lowDouble(m_node->child1())));
2687 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
// Fetch the baseline profile so the IC can specialize on observed types.
2688 ArithProfile* arithProfile = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic)->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
2689 JITNegIC* negIC = codeBlock()->addJITNegIC(arithProfile);
2690 auto repatchingFunction = operationArithNegateOptimize;
2691 auto nonRepatchingFunction = operationArithNegate;
2692 compileMathIC(negIC, repatchingFunction, nonRepatchingFunction);
// Lowers BitAnd: snippet-based IC for untyped operands, direct 32-bit AND
// for int32 operands.
2697 void compileBitAnd()
2699 if (m_node->isBinaryUseKind(UntypedUse)) {
2700 emitBinaryBitOpSnippet<JITBitAndGenerator>(operationValueBitAnd);
2703 setInt32(m_out.bitAnd(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
// Body of the BitOr lowering (signature elided in this view; presumably
// compileBitOr — confirm against the full file): snippet IC for untyped
// operands, direct 32-bit OR for int32 operands.
2708 if (m_node->isBinaryUseKind(UntypedUse)) {
2709 emitBinaryBitOpSnippet<JITBitOrGenerator>(operationValueBitOr);
2712 setInt32(m_out.bitOr(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
// Lowers BitXor: snippet-based IC for untyped operands, direct 32-bit XOR
// for int32 operands.
2715 void compileBitXor()
2717 if (m_node->isBinaryUseKind(UntypedUse)) {
2718 emitBinaryBitOpSnippet<JITBitXorGenerator>(operationValueBitXor);
2721 setInt32(m_out.bitXor(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
// Lowers BitRShift (>>): signed/arithmetic shift. The shift amount is masked
// to 0-31, matching the ECMAScript ToUint32(rhs) & 0x1F rule.
2724 void compileBitRShift()
2726 if (m_node->isBinaryUseKind(UntypedUse)) {
2727 emitRightShiftSnippet(JITRightShiftGenerator::SignedShift);
2730 setInt32(m_out.aShr(
2731 lowInt32(m_node->child1()),
2732 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
// Lowers BitLShift (<<): shift amount masked to 0-31 per the spec.
// (The setInt32(m_out.shl( line is elided in this view.)
2735 void compileBitLShift()
2737 if (m_node->isBinaryUseKind(UntypedUse)) {
2738 emitBinaryBitOpSnippet<JITLeftShiftGenerator>(operationValueBitLShift);
2742 lowInt32(m_node->child1()),
2743 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
// Lowers BitURShift (>>>): unsigned/logical shift, amount masked to 0-31.
2746 void compileBitURShift()
2748 if (m_node->isBinaryUseKind(UntypedUse)) {
2749 emitRightShiftSnippet(JITRightShiftGenerator::UnsignedShift);
2752 setInt32(m_out.lShr(
2753 lowInt32(m_node->child1()),
2754 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
// Lowers UInt32ToNumber: reinterpret an int32 as unsigned. If the arith mode
// tolerates overflow, zero-extend into an Int52; otherwise OSR-exit when the
// value is negative (i.e. the unsigned value would not fit in int32).
2757 void compileUInt32ToNumber()
2759 LValue value = lowInt32(m_node->child1());
2761 if (doesOverflow(m_node->arithMode())) {
2762 setStrictInt52(m_out.zeroExtPtr(value));
2766 speculate(Overflow, noValue(), 0, m_out.lessThan(value, m_out.int32Zero));
// Lowers CheckStructure: OSR-exits (BadCache / BadConstantCache) unless the
// cell's structure ID is in the node's registered structure set. For
// CellOrOtherUse, "other" (null/undefined) values skip the structure check but
// are type-checked against SpecCell | SpecOther.
2770 void compileCheckStructure()
2773 if (m_node->child1()->hasConstant())
2774 exitKind = BadConstantCache;
2776 exitKind = BadCache;
2778 switch (m_node->child1().useKind()) {
2780 case KnownCellUse: {
2781 LValue cell = lowCell(m_node->child1());
// Compare the loaded structure ID against each structure in the set.
2784 m_out.load32(cell, m_heaps.JSCell_structureID), jsValueValue(cell),
2785 exitKind, m_node->structureSet(),
2786 [&] (RegisteredStructure structure) {
2787 return weakStructureID(structure);
2792 case CellOrOtherUse: {
2793 LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);
2795 LBasicBlock cellCase = m_out.newBlock();
2796 LBasicBlock notCellCase = m_out.newBlock();
2797 LBasicBlock continuation = m_out.newBlock();
2800 isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase));
2802 LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
2804 m_out.load32(value, m_heaps.JSCell_structureID), jsValueValue(value),
2805 exitKind, m_node->structureSet(),
2806 [&] (RegisteredStructure structure) {
2807 return weakStructureID(structure);
2809 m_out.jump(continuation);
2811 m_out.appendTo(notCellCase, continuation);
// Non-cells must be "other" (null/undefined) or we exit.
2812 FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecCell | SpecOther, isNotOther(value));
2813 m_out.jump(continuation);
2815 m_out.appendTo(continuation, lastNext);
2820 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers CheckStructureOrEmpty: like CheckStructure, but the empty value
// (all-zero bits, e.g. a TDZ hole) passes the check. The empty-value branch is
// only emitted when the abstract interpreter says SpecEmpty is possible.
2825 void compileCheckStructureOrEmpty()
2828 if (m_node->child1()->hasConstant())
2829 exitKind = BadConstantCache;
2831 exitKind = BadCache;
2833 LValue cell = lowCell(m_node->child1());
2834 bool maySeeEmptyValue = m_interpreter.forNode(m_node->child1()).m_type & SpecEmpty;
// These three are only initialized/used when maySeeEmptyValue is true.
2835 LBasicBlock notEmpty;
2836 LBasicBlock continuation;
2837 LBasicBlock lastNext;
2838 if (maySeeEmptyValue) {
2839 notEmpty = m_out.newBlock();
2840 continuation = m_out.newBlock();
// Empty (zero) values jump straight past the structure check.
2841 m_out.branch(m_out.isZero64(cell), unsure(continuation), unsure(notEmpty));
2842 lastNext = m_out.appendTo(notEmpty, continuation);
2846 m_out.load32(cell, m_heaps.JSCell_structureID), jsValueValue(cell),
2847 exitKind, m_node->structureSet(),
2848 [&] (RegisteredStructure structure) {
2849 return weakStructureID(structure);
2852 if (maySeeEmptyValue) {
2853 m_out.jump(continuation);
2854 m_out.appendTo(continuation, lastNext);
// Lowers CheckCell: OSR-exit (BadCell) unless the operand is exactly the
// expected cell constant (compared by weak pointer identity).
2858 void compileCheckCell()
2860 LValue cell = lowCell(m_node->child1());
2863 BadCell, jsValueValue(cell), m_node->child1().node(),
2864 m_out.notEqual(cell, weakPointer(m_node->cellOperand()->cell()));
// Lowers CheckBadCell. (Body elided in this view — it unconditionally fails
// the speculation; confirm against the full file.)
2867 void compileCheckBadCell()
// Lowers CheckNotEmpty: OSR-exit (TDZFailure) when the value is the empty
// value (all-zero bits), used for temporal-dead-zone checks.
2872 void compileCheckNotEmpty()
2874 speculate(TDZFailure, noValue(), nullptr, m_out.isZero64(lowJSValue(m_node->child1())));
// Lowers CheckStringIdent: OSR-exit (BadIdent) unless the string's impl
// pointer equals the expected uniqued identifier (pointer comparison is valid
// because uniqued StringImpls are interned).
2877 void compileCheckStringIdent()
2879 UniquedStringImpl* uid = m_node->uidOperand();
2880 LValue stringImpl = lowStringIdent(m_node->child1());
2881 speculate(BadIdent, noValue(), nullptr, m_out.notEqual(stringImpl, m_out.constIntPtr(uid)));
// Lowers GetExecutable: speculate that the cell is a JSFunction, then load its
// executable pointer.
2884 void compileGetExecutable()
2886 LValue cell = lowCell(m_node->child1());
2887 speculateFunction(m_node->child1(), cell);
2888 setJSValue(m_out.loadPtr(cell, m_heaps.JSFunction_executable));
// Lowers Arrayify / ArrayifyToStructure: if the cell already has the expected
// indexing type (or exact structure, for ArrayifyToStructure), do nothing.
// Otherwise take a rarely-taken slow path that calls the appropriate
// operationEnsure* runtime function to convert the butterfly, then re-checks
// and OSR-exits (BadIndexingType) if conversion did not produce the expected
// shape.
2891 void compileArrayify()
2893 LValue cell = lowCell(m_node->child1());
2894 LValue property = !!m_node->child2() ? lowInt32(m_node->child2()) : 0;
2896 LBasicBlock unexpectedStructure = m_out.newBlock();
2897 LBasicBlock continuation = m_out.newBlock();
// Shared predicate: "does this cell NOT already have the wanted shape?"
2899 auto isUnexpectedArray = [&] (LValue cell) {
2900 if (m_node->op() == Arrayify)
2901 return m_out.logicalNot(isArrayTypeForArrayify(cell, m_node->arrayMode()));
2903 ASSERT(m_node->op() == ArrayifyToStructure);
2904 return m_out.notEqual(m_out.load32(cell, m_heaps.JSCell_structureID), weakStructureID(m_node->structure()));
2907 m_out.branch(isUnexpectedArray(cell), rarely(unexpectedStructure), usually(continuation));
2909 LBasicBlock lastNext = m_out.appendTo(unexpectedStructure, continuation);
// For indexed modes, refuse to arrayify when the index would force a
// sparse array (index >= MIN_SPARSE_ARRAY_INDEX).
2912 switch (m_node->arrayMode().type()) {
2915 case Array::Contiguous:
2917 Uncountable, noValue(), 0,
2918 m_out.aboveOrEqual(property, m_out.constInt32(MIN_SPARSE_ARRAY_INDEX)));
// Call the runtime converter matching the target array mode.
2925 switch (m_node->arrayMode().type()) {
2927 vmCall(Void, m_out.operation(operationEnsureInt32), m_callFrame, cell);
2930 vmCall(Void, m_out.operation(operationEnsureDouble), m_callFrame, cell);
2932 case Array::Contiguous:
2933 vmCall(Void, m_out.operation(operationEnsureContiguous), m_callFrame, cell);
2935 case Array::ArrayStorage:
2936 case Array::SlowPutArrayStorage:
2937 vmCall(Void, m_out.operation(operationEnsureArrayStorage), m_callFrame, cell);
2940 DFG_CRASH(m_graph, m_node, "Bad array type");
// Conversion may still fail to produce the wanted shape; exit if so.
2944 speculate(BadIndexingType, jsValueValue(cell), 0, isUnexpectedArray(cell));
2945 m_out.jump(continuation);
2947 m_out.appendTo(continuation, lastNext);
// Lowers PutStructure: record the transition for the GC/plan, then store the
// new structure ID into the cell. The asserts document why no barrier-relevant
// bits change: the transition preserves indexing type, inline type flags, and
// cell type.
2950 void compilePutStructure()
2952 m_ftlState.jitCode->common.notifyCompilingStructureTransition(m_graph.m_plan, codeBlock(), m_node);
2954 RegisteredStructure oldStructure = m_node->transition()->previous;
2955 RegisteredStructure newStructure = m_node->transition()->next;
2956 ASSERT_UNUSED(oldStructure, oldStructure->indexingType() == newStructure->indexingType());
2957 ASSERT(oldStructure->typeInfo().inlineTypeFlags() == newStructure->typeInfo().inlineTypeFlags());
2958 ASSERT(oldStructure->typeInfo().type() == newStructure->typeInfo().type());
2960 LValue cell = lowCell(m_node->child1());
2962 weakStructureID(newStructure),
2963 cell, m_heaps.JSCell_structureID);
// Lowers GetById / TryGetById. Cell operands go straight through the getById
// inline cache. Untyped operands branch: cells use the IC, non-cells call the
// generic runtime function (operationGetByIdGeneric or
// operationTryGetByIdGeneric depending on the access type), and the results
// merge in a phi.
2966 void compileGetById(AccessType type)
2968 ASSERT(type == AccessType::Get || type == AccessType::TryGet);
2969 switch (m_node->child1().useKind()) {
2971 setJSValue(getById(lowCell(m_node->child1()), type));
2976 // This is pretty weird, since we duplicate the slow path both here and in the
2977 // code generated by the IC. We should investigate making this less bad.
2978 // https://bugs.webkit.org/show_bug.cgi?id=127830
2979 LValue value = lowJSValue(m_node->child1());
2981 LBasicBlock cellCase = m_out.newBlock();
2982 LBasicBlock notCellCase = m_out.newBlock();
2983 LBasicBlock continuation = m_out.newBlock();
2986 isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase));
2988 LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
2989 ValueFromBlock cellResult = m_out.anchor(getById(value, type));
2990 m_out.jump(continuation);
// Select the generic slow-path function matching the access type.
2992 J_JITOperation_EJI getByIdFunction;
2993 if (type == AccessType::Get)
2994 getByIdFunction = operationGetByIdGeneric;
2996 getByIdFunction = operationTryGetByIdGeneric;
2998 m_out.appendTo(notCellCase, continuation);
2999 ValueFromBlock notCellResult = m_out.anchor(vmCall(
3000 Int64, m_out.operation(getByIdFunction),
3002 m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
3003 m_out.jump(continuation);
3005 m_out.appendTo(continuation, lastNext);
3006 setJSValue(m_out.phi(Int64, cellResult, notCellResult));
3011 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers GetByIdWithThis (super property access with explicit receiver). Fast
// path: both base and thisValue are cells — use the IC. Otherwise, if either
// is a non-cell, fall through to the operationGetByIdWithThis runtime call;
// results merge in a phi.
3016 void compileGetByIdWithThis()
3018 if (m_node->child1().useKind() == CellUse && m_node->child2().useKind() == CellUse)
3019 setJSValue(getByIdWithThis(lowCell(m_node->child1()), lowCell(m_node->child2())));
3021 LValue base = lowJSValue(m_node->child1());
3022 LValue thisValue = lowJSValue(m_node->child2());
3024 LBasicBlock baseCellCase = m_out.newBlock();
3025 LBasicBlock notCellCase = m_out.newBlock();
3026 LBasicBlock thisValueCellCase = m_out.newBlock();
3027 LBasicBlock continuation = m_out.newBlock();
3030 isCell(base, provenType(m_node->child1())), unsure(baseCellCase), unsure(notCellCase));
3032 LBasicBlock lastNext = m_out.appendTo(baseCellCase, thisValueCellCase);
// Base is a cell; the IC path additionally requires thisValue to be one.
3035 isCell(thisValue, provenType(m_node->child2())), unsure(thisValueCellCase), unsure(notCellCase));
3037 m_out.appendTo(thisValueCellCase, notCellCase);
3038 ValueFromBlock cellResult = m_out.anchor(getByIdWithThis(base, thisValue));
3039 m_out.jump(continuation);
3041 m_out.appendTo(notCellCase, continuation);
3042 ValueFromBlock notCellResult = m_out.anchor(vmCall(
3043 Int64, m_out.operation(operationGetByIdWithThis),
3044 m_callFrame, base, thisValue,
3045 m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
3046 m_out.jump(continuation);
3048 m_out.appendTo(continuation, lastNext);
3049 setJSValue(m_out.phi(Int64, cellResult, notCellResult));
// Lowers GetByValWithThis: always a runtime call (operationGetByValWithThis) —
// no inline cache / fast path here.
3054 void compileGetByValWithThis()
3056 LValue base = lowJSValue(m_node->child1());
3057 LValue thisValue = lowJSValue(m_node->child2());
3058 LValue subscript = lowJSValue(m_node->child3());
3060 LValue result = vmCall(Int64, m_out.operation(operationGetByValWithThis), m_callFrame, base, thisValue, subscript);
// Lowers PutByIdWithThis: runtime call, choosing the strict or sloppy variant
// based on the semantic origin's strict-mode-ness.
3064 void compilePutByIdWithThis()
3066 LValue base = lowJSValue(m_node->child1());
3067 LValue thisValue = lowJSValue(m_node->child2());
3068 LValue value = lowJSValue(m_node->child3());
3070 vmCall(Void, m_out.operation(m_graph.isStrictModeFor(m_node->origin.semantic) ? operationPutByIdWithThisStrict : operationPutByIdWithThis),
3071 m_callFrame, base, thisValue, value, m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()]));
// Lowers PutByValWithThis: runtime call (strict/sloppy variant), operands come
// from the node's var-args children (base, this, property, value).
3074 void compilePutByValWithThis()
3076 LValue base = lowJSValue(m_graph.varArgChild(m_node, 0));
3077 LValue thisValue = lowJSValue(m_graph.varArgChild(m_node, 1));
3078 LValue property = lowJSValue(m_graph.varArgChild(m_node, 2));
3079 LValue value = lowJSValue(m_graph.varArgChild(m_node, 3));
3081 vmCall(Void, m_out.operation(m_graph.isStrictModeFor(m_node->origin.semantic) ? operationPutByValWithThisStrict : operationPutByValWithThis),
3082 m_callFrame, base, thisValue, property, value);
3085 void compileAtomicsReadModifyWrite()
3087 TypedArrayType type = m_node->arrayMode().typedArrayType();
3088 unsigned numExtraArgs = numExtraAtomicsArgs(m_node->op());
3089 Edge baseEdge = m_graph.child(m_node, 0);
3090 Edge indexEdge = m_graph.child(m_node, 1);
3091 Edge argEdges[maxNumExtraAtomicsArgs];
3092 for (unsigned i = numExtraArgs; i--;)
3093 argEdges[i] = m_graph.child(m_node, 2 + i);
3094 Edge storageEdge = m_graph.child(m_node, 2 + numExtraArgs);
3096 auto operation = [&] () -> LValue {
3097 switch (m_node->op()) {
3099 return m_out.operation(operationAtomicsAdd);
3101 return m_out.operation(operationAtomicsAnd);
3102 case AtomicsCompareExchange:
3103 return m_out.operation(operationAtomicsCompareExchange);
3104 case AtomicsExchange:
3105 return m_out.operation(operationAtomicsExchange);
3107 return m_out.operation(operationAtomicsLoad);
3109 return m_out.operation(operationAtomicsOr);
3111 return m_out.operation(operationAtomicsStore);
3113 return m_out.operation(operationAtomicsSub);
3115 return m_out.operation(operationAtomicsXor);
3117 RELEASE_ASSERT_NOT_REACHED();
3123 Vector<LValue> args;
3124 args.append(m_callFrame);
3125 args.append(lowJSValue(baseEdge));
3126 args.append(lowJSValue(indexEdge));
3127 for (unsigned i = 0; i < numExtraArgs; ++i)
3128 args.append(lowJSValue(argEdges[i]));
3129 LValue result = vmCall(Int64, operation(), args);
3134 LValue index = lowInt32(indexEdge);
3136 for (unsigned i = numExtraArgs; i--;)
3137 args[i] = getIntTypedArrayStoreOperand(argEdges[i]);
3138 LValue storage = lowStorage(storageEdge);
3140 TypedPointer pointer = pointerIntoTypedArray(storage, index, type);
3141 Width width = widthForBytes(elementSize(type));
3146 auto sanitizeResult = [&] (LValue value) -> LValue {