2 * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "FTLLowerDFGToB3.h"
32 #include "AirGenerationContext.h"
33 #include "AllowMacroScratchRegisterUsage.h"
34 #include "AtomicsObject.h"
35 #include "B3CheckValue.h"
36 #include "B3FenceValue.h"
37 #include "B3PatchpointValue.h"
38 #include "B3SlotBaseValue.h"
39 #include "B3StackmapGenerationParams.h"
40 #include "B3ValueInlines.h"
41 #include "CallFrameShuffler.h"
42 #include "CodeBlockWithJITType.h"
43 #include "DFGAbstractInterpreterInlines.h"
44 #include "DFGCapabilities.h"
45 #include "DFGDominators.h"
46 #include "DFGInPlaceAbstractState.h"
47 #include "DFGOSRAvailabilityAnalysisPhase.h"
48 #include "DFGOSRExitFuzz.h"
49 #include "DirectArguments.h"
50 #include "FTLAbstractHeapRepository.h"
51 #include "FTLAvailableRecovery.h"
52 #include "FTLExceptionTarget.h"
53 #include "FTLForOSREntryJITCode.h"
54 #include "FTLFormattedValue.h"
55 #include "FTLLazySlowPathCall.h"
56 #include "FTLLoweredNodeValue.h"
57 #include "FTLOperations.h"
58 #include "FTLOutput.h"
59 #include "FTLPatchpointExceptionHandle.h"
60 #include "FTLSnippetParams.h"
61 #include "FTLThunks.h"
62 #include "FTLWeightedTarget.h"
63 #include "JITAddGenerator.h"
64 #include "JITBitAndGenerator.h"
65 #include "JITBitOrGenerator.h"
66 #include "JITBitXorGenerator.h"
67 #include "JITDivGenerator.h"
68 #include "JITInlineCacheGenerator.h"
69 #include "JITLeftShiftGenerator.h"
70 #include "JITMathIC.h"
71 #include "JITMulGenerator.h"
72 #include "JITRightShiftGenerator.h"
73 #include "JITSubGenerator.h"
74 #include "JSAsyncFunction.h"
75 #include "JSAsyncGeneratorFunction.h"
76 #include "JSCInlines.h"
77 #include "JSGeneratorFunction.h"
78 #include "JSLexicalEnvironment.h"
80 #include "OperandsInlines.h"
81 #include "ScopedArguments.h"
82 #include "ScopedArgumentsTable.h"
83 #include "ScratchRegisterAllocator.h"
84 #include "SetupVarargsFrame.h"
85 #include "ShadowChicken.h"
86 #include "StructureStubInfo.h"
87 #include "SuperSampler.h"
88 #include "ThunkGenerators.h"
89 #include "VirtualRegister.h"
92 #include <unordered_set>
94 #include <wtf/Gigacage.h>
96 namespace JSC { namespace FTL {
// Monotonically increasing id used only to give each verbose-compilation dump a
// unique "jsBody_<n>_..." label (see the verboseCompilationEnabled() path below).
// std::atomic because FTL compilations may run on concurrent compiler threads.
103 std::atomic<int> compileCounter;
// Reports that control flow reached a point the compiler proved unreachable, then
// crashes (NO_RETURN_DUE_TO_CRASH). nodeIndex == UINT_MAX means "no specific node":
// the node part of the message is only printed when a node index is available.
106 NO_RETURN_DUE_TO_CRASH static void ftlUnreachable(
107 CodeBlock* codeBlock, BlockIndex blockIndex, unsigned nodeIndex)
109 dataLog("Crashing in thought-to-be-unreachable FTL-generated code for ", pointerDump(codeBlock), " at basic block #", blockIndex);
110 if (nodeIndex != UINT_MAX)
111 dataLog(", node @", nodeIndex);
117 // Using this instead of typeCheck() helps to reduce the load on B3, by creating
118 // significantly less dead code.
// The macro evaluates each argument exactly once into _ftc_-prefixed locals, and
// skips emitting the check entirely when the abstract interpreter already proves
// the edge's type (needsTypeCheck() returns false).
119 #define FTL_TYPE_CHECK_WITH_EXIT_KIND(exitKind, lowValue, highValue, typesPassedThrough, failCondition) do { \
120 FormattedValue _ftc_lowValue = (lowValue); \
121 Edge _ftc_highValue = (highValue); \
122 SpeculatedType _ftc_typesPassedThrough = (typesPassedThrough); \
123 if (!m_interpreter.needsTypeCheck(_ftc_highValue, _ftc_typesPassedThrough)) \
125 typeCheck(_ftc_lowValue, _ftc_highValue, _ftc_typesPassedThrough, (failCondition), exitKind); \
// Convenience wrapper: the common case where a failed speculation exits with BadType.
128 #define FTL_TYPE_CHECK(lowValue, highValue, typesPassedThrough, failCondition) \
129 FTL_TYPE_CHECK_WITH_EXIT_KIND(BadType, lowValue, highValue, typesPassedThrough, failCondition)
132 WTF_MAKE_NONCOPYABLE(LowerDFGToB3);
// Constructor: wires the lowering object to the compilation State's graph and B3
// procedure, and builds the in-place abstract state + interpreter over the graph.
134 LowerDFGToB3(State& state)
135 : m_graph(state.graph)
138 , m_proc(*state.proc)
139 , m_availabilityCalculator(m_graph)
140 , m_state(state.graph)
141 , m_interpreter(state.graph, m_state)
// --- lower(): main entry point of the lowering pass (signature elided in this
// excerpt). Builds all B3 blocks, emits the prologue, stack check, entry switch,
// argument speculations and exception handler, then lowers every DFG block.
147 State* state = &m_ftlState;
150 if (verboseCompilationEnabled()) {
// Unique dump label per compilation, derived from the atomic compileCounter.
152 "jsBody_", ++compileCounter, "_", codeBlock()->inferredName(),
153 "_", codeBlock()->hash());
158 m_proc.setNumEntrypoints(m_graph.m_numberOfEntrypoints);
159 CodeBlock* codeBlock = m_graph.m_codeBlock;
// Shared prologue used by every catch (op_catch OSR) entrypoint: set up the stack
// pointer from the frame register, save callee-saves, and record the CodeBlock in
// the call frame header.
161 Ref<B3::Air::PrologueGenerator> catchPrologueGenerator = createSharedTask<B3::Air::PrologueGeneratorFunction>(
162 [codeBlock] (CCallHelpers& jit, B3::Air::Code& code) {
163 AllowMacroScratchRegisterUsage allowScratch(jit);
164 jit.addPtr(CCallHelpers::TrustedImm32(-code.frameSize()), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
165 jit.emitSave(code.calleeSaveRegisterAtOffsetList());
166 jit.emitPutToCallFrameHeader(codeBlock, CallFrameSlot::codeBlock);
// Entrypoint 0 is the normal entry; only catch entrypoints (index != 0) get the
// shared catch prologue.
169 for (unsigned catchEntrypointIndex : m_graph.m_entrypointIndexToCatchBytecodeOffset.keys()) {
170 RELEASE_ASSERT(catchEntrypointIndex != 0);
171 m_proc.code().setPrologueForEntrypoint(catchEntrypointIndex, catchPrologueGenerator.copyRef());
174 if (m_graph.m_maxLocalsForCatchOSREntry) {
175 uint32_t numberOfLiveLocals = std::max(*m_graph.m_maxLocalsForCatchOSREntry, 1u); // Make sure we always allocate a non-null catchOSREntryBuffer.
176 m_ftlState.jitCode->common.catchOSREntryBuffer = m_graph.m_vm.scratchBufferForSize(sizeof(JSValue) * numberOfLiveLocals);
180 m_graph.ensureSSADominators();
182 if (verboseCompilationEnabled())
183 dataLog("Function ready, beginning lowering.\n");
185 m_out.initialize(m_heaps);
187 // We use prologue frequency for all of the initialization code.
188 m_out.setFrequency(1);
190 LBasicBlock prologue = m_out.newBlock();
191 LBasicBlock callEntrypointArgumentSpeculations = m_out.newBlock();
192 m_handleExceptions = m_out.newBlock();
// Pre-create one low (B3) block per high (DFG) block, carrying over the DFG
// block's execution frequency so B3 sees roughly correct block weights.
194 for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
195 m_highBlock = m_graph.block(blockIndex);
198 m_out.setFrequency(m_highBlock->executionCount);
199 m_blocks.add(m_highBlock, m_out.newBlock());
202 // Back to prologue frequency for any blocks that get sneakily created in the initialization code.
203 m_out.setFrequency(1);
205 m_out.appendTo(prologue, callEntrypointArgumentSpeculations);
206 m_out.initializeConstants(m_proc, prologue);
207 createPhiVariables();
// Reserve a locked stack slot big enough for all machine locals; m_captured points
// just past it, and the slot itself is published to the State for OSR use.
209 size_t sizeOfCaptured = sizeof(JSValue) * m_graph.m_nextMachineLocal;
210 B3::SlotBaseValue* capturedBase = m_out.lockedStackSlot(sizeOfCaptured);
211 m_captured = m_out.add(capturedBase, m_out.constIntPtr(sizeOfCaptured));
212 state->capturedValue = capturedBase->slot();
214 auto preOrder = m_graph.blocksInPreOrder();
216 m_callFrame = m_out.framePointer();
217 m_tagTypeNumber = m_out.constInt64(TagTypeNumber);
218 m_tagMask = m_out.constInt64(TagMask);
220 // Make sure that B3 knows that we really care about the mask registers. This forces the
221 // constants to be materialized in registers.
222 m_proc.addFastConstant(m_tagTypeNumber->key());
223 m_proc.addFastConstant(m_tagMask->key());
225 // We don't want the CodeBlock to have a weak pointer to itself because
226 // that would cause it to always get collected.
227 m_out.storePtr(m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())), addressFor(CallFrameSlot::codeBlock));
229 VM* vm = &this->vm();
231 // Stack Overflow Check.
// The check must cover the larger of the FTL frame and the frame needed to OSR
// exit, compared against the VM's soft stack limit via a patchpoint.
232 unsigned exitFrameSize = m_graph.requiredRegisterCountForExit() * sizeof(Register);
233 MacroAssembler::AbsoluteAddress addressOfStackLimit(vm->addressOfSoftStackLimit());
234 PatchpointValue* stackOverflowHandler = m_out.patchpoint(Void);
235 CallSiteIndex callSiteIndex = callSiteIndexForCodeOrigin(m_ftlState, CodeOrigin(0));
236 stackOverflowHandler->appendSomeRegister(m_callFrame);
237 stackOverflowHandler->clobber(RegisterSet::macroScratchRegisters());
238 stackOverflowHandler->numGPScratchRegisters = 1;
239 stackOverflowHandler->setGenerator(
240 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
241 AllowMacroScratchRegisterUsage allowScratch(jit);
242 GPRReg fp = params[0].gpr();
243 GPRReg scratch = params.gpScratch(0);
245 unsigned ftlFrameSize = params.proc().frameSize();
246 unsigned maxFrameSize = std::max(exitFrameSize, ftlFrameSize);
248 jit.addPtr(MacroAssembler::TrustedImm32(-maxFrameSize), fp, scratch);
249 MacroAssembler::JumpList stackOverflow;
// If the frame is larger than the reserved zone, fp - maxFrameSize may wrap;
// also treat that wrap-around (scratch > fp) as overflow.
250 if (UNLIKELY(maxFrameSize > Options::reservedZoneSize()))
251 stackOverflow.append(jit.branchPtr(MacroAssembler::Above, scratch, fp));
252 stackOverflow.append(jit.branchPtr(MacroAssembler::Above, addressOfStackLimit, scratch));
// Cold path, emitted out of line: restore callee-saves, record the call site,
// then call out to throw the stack-overflow error and unwind.
254 params.addLatePath([=] (CCallHelpers& jit) {
255 AllowMacroScratchRegisterUsage allowScratch(jit);
257 stackOverflow.link(&jit);
259 // FIXME: We would not have to do this if the stack check was part of the Air
260 // prologue. Then, we would know that there is no way for the callee-saves to
262 // https://bugs.webkit.org/show_bug.cgi?id=172456
263 jit.emitRestore(params.proc().calleeSaveRegisterAtOffsetList());
266 MacroAssembler::TrustedImm32(callSiteIndex.bits()),
267 CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
268 jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm->topEntryFrame);
270 jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
271 jit.move(CCallHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::argumentGPR1);
272 CCallHelpers::Call throwCall = jit.call();
274 jit.move(CCallHelpers::TrustedImmPtr(vm), GPRInfo::argumentGPR0);
275 jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
276 CCallHelpers::Call lookupExceptionHandlerCall = jit.call();
277 jit.jumpToExceptionHandler(*vm);
// Calls are linked late, once the final code addresses are known.
280 [=] (LinkBuffer& linkBuffer) {
281 linkBuffer.link(throwCall, FunctionPtr(operationThrowStackOverflowError));
282 linkBuffer.link(lookupExceptionHandlerCall, FunctionPtr(lookupExceptionHandlerFromCallerFrame));
287 LBasicBlock firstDFGBasicBlock = lowBlock(m_graph.block(0));
// EntrySwitch successors: entrypoint 0 goes through argument speculations; every
// catch entrypoint jumps straight into the first DFG block.
290 Vector<LBasicBlock> successors(m_graph.m_numberOfEntrypoints);
291 successors[0] = callEntrypointArgumentSpeculations;
292 for (unsigned i = 1; i < m_graph.m_numberOfEntrypoints; ++i) {
293 // Currently, the only other entrypoint is an op_catch entrypoint.
294 // We do OSR entry at op_catch, and we prove argument formats before
295 // jumping to FTL code, so we don't need to check argument types here
296 // for these entrypoints.
297 successors[i] = firstDFGBasicBlock;
300 m_out.entrySwitch(successors);
301 m_out.appendTo(callEntrypointArgumentSpeculations, m_handleExceptions);
304 m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
// All arguments start out flushed to the stack as JSValues.
307 availabilityMap().clear();
308 availabilityMap().m_locals = Operands<Availability>(codeBlock()->numParameters(), 0);
309 for (unsigned i = codeBlock()->numParameters(); i--;) {
310 availabilityMap().m_locals.argument(i) =
311 Availability(FlushedAt(FlushedJSValue, virtualRegisterForArgument(i)));
// Speculate on each argument's format (as proven for entrypoint 0), exiting with
// BadType — and feeding the argument's value profile — on mismatch.
314 for (unsigned i = codeBlock()->numParameters(); i--;) {
315 MethodOfGettingAValueProfile profile(&m_graph.m_profiledBlock->valueProfileForArgument(i));
316 VirtualRegister operand = virtualRegisterForArgument(i);
317 LValue jsValue = m_out.load64(addressFor(operand));
319 switch (m_graph.m_argumentFormats[0][i]) {
321 speculate(BadType, jsValueValue(jsValue), profile, isNotInt32(jsValue));
324 speculate(BadType, jsValueValue(jsValue), profile, isNotBoolean(jsValue));
327 speculate(BadType, jsValueValue(jsValue), profile, isNotCell(jsValue));
332 DFG_CRASH(m_graph, nullptr, "Bad flush format for argument");
336 m_out.jump(firstDFGBasicBlock);
// Shared exception-handling block: a patchpoint that simply jumps to the State's
// exception handler label, linked once code addresses are known.
340 m_out.appendTo(m_handleExceptions, firstDFGBasicBlock);
341 Box<CCallHelpers::Label> exceptionHandler = state->exceptionHandler;
342 m_out.patchpoint(Void)->setGenerator(
343 [=] (CCallHelpers& jit, const StackmapGenerationParams&) {
344 CCallHelpers::Jump jump = jit.jump();
346 [=] (LinkBuffer& linkBuffer) {
347 linkBuffer.link(jump, linkBuffer.locationOf(*exceptionHandler));
// Lower every reachable DFG block, in pre-order.
352 for (DFG::BasicBlock* block : preOrder)
355 // Make sure everything is decorated. This does a bunch of deferred decorating. This has
356 // to happen last because our abstract heaps are generated lazily. They have to be
357 // generated lazily because we have an infinite number of numbered, indexed, and
358 // absolute heaps. We only become aware of the ones we actually mention while lowering.
359 m_heaps.computeRangesAndDecorateInstructions();
361 // We create all Phi's up front, but we may then decide not to compile the basic block
362 // that would have contained one of them. So this creates orphans, which triggers B3
363 // validation failures. Calling this fixes the issue.
365 // Note that you should avoid the temptation to make this call conditional upon
366 // validation being enabled. B3 makes no guarantees of any kind of correctness when
367 // dealing with IR that would have failed validation. For example, it would be valid to
368 // write a B3 phase that so aggressively assumes the lack of orphans that it would crash
369 // if any orphans were around. We might even have such phases already.
370 m_proc.deleteOrphans();
372 // We put the blocks into the B3 procedure in a super weird order. Now we reorder them.
373 m_out.applyBlockOrder();
// Walks every node of every DFG block (in reverse) and pre-creates a B3 Phi value
// for each DFG::Phi node, choosing the B3 type from the node's result flags.
// The mapping is recorded in m_phis so Upsilon/Phi lowering can find it later.
378 void createPhiVariables()
380 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
381 DFG::BasicBlock* block = m_graph.block(blockIndex);
384 for (unsigned nodeIndex = block->size(); nodeIndex--;) {
385 Node* node = block->at(nodeIndex);
// Only Phi nodes get a B3 counterpart; everything else is skipped.
386 if (node->op() != DFG::Phi)
389 switch (node->flags() & NodeResultMask) {
390 case NodeResultDouble:
393 case NodeResultInt32:
396 case NodeResultInt52:
399 case NodeResultBoolean:
// Any other result kind is a compiler bug.
406 DFG_CRASH(m_graph, node, "Bad Phi node result type");
409 m_phis.add(node, m_proc.add<Value>(B3::Phi, type, Origin(node)));
// Lowers one DFG basic block into its pre-created B3 block: finds the next block
// (to keep IR in program order), bails if CFA never reached this block, then runs
// the availability calculator and abstract state while lowering node by node.
414 void compileBlock(DFG::BasicBlock* block)
419 if (verboseCompilationEnabled())
420 dataLog("Compiling block ", *block, "\n");
424 // Make sure that any blocks created while lowering code in the high block have the frequency of
425 // the high block. This is appropriate because B3 doesn't need precise frequencies. It just needs
426 // something roughly approximate for things like register allocation.
427 m_out.setFrequency(m_highBlock->executionCount);
429 LBasicBlock lowBlock = m_blocks.get(m_highBlock);
// Find the next high block that actually exists (block indices can have gaps).
432 for (BlockIndex nextBlockIndex = m_highBlock->index + 1; nextBlockIndex < m_graph.numBlocks(); ++nextBlockIndex) {
433 m_nextHighBlock = m_graph.block(nextBlockIndex);
437 m_nextLowBlock = m_nextHighBlock ? m_blocks.get(m_nextHighBlock) : 0;
439 // All of this effort to find the next block gives us the ability to keep the
440 // generated IR in roughly program order. This ought not affect the performance
441 // of the generated code (since we expect B3 to reorder things) but it will
442 // make IR dumps easier to read.
443 m_out.appendTo(lowBlock, m_nextLowBlock);
// Debug option: deliberately crash in FTL code (body elided in this excerpt).
445 if (Options::ftlCrashes())
// If control-flow analysis proved this block unreachable, emit a crash instead of
// lowering it — the abstract state here would be meaningless.
448 if (!m_highBlock->cfaHasVisited) {
449 if (verboseCompilationEnabled())
450 dataLog("Bailing because CFA didn't reach.\n");
451 crash(m_highBlock, nullptr);
455 m_availabilityCalculator.beginBlock(m_highBlock);
458 m_state.beginBasicBlock(m_highBlock);
// compileNode() returning false terminates lowering of this block early.
460 for (m_nodeIndex = 0; m_nodeIndex < m_highBlock->size(); ++m_nodeIndex) {
461 if (!compileNode(m_nodeIndex))
// Called when the abstract state becomes invalid mid-block: marks this block's
// dominated blocks as not-CFA-visited so they bail too instead of consuming
// values that were never defined.
466 void safelyInvalidateAfterTermination()
468 if (verboseCompilationEnabled())
469 dataLog("Bailing.\n");
472 // Invalidate dominated blocks. Under normal circumstances we would expect
473 // them to be invalidated already. But you can have the CFA become more
474 // precise over time because the structures of objects change on the main
475 // thread. Failing to do this would result in weird crashes due to a value
476 // being used but not defined. Race conditions FTW!
477 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
478 DFG::BasicBlock* target = m_graph.block(blockIndex);
481 if (m_graph.m_ssaDominators->dominates(m_highBlock, target)) {
482 if (verboseCompilationEnabled())
483 dataLog("Block ", *target, " will bail also.\n");
// Clearing cfaHasVisited makes compileBlock() crash-out this block as well.
484 target->cfaHasVisited = false;
// Lowers the single DFG node at nodeIndex within the current high block.
// Returns bool: per the loop in compileBlock(), false stops lowering the rest of
// the block (e.g. when the abstract state is no longer valid). The body is one
// large dispatch switch on m_node->op(), delegating each opcode to its
// compileXxx() helper; many case labels are elided in this excerpt.
489 bool compileNode(unsigned nodeIndex)
// Bail if a prior node's effects invalidated the abstract state.
491 if (!m_state.isValid()) {
492 safelyInvalidateAfterTermination();
496 m_node = m_highBlock->at(nodeIndex);
497 m_origin = m_node->origin;
498 m_out.setOrigin(m_node);
500 if (verboseCompilationEnabled())
501 dataLog("Lowering ", m_node, "\n");
503 m_availableRecoveries.shrink(0);
// Let the abstract interpreter see this node before we lower it, so type checks
// below can be elided when the edge types are already proven.
505 m_interpreter.startExecuting();
506 m_interpreter.executeKnownEdgeTypes(m_node);
508 switch (m_node->op()) {
518 compileDoubleConstant();
521 compileInt52Constant();
524 compileLazyJSConstant();
530 compileDoubleAsInt32();
539 compileValueToInt32();
541 case BooleanToNumber:
542 compileBooleanToNumber();
544 case ExtractOSREntryLocal:
545 compileExtractOSREntryLocal();
547 case ExtractCatchLocal:
548 compileExtractCatchLocal();
560 case CallObjectConstructor:
561 compileToObjectOrCallObjectConstructor();
574 compileArithAddOrSub();
590 compileArithMinOrMax();
599 compileArithRandom();
617 compileArithFRound();
620 compileArithNegate();
644 compileUInt32ToNumber();
647 compileCheckStructure();
649 case CheckStructureOrEmpty:
650 compileCheckStructureOrEmpty();
656 compileCheckNotEmpty();
659 compileCheckBadCell();
661 case CheckStringIdent:
662 compileCheckStringIdent();
665 compileGetExecutable();
668 case ArrayifyToStructure:
672 compilePutStructure();
675 compileGetById(AccessType::TryGet);
679 compileGetById(AccessType::Get);
681 case GetByIdWithThis:
682 compileGetByIdWithThis();
688 compileHasOwnProperty();
695 case PutByIdWithThis:
696 compilePutByIdWithThis();
700 compilePutAccessorById();
702 case PutGetterSetterById:
703 compilePutGetterSetterById();
707 compilePutAccessorByVal();
713 compileDeleteByVal();
716 case GetButterflyWithoutCaging:
717 compileGetButterfly();
719 case ConstantStoragePointer:
720 compileConstantStoragePointer();
722 case GetIndexedPropertyStorage:
723 compileGetIndexedPropertyStorage();
729 compileGetArrayLength();
731 case GetVectorLength:
732 compileGetVectorLength();
735 compileCheckInBounds();
740 case GetMyArgumentByVal:
741 case GetMyArgumentByValOutOfBounds:
742 compileGetMyArgumentByVal();
744 case GetByValWithThis:
745 compileGetByValWithThis();
752 case PutByValWithThis:
753 compilePutByValWithThis();
757 case AtomicsCompareExchange:
758 case AtomicsExchange:
764 compileAtomicsReadModifyWrite();
766 case AtomicsIsLockFree:
767 compileAtomicsIsLockFree();
769 case DefineDataProperty:
770 compileDefineDataProperty();
772 case DefineAccessorProperty:
773 compileDefineAccessorProperty();
785 compileArrayIndexOf();
787 case CreateActivation:
788 compileCreateActivation();
791 compilePushWithScope();
794 case NewGeneratorFunction:
795 case NewAsyncGeneratorFunction:
796 case NewAsyncFunction:
797 compileNewFunction();
799 case CreateDirectArguments:
800 compileCreateDirectArguments();
802 case CreateScopedArguments:
803 compileCreateScopedArguments();
805 case CreateClonedArguments:
806 compileCreateClonedArguments();
811 case NewStringObject:
812 compileNewStringObject();
817 case NewArrayWithSpread:
818 compileNewArrayWithSpread();
824 compileNewArrayBuffer();
826 case NewArrayWithSize:
827 compileNewArrayWithSize();
830 compileNewTypedArray();
832 case GetTypedArrayByteOffset:
833 compileGetTypedArrayByteOffset();
836 compileGetPrototypeOf();
838 case AllocatePropertyStorage:
839 compileAllocatePropertyStorage();
841 case ReallocatePropertyStorage:
842 compileReallocatePropertyStorage();
844 case NukeStructureAndSetButterfly:
845 compileNukeStructureAndSetButterfly();
851 case CallStringConstructor:
852 compileToStringOrCallStringConstructor();
855 compileToPrimitive();
861 compileStringCharAt();
863 case StringCharCodeAt:
864 compileStringCharCodeAt();
866 case StringFromCharCode:
867 compileStringFromCharCode();
870 case GetGetterSetterByOffset:
871 compileGetByOffset();
879 case MultiGetByOffset:
880 compileMultiGetByOffset();
883 compilePutByOffset();
885 case MultiPutByOffset:
886 compileMultiPutByOffset();
889 case GetGlobalLexicalVariable:
890 compileGetGlobalVariable();
892 case PutGlobalVariable:
893 compilePutGlobalVariable();
896 compileNotifyWrite();
901 case GetArgumentCountIncludingThis:
902 compileGetArgumentCountIncludingThis();
910 case GetGlobalObject:
911 compileGetGlobalObject();
914 compileGetGlobalThis();
917 compileGetClosureVar();
920 compilePutClosureVar();
922 case GetFromArguments:
923 compileGetFromArguments();
926 compilePutToArguments();
929 compileGetArgument();
934 case CompareStrictEq:
935 compileCompareStrictEq();
938 compileCompareLess();
941 compileCompareLessEq();
944 compileCompareGreater();
946 case CompareGreaterEq:
947 compileCompareGreaterEq();
950 compileCompareBelow();
953 compileCompareBelowEq();
956 compileCompareEqPtr();
962 case TailCallInlinedCaller:
964 compileCallOrConstruct();
967 case DirectTailCallInlinedCaller:
968 case DirectConstruct:
970 compileDirectCallOrConstruct();
976 case CallForwardVarargs:
977 case TailCallVarargs:
978 case TailCallVarargsInlinedCaller:
979 case TailCallForwardVarargs:
980 case TailCallForwardVarargsInlinedCaller:
981 case ConstructVarargs:
982 case ConstructForwardVarargs:
983 compileCallOrConstructVarargs();
989 compileLoadVarargs();
992 compileForwardVarargs();
1003 case DFG::EntrySwitch:
1004 compileEntrySwitch();
1010 compileForceOSRExit();
// CPUIntrinsic is only reachable on builds that support it; otherwise it is a
// compiler bug to see it here.
1014 compileCPUIntrinsic();
1016 RELEASE_ASSERT_NOT_REACHED();
1022 case ThrowStaticError:
1023 compileThrowStaticError();
1025 case InvalidationPoint:
1026 compileInvalidationPoint();
1032 compileIsUndefined();
1040 case IsCellWithType:
1041 compileIsCellWithType();
1046 case NormalizeMapKey:
1047 compileNormalizeMapKey();
1050 compileGetMapBucket();
1052 case GetMapBucketHead:
1053 compileGetMapBucketHead();
1055 case GetMapBucketNext:
1056 compileGetMapBucketNext();
1058 case LoadKeyFromMapBucket:
1059 compileLoadKeyFromMapBucket();
1061 case LoadValueFromMapBucket:
1062 compileLoadValueFromMapBucket();
1071 compileWeakMapGet();
1076 case IsObjectOrNull:
1077 compileIsObjectOrNull();
1080 compileIsFunction();
1082 case IsTypedArrayView:
1083 compileIsTypedArrayView();
1091 case CheckTypeInfoFlags:
1092 compileCheckTypeInfoFlags();
1094 case OverridesHasInstance:
1095 compileOverridesHasInstance();
1098 compileInstanceOf();
1100 case InstanceOfCustom:
1101 compileInstanceOfCustom();
1103 case CountExecution:
1104 compileCountExecution();
1106 case SuperSamplerBegin:
1107 compileSuperSamplerBegin();
1109 case SuperSamplerEnd:
1110 compileSuperSamplerEnd();
1113 case FencedStoreBarrier:
1114 compileStoreBarrier();
1116 case HasIndexedProperty:
1117 compileHasIndexedProperty();
1119 case HasGenericProperty:
1120 compileHasGenericProperty();
1122 case HasStructureProperty:
1123 compileHasStructureProperty();
1125 case GetDirectPname:
1126 compileGetDirectPname();
1128 case GetEnumerableLength:
1129 compileGetEnumerableLength();
1131 case GetPropertyEnumerator:
1132 compileGetPropertyEnumerator();
1134 case GetEnumeratorStructurePname:
1135 compileGetEnumeratorStructurePname();
1137 case GetEnumeratorGenericPname:
1138 compileGetEnumeratorGenericPname();
1141 compileToIndexString();
1143 case CheckStructureImmediate:
1144 compileCheckStructureImmediate();
1146 case MaterializeNewObject:
1147 compileMaterializeNewObject();
1149 case MaterializeCreateActivation:
1150 compileMaterializeCreateActivation();
// Traps are only lowered when polling traps are enabled.
1153 if (Options::usePollingTraps())
1154 compileCheckTraps();
1157 compileCreateRest();
1160 compileGetRestLength();
1163 compileRegExpExec();
1166 compileRegExpTest();
1171 case SetFunctionName:
1172 compileSetFunctionName();
1175 case StringReplaceRegExp:
1176 compileStringReplace();
1178 case GetRegExpObjectLastIndex:
1179 compileGetRegExpObjectLastIndex();
1181 case SetRegExpObjectLastIndex:
1182 compileSetRegExpObjectLastIndex();
1184 case LogShadowChickenPrologue:
1185 compileLogShadowChickenPrologue();
1187 case LogShadowChickenTail:
1188 compileLogShadowChickenTail();
1190 case RecordRegExpCachedResult:
1191 compileRecordRegExpCachedResult();
1193 case ResolveScopeForHoistingFuncDeclInEval:
1194 compileResolveScopeForHoistingFuncDeclInEval();
1197 compileResolveScope();
1200 compileGetDynamicVar();
1203 compilePutDynamicVar();
1206 compileUnreachable();
1209 compileStringSlice();
1212 compileToLowerCase();
1214 case NumberToStringWithRadix:
1215 compileNumberToStringWithRadix();
1217 case NumberToStringWithValidRadixConstant:
1218 compileNumberToStringWithValidRadixConstant();
1221 compileCheckSubClass();
1227 compileCallDOMGetter();
// Phantom nodes: no code is emitted for these; they exist only for OSR bookkeeping.
1235 case PhantomNewObject:
1236 case PhantomNewFunction:
1237 case PhantomNewGeneratorFunction:
1238 case PhantomNewAsyncGeneratorFunction:
1239 case PhantomNewAsyncFunction:
1240 case PhantomCreateActivation:
1241 case PhantomDirectArguments:
1242 case PhantomCreateRest:
1244 case PhantomNewArrayWithSpread:
1245 case PhantomClonedArguments:
1249 case InitializeEntrypointArguments:
// Any opcode not handled above should have been rejected before reaching the FTL.
1252 DFG_CRASH(m_graph, m_node, "Unrecognized node in FTL backend");
1256 if (m_node->isTerminal())
// The node we just lowered may itself have invalidated the state (e.g. a proven
// OSR exit); propagate the bail to dominated blocks as well.
1259 if (!m_state.isValid()) {
1260 safelyInvalidateAfterTermination();
// Keep availability and abstract-interpreter effects in sync after lowering.
1264 m_availabilityCalculator.executeNode(m_node);
1265 m_interpreter.executeEffects(nodeIndex);
// Lowers a DFG Upsilon: computes the incoming value for the node's target Phi
// using the lowering helper that matches the child edge's use kind, then wires
// that value into the pre-created B3 Phi (from m_phis) as an incoming value.
1270 void compileUpsilon()
1272 LValue upsilonValue = nullptr;
1273 switch (m_node->child1().useKind()) {
1275 upsilonValue = lowDouble(m_node->child1());
1279 upsilonValue = lowInt32(m_node->child1());
1282 upsilonValue = lowInt52(m_node->child1());
1285 case KnownBooleanUse:
1286 upsilonValue = lowBoolean(m_node->child1());
1290 upsilonValue = lowCell(m_node->child1());
1293 upsilonValue = lowJSValue(m_node->child1());
// Any other use kind is a compiler bug.
1296 DFG_CRASH(m_graph, m_node, "Bad use kind");
1299 ValueFromBlock upsilon = m_out.anchor(upsilonValue);
1300 LValue phiNode = m_phis.get(m_node->phi());
1301 m_out.addIncomingToPhi(phiNode, upsilon);
// Phi lowering (enclosing signature elided in this excerpt — presumably
// compilePhi): appends the pre-created B3 Phi to the current block and then
// publishes it through the setter matching the node's result kind.
1306 LValue phi = m_phis.get(m_node);
1307 m_out.m_block->append(phi);
1309 switch (m_node->flags() & NodeResultMask) {
1310 case NodeResultDouble:
1313 case NodeResultInt32:
1316 case NodeResultInt52:
1319 case NodeResultBoolean:
1326 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers a double constant node: materializes the node's numeric value as a B3
// double constant and records it as this node's double representation.
1331 void compileDoubleConstant()
1333 setDouble(m_out.constDouble(m_node->asNumber()));
// Lowers an Int52 constant: records both the shifted Int52 form (value left-
// shifted by JSValue::int52ShiftAmount) and the unshifted strict-Int52 form.
// NOTE(review): the left shift of a signed int64_t relies on the engine-wide
// assumption that the value fits in 52 bits — confirm against JSValue's Int52
// invariants if touching this.
1336 void compileInt52Constant()
1338 int64_t value = m_node->asAnyInt();
1340 setInt52(m_out.constInt64(value << JSValue::int52ShiftAmount));
1341 setStrictInt52(m_out.constInt64(value));
// Lowers a LazyJSValue constant via a patchpoint: the value is emitted into the
// result register at code-generation time (value.emit), and the patchpoint is
// marked effect-free so B3 may freely move or CSE it.
1344 void compileLazyJSConstant()
1346 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
1347 LazyJSValue value = m_node->lazyJSValue();
1348 patchpoint->setGenerator(
1349 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
1350 value.emit(jit, JSValueRegs(params[0].gpr()));
1352 patchpoint->effects = Effects::none();
1353 setJSValue(patchpoint);
// Lowers DoubleRep: converts the child edge to an unboxed double, with the
// strategy depending on the child's use kind (RealNumberUse fast path, a general
// number path with optional non-number conversion for NotCellUse, and an Int52
// path). Type checks are emitted through FTL_TYPE_CHECK where speculation is
// needed.
1356 void compileDoubleRep()
1358 switch (m_node->child1().useKind()) {
1359 case RealNumberUse: {
1360 LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);
1362 LValue doubleValue = unboxDouble(value);
1364 LBasicBlock intCase = m_out.newBlock();
1365 LBasicBlock continuation = m_out.newBlock();
// Fast path: if the unboxed double is not NaN, it was a real boxed double.
// A NaN here means the value was not a double (it failed the unbox), so fall
// to the int case, which speculates and converts an unboxed int32 instead.
1367 ValueFromBlock fastResult = m_out.anchor(doubleValue);
1369 m_out.doubleEqual(doubleValue, doubleValue),
1370 usually(continuation), rarely(intCase));
1372 LBasicBlock lastNext = m_out.appendTo(intCase, continuation);
1375 jsValueValue(value), m_node->child1(), SpecBytecodeRealNumber,
1376 isNotInt32(value, provenType(m_node->child1()) & ~SpecDoubleReal));
1377 ValueFromBlock slowResult = m_out.anchor(m_out.intToDouble(unboxInt32(value)));
1378 m_out.jump(continuation);
1380 m_out.appendTo(continuation, lastNext);
1382 setDouble(m_out.phi(Double, fastResult, slowResult));
// General number path (case labels elided in this excerpt). NotCellUse
// additionally converts undefined/null/booleans to doubles.
1388 bool shouldConvertNonNumber = m_node->child1().useKind() == NotCellUse;
1390 LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);
1392 LBasicBlock intCase = m_out.newBlock();
1393 LBasicBlock doubleTesting = m_out.newBlock();
1394 LBasicBlock doubleCase = m_out.newBlock();
1395 LBasicBlock nonDoubleCase = m_out.newBlock();
1396 LBasicBlock continuation = m_out.newBlock();
1399 isNotInt32(value, provenType(m_node->child1())),
1400 unsure(doubleTesting), unsure(intCase));
// Int32: unbox and convert to double.
1402 LBasicBlock lastNext = m_out.appendTo(intCase, doubleTesting);
1404 ValueFromBlock intToDouble = m_out.anchor(
1405 m_out.intToDouble(unboxInt32(value)));
1406 m_out.jump(continuation);
// Not an int32: test whether it is a boxed double/number at all.
1408 m_out.appendTo(doubleTesting, doubleCase);
1409 LValue valueIsNumber = isNumber(value, provenType(m_node->child1()));
1410 m_out.branch(valueIsNumber, usually(doubleCase), rarely(nonDoubleCase));
1412 m_out.appendTo(doubleCase, nonDoubleCase);
1413 ValueFromBlock unboxedDouble = m_out.anchor(unboxDouble(value));
1414 m_out.jump(continuation);
1416 if (shouldConvertNonNumber) {
// NotCellUse: undefined -> NaN, null -> 0, true -> 1, false -> 0; anything
// else (i.e. a cell) fails the type check below.
1417 LBasicBlock undefinedCase = m_out.newBlock();
1418 LBasicBlock testNullCase = m_out.newBlock();
1419 LBasicBlock nullCase = m_out.newBlock();
1420 LBasicBlock testBooleanTrueCase = m_out.newBlock();
1421 LBasicBlock convertBooleanTrueCase = m_out.newBlock();
1422 LBasicBlock convertBooleanFalseCase = m_out.newBlock();
1424 m_out.appendTo(nonDoubleCase, undefinedCase);
1425 LValue valueIsUndefined = m_out.equal(value, m_out.constInt64(ValueUndefined));
1426 m_out.branch(valueIsUndefined, unsure(undefinedCase), unsure(testNullCase));
1428 m_out.appendTo(undefinedCase, testNullCase);
1429 ValueFromBlock convertedUndefined = m_out.anchor(m_out.constDouble(PNaN));
1430 m_out.jump(continuation);
1432 m_out.appendTo(testNullCase, nullCase);
1433 LValue valueIsNull = m_out.equal(value, m_out.constInt64(ValueNull));
1434 m_out.branch(valueIsNull, unsure(nullCase), unsure(testBooleanTrueCase));
1436 m_out.appendTo(nullCase, testBooleanTrueCase);
1437 ValueFromBlock convertedNull = m_out.anchor(m_out.constDouble(0));
1438 m_out.jump(continuation);
1440 m_out.appendTo(testBooleanTrueCase, convertBooleanTrueCase);
1441 LValue valueIsBooleanTrue = m_out.equal(value, m_out.constInt64(ValueTrue));
1442 m_out.branch(valueIsBooleanTrue, unsure(convertBooleanTrueCase), unsure(convertBooleanFalseCase));
1444 m_out.appendTo(convertBooleanTrueCase, convertBooleanFalseCase);
1445 ValueFromBlock convertedTrue = m_out.anchor(m_out.constDouble(1));
1446 m_out.jump(continuation);
1448 m_out.appendTo(convertBooleanFalseCase, continuation);
// If the value is not `false` either, it must be a cell: speculate against it.
1450 LValue valueIsNotBooleanFalse = m_out.notEqual(value, m_out.constInt64(ValueFalse));
1451 FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), ~SpecCellCheck, valueIsNotBooleanFalse);
1452 ValueFromBlock convertedFalse = m_out.anchor(m_out.constDouble(0));
1453 m_out.jump(continuation);
1455 m_out.appendTo(continuation, lastNext);
1456 setDouble(m_out.phi(Double, intToDouble, unboxedDouble, convertedUndefined, convertedNull, convertedTrue, convertedFalse));
// NumberUse: a non-number is simply a failed speculation — unconditional exit.
1459 m_out.appendTo(nonDoubleCase, continuation);
1460 FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecBytecodeNumber, m_out.booleanTrue);
1461 m_out.unreachable();
1463 m_out.appendTo(continuation, lastNext);
1465 setDouble(m_out.phi(Double, intToDouble, unboxedDouble));
// Int52 path (case label elided): strict Int52 converts exactly to double.
1470 setDouble(strictInt52ToDouble(lowStrictInt52(m_node->child1())));
1475 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers DoubleAsInt32: converts the child double to an int32, with a negative-
// zero check when the node's arithmetic mode requires one.
1479 void compileDoubleAsInt32()
1481 LValue integerValue = convertDoubleToInt32(lowDouble(m_node->child1()), shouldCheckNegativeZero(m_node->arithMode()));
1482 setInt32(integerValue);
// Lowers ValueRep: re-boxes an unboxed representation (double or strict Int52)
// into a JSValue.
1485 void compileValueRep()
1487 switch (m_node->child1().useKind()) {
1488 case DoubleRepUse: {
1489 LValue value = lowDouble(m_node->child1());
// Impure NaNs must be purified to the canonical PNaN before boxing, since an
// arbitrary NaN bit pattern would collide with the boxed-value encoding.
1491 if (m_interpreter.needsTypeCheck(m_node->child1(), ~SpecDoubleImpureNaN)) {
1492 value = m_out.select(
1493 m_out.doubleEqual(value, value), value, m_out.constDouble(PNaN));
1496 setJSValue(boxDouble(value));
// Int52 path (case label elided in this excerpt).
1501 setJSValue(strictInt52ToJSValue(lowStrictInt52(m_node->child1())));
1506 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers Int52Rep: converts the child to the strict Int52 representation,
// dispatching on use kind (int32 sign-extension, JSValue speculation, or
// double-to-int52 speculation).
1510 void compileInt52Rep()
1512 switch (m_node->child1().useKind()) {
// Int32 widens losslessly via sign extension.
1514 setStrictInt52(m_out.signExt32To64(lowInt32(m_node->child1())));
// JSValue case: speculating conversion; ManualOperandSpeculation because the
// check is performed by jsValueToStrictInt52 itself.
1519 jsValueToStrictInt52(
1520 m_node->child1(), lowJSValue(m_node->child1(), ManualOperandSpeculation)));
1523 case DoubleRepAnyIntUse:
1525 doubleToStrictInt52(
1526 m_node->child1(), lowDouble(m_node->child1())));
1530 RELEASE_ASSERT_NOT_REACHED();
// Lowers ValueToInt32: truncates the child to an int32, choosing the cheapest
// path based on which lowered representation is already available.
1534 void compileValueToInt32()
1536 switch (m_node->child1().useKind()) {
// Int52: plain truncation.
1538 setInt32(m_out.castToInt32(lowStrictInt52(m_node->child1())));
// Double: JS ToInt32 semantics on a double.
1542 setInt32(doubleToInt32(lowDouble(m_node->child1())));
// Untyped-ish case: prefer a cached int32 lowering if one already exists.
1547 LoweredNodeValue value = m_int32Values.get(m_node->child1().node());
1548 if (isValid(value)) {
1549 setInt32(value.value());
// Otherwise reuse a cached JSValue lowering and convert it.
1553 value = m_jsValueValues.get(m_node->child1().node());
1554 if (isValid(value)) {
1555 setInt32(numberOrNotCellToInt32(m_node->child1(), value.value()));
1559 // We'll basically just get here for constants. But it's good to have this
1560 // catch-all since we often add new representations into the mix.
1562 numberOrNotCellToInt32(
1564 lowJSValue(m_node->child1(), ManualOperandSpeculation)));
1569 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers BooleanToNumber: converts a boolean (or possibly-boolean JSValue)
// to a number, producing 0/1 for booleans and passing non-booleans through.
1574 void compileBooleanToNumber()
1576 switch (m_node->child1().useKind()) {
// Proven boolean: just zero-extend the 1-bit value to int32.
1578 setInt32(m_out.zeroExt(lowBoolean(m_node->child1()), Int32));
1583 LValue value = lowJSValue(m_node->child1());
// If the value is statically known to be boolean-or-bool-int32, the low bit
// is the numeric value; mask it out directly.
1585 if (!m_interpreter.needsTypeCheck(m_node->child1(), SpecBoolInt32 | SpecBoolean)) {
1586 setInt32(m_out.bitAnd(m_out.castToInt32(value), m_out.int32One));
// Otherwise branch: booleans get unboxed and re-tagged as an int-ish JSValue
// (OR with m_tagTypeNumber); everything else flows through unchanged.
1590 LBasicBlock booleanCase = m_out.newBlock();
1591 LBasicBlock continuation = m_out.newBlock();
1593 ValueFromBlock notBooleanResult = m_out.anchor(value);
1595 isBoolean(value, provenType(m_node->child1())),
1596 unsure(booleanCase), unsure(continuation));
1598 LBasicBlock lastNext = m_out.appendTo(booleanCase, continuation);
1599 ValueFromBlock booleanResult = m_out.anchor(m_out.bitOr(
1600 m_out.zeroExt(unboxBoolean(value), Int64), m_tagTypeNumber));
1601 m_out.jump(continuation);
1603 m_out.appendTo(continuation, lastNext);
1604 setJSValue(m_out.phi(Int64, booleanResult, notBooleanResult));
1609 RELEASE_ASSERT_NOT_REACHED();
// Lowers ExtractOSREntryLocal: loads the JSValue for one local out of the
// FTL-for-OSR-entry scratch buffer at an absolute address.
1614 void compileExtractOSREntryLocal()
1616 EncodedJSValue* buffer = static_cast<EncodedJSValue*>(
1617 m_ftlState.jitCode->ftlForOSREntry()->entryBuffer()->dataBuffer());
1618 setJSValue(m_out.load64(m_out.absolute(buffer + m_node->unlinkedLocal().toLocal())));
// Lowers ExtractCatchLocal: loads one entry from the catch OSR-entry buffer
// (shared via jitCode->common) by the node's catch OSR entry index.
1621 void compileExtractCatchLocal()
1623 EncodedJSValue* buffer = static_cast<EncodedJSValue*>(m_ftlState.jitCode->common.catchOSREntryBuffer->dataBuffer());
1624 setJSValue(m_out.load64(m_out.absolute(buffer + m_node->catchOSREntryIndex())));
// Lowers GetStack: loads a flushed local from the stack frame. Loads just the
// payload when the value is proven int32; otherwise loads the full 64 bits.
1627 void compileGetStack()
1629 StackAccessData* data = m_node->stackAccessData();
1630 AbstractValue& value = m_state.variables().operand(data->local);
1632 DFG_ASSERT(m_graph, m_node, isConcrete(data->format));
1633 DFG_ASSERT(m_graph, m_node, data->format != FlushedDouble); // This just happens to not arise for GetStacks, right now. It would be trivial to support.
1635 if (isInt32Speculation(value.m_type))
1636 setInt32(m_out.load32(payloadFor(data->machineLocal)));
1638 setJSValue(m_out.load64(addressFor(data->machineLocal)));
// Lowers PutStack: stores the child to its flushed stack slot, choosing the
// store width and lowering based on the slot's flush format.
1641 void compilePutStack()
1643 StackAccessData* data = m_node->stackAccessData();
1644 switch (data->format) {
1645 case FlushedJSValue: {
1646 LValue value = lowJSValue(m_node->child1());
1647 m_out.store64(value, addressFor(data->machineLocal));
1651 case FlushedDouble: {
1652 LValue value = lowDouble(m_node->child1());
1653 m_out.storeDouble(value, addressFor(data->machineLocal));
// Int32 only needs the payload half of the slot.
1657 case FlushedInt32: {
1658 LValue value = lowInt32(m_node->child1());
1659 m_out.store32(value, payloadFor(data->machineLocal));
1663 case FlushedInt52: {
1664 LValue value = lowInt52(m_node->child1());
1665 m_out.store64(value, addressFor(data->machineLocal));
// Cell case (case label elided in this view, presumably FlushedCell).
1670 LValue value = lowCell(m_node->child1());
1671 m_out.store64(value, addressFor(data->machineLocal));
1675 case FlushedBoolean: {
// Speculate first, then store the already-boxed JSValue form of the boolean.
1676 speculateBoolean(m_node->child1());
1678 lowJSValue(m_node->child1(), ManualOperandSpeculation),
1679 addressFor(data->machineLocal));
1684 DFG_CRASH(m_graph, m_node, "Bad flush format");
1691 DFG_NODE_DO_TO_CHILDREN(m_graph, m_node, speculate);
// Lowers ToObject / CallObjectConstructor: objects pass through on the fast
// path; non-objects (including non-cells) take a slow VM call that performs
// the appropriate conversion for the node's opcode.
1694 void compileToObjectOrCallObjectConstructor()
1696 LValue value = lowJSValue(m_node->child1());
1698 LBasicBlock isCellCase = m_out.newBlock();
1699 LBasicBlock slowCase = m_out.newBlock();
1700 LBasicBlock continuation = m_out.newBlock();
1702 m_out.branch(isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));
1704 LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
// Fast path: a cell that is already an object is returned unchanged.
1705 ValueFromBlock fastResult = m_out.anchor(value);
1706 m_out.branch(isObject(value), usually(continuation), rarely(slowCase));
1708 m_out.appendTo(slowCase, continuation);
1710 ValueFromBlock slowResult;
// ToObject passes the identifier (for error messages); CallObjectConstructor
// passes the frozen cell operand instead.
1711 if (m_node->op() == ToObject) {
1712 auto* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
1713 slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationToObject), m_callFrame, weakPointer(globalObject), value, m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
1715 slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationCallObjectConstructor), m_callFrame, frozenPointer(m_node->cellOperand()), value));
1716 m_out.jump(continuation);
1718 m_out.appendTo(continuation, lastNext);
1719 setJSValue(m_out.phi(Int64, fastResult, slowResult));
// Lowers ToThis: cells that do not override ToThis pass through unchanged;
// everything else calls the strict or sloppy operationToThis slow path.
1722 void compileToThis()
1724 LValue value = lowJSValue(m_node->child1());
1726 LBasicBlock isCellCase = m_out.newBlock();
1727 LBasicBlock slowCase = m_out.newBlock();
1728 LBasicBlock continuation = m_out.newBlock();
1731 isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));
1733 LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
1734 ValueFromBlock fastResult = m_out.anchor(value);
// Check the cell's type-info flags for the OverridesToThis bit; if set, we
// must take the slow path.
1737 m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoFlags),
1738 m_out.constInt32(OverridesToThis)),
1739 usually(continuation), rarely(slowCase));
1741 m_out.appendTo(slowCase, continuation);
// Strict mode changes ToThis semantics, so pick the operation accordingly.
1742 J_JITOperation_EJ function;
1743 if (m_graph.isStrictModeFor(m_node->origin.semantic))
1744 function = operationToThisStrict;
1746 function = operationToThis;
1747 ValueFromBlock slowResult = m_out.anchor(
1748 vmCall(Int64, m_out.operation(function), m_callFrame, value));
1749 m_out.jump(continuation);
1751 m_out.appendTo(continuation, lastNext);
1752 setJSValue(m_out.phi(Int64, fastResult, slowResult));
// Lowers ValueAdd via a binary math IC: allocates a JITAddIC seeded with the
// baseline arith profile and hands off to the generic compileMathIC.
1755 void compileValueAdd()
1757 ArithProfile* arithProfile = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic)->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
1758 JITAddIC* addIC = codeBlock()->addJITAddIC(arithProfile);
1759 auto repatchingFunction = operationValueAddOptimize;
1760 auto nonRepatchingFunction = operationValueAdd;
1761 compileMathIC(addIC, repatchingFunction, nonRepatchingFunction);
// Lowers a unary math op through a JIT math IC. Emits a patchpoint whose
// generator tries to generate the IC inline; if that succeeds, slow-path
// jumps are linked to a late-path call to the repatching/non-repatching
// operation. If inline generation fails, the operation is called directly.
1764 template <typename Generator>
1765 void compileMathIC(JITUnaryMathIC<Generator>* mathIC, FunctionPtr repatchingFunction, FunctionPtr nonRepatchingFunction)
1767 Node* node = m_node;
1769 LValue operand = lowJSValue(node->child1());
1771 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
1772 patchpoint->appendSomeRegister(operand);
// The IC's snippet code relies on the tag registers holding their usual
// values, so pin them as late-use registers.
1773 patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
1774 patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
1775 RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
1776 patchpoint->numGPScratchRegisters = 1;
1777 patchpoint->clobber(RegisterSet::macroScratchRegisters());
// Captured by value into the generator lambda; m_ftlState outlives codegen.
1778 State* state = &m_ftlState;
1779 patchpoint->setGenerator(
1780 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
1781 AllowMacroScratchRegisterUsage allowScratch(jit);
1783 Box<CCallHelpers::JumpList> exceptions =
1784 exceptionHandle->scheduleExitCreation(params)->jumps(jit);
1786 #if ENABLE(MATH_IC_STATS)
1787 auto inlineStart = jit.label();
1790 Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
1791 mathIC->m_generator = Generator(JSValueRegs(params[0].gpr()), JSValueRegs(params[1].gpr()), params.gpScratch(0));
// Profiling is handled by the DFG/FTL pipeline, not the IC snippet.
1793 bool shouldEmitProfiling = false;
1794 bool generatedInline = mathIC->generateInline(jit, *mathICGenerationState, shouldEmitProfiling);
1796 if (generatedInline) {
1797 ASSERT(!mathICGenerationState->slowPathJumps.empty());
1798 auto done = jit.label();
// Slow path is emitted out-of-line as a late path and jumps back to done.
1799 params.addLatePath([=] (CCallHelpers& jit) {
1800 AllowMacroScratchRegisterUsage allowScratch(jit);
1801 mathICGenerationState->slowPathJumps.link(&jit);
1802 mathICGenerationState->slowPathStart = jit.label();
1803 #if ENABLE(MATH_IC_STATS)
1804 auto slowPathStart = jit.label();
// The repatching variant also receives the IC so it can rewire itself.
1807 if (mathICGenerationState->shouldSlowPathRepatch) {
1808 SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
1809 repatchingFunction, params[0].gpr(), params[1].gpr(), CCallHelpers::TrustedImmPtr(mathIC));
1810 mathICGenerationState->slowPathCall = call.call();
1812 SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic,
1813 exceptions.get(), nonRepatchingFunction, params[0].gpr(), params[1].gpr());
1814 mathICGenerationState->slowPathCall = call.call();
1816 jit.jump().linkTo(done, &jit);
1818 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
1819 mathIC->finalizeInlineCode(*mathICGenerationState, linkBuffer);
1822 #if ENABLE(MATH_IC_STATS)
1823 auto slowPathEnd = jit.label();
1824 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
1825 size_t size = static_cast<char*>(linkBuffer.locationOf(slowPathEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(slowPathStart).executableAddress());
1826 mathIC->m_generatedCodeSize += size;
// Inline generation failed: fall back to always calling the operation.
1832 *state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
1833 nonRepatchingFunction, params[0].gpr(), params[1].gpr());
1836 #if ENABLE(MATH_IC_STATS)
1837 auto inlineEnd = jit.label();
1838 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
1839 size_t size = static_cast<char*>(linkBuffer.locationOf(inlineEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(inlineStart).executableAddress());
1840 mathIC->m_generatedCodeSize += size;
1845 setJSValue(patchpoint);
// Binary-operand counterpart of the unary compileMathIC above: same
// inline-IC-with-late-slow-path structure, but with two JSValue operands,
// snippet operand metadata, and two FP scratch registers for the generator.
1848 template <typename Generator>
1849 void compileMathIC(JITBinaryMathIC<Generator>* mathIC, FunctionPtr repatchingFunction, FunctionPtr nonRepatchingFunction)
1851 Node* node = m_node;
1853 LValue left = lowJSValue(node->child1());
1854 LValue right = lowJSValue(node->child2());
// Give the snippet generator the abstract result types of both operands.
1856 SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
1857 SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());
1859 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
1860 patchpoint->appendSomeRegister(left);
1861 patchpoint->appendSomeRegister(right);
// IC code assumes the tag registers hold their canonical values.
1862 patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
1863 patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
1864 RefPtr<PatchpointExceptionHandle> exceptionHandle =
1865 preparePatchpointForExceptions(patchpoint);
1866 patchpoint->numGPScratchRegisters = 1;
1867 patchpoint->numFPScratchRegisters = 2;
1868 patchpoint->clobber(RegisterSet::macroScratchRegisters());
1869 State* state = &m_ftlState;
1870 patchpoint->setGenerator(
1871 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
1872 AllowMacroScratchRegisterUsage allowScratch(jit);
1874 Box<CCallHelpers::JumpList> exceptions =
1875 exceptionHandle->scheduleExitCreation(params)->jumps(jit);
1877 #if ENABLE(MATH_IC_STATS)
1878 auto inlineStart = jit.label();
1881 Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
// params[0] is the result, params[1]/params[2] the operands.
1882 mathIC->m_generator = Generator(leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
1883 JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), params.fpScratch(0),
1884 params.fpScratch(1), params.gpScratch(0), InvalidFPRReg);
1886 bool shouldEmitProfiling = false;
1887 bool generatedInline = mathIC->generateInline(jit, *mathICGenerationState, shouldEmitProfiling);
1889 if (generatedInline) {
1890 ASSERT(!mathICGenerationState->slowPathJumps.empty());
1891 auto done = jit.label();
// Out-of-line slow path: call the operation, then jump back to done.
1892 params.addLatePath([=] (CCallHelpers& jit) {
1893 AllowMacroScratchRegisterUsage allowScratch(jit);
1894 mathICGenerationState->slowPathJumps.link(&jit);
1895 mathICGenerationState->slowPathStart = jit.label();
1896 #if ENABLE(MATH_IC_STATS)
1897 auto slowPathStart = jit.label();
1900 if (mathICGenerationState->shouldSlowPathRepatch) {
1901 SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
1902 repatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr(), CCallHelpers::TrustedImmPtr(mathIC));
1903 mathICGenerationState->slowPathCall = call.call();
1905 SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic,
1906 exceptions.get(), nonRepatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr());
1907 mathICGenerationState->slowPathCall = call.call();
1909 jit.jump().linkTo(done, &jit);
1911 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
1912 mathIC->finalizeInlineCode(*mathICGenerationState, linkBuffer);
1915 #if ENABLE(MATH_IC_STATS)
1916 auto slowPathEnd = jit.label();
1917 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
1918 size_t size = static_cast<char*>(linkBuffer.locationOf(slowPathEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(slowPathStart).executableAddress());
1919 mathIC->m_generatedCodeSize += size;
// Fallback when the IC could not be generated inline.
1925 *state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
1926 nonRepatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr());
1929 #if ENABLE(MATH_IC_STATS)
1930 auto inlineEnd = jit.label();
1931 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
1932 size_t size = static_cast<char*>(linkBuffer.locationOf(inlineEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(inlineStart).executableAddress());
1933 mathIC->m_generatedCodeSize += size;
1938 setJSValue(patchpoint);
// Lowers StrCat: calls the 2- or 3-argument string concat operation depending
// on whether child3 is present. ManualOperandSpeculation because the callee
// performs its own conversions.
1941 void compileStrCat()
1944 if (m_node->child3()) {
1946 Int64, m_out.operation(operationStrCat3), m_callFrame,
1947 lowJSValue(m_node->child1(), ManualOperandSpeculation),
1948 lowJSValue(m_node->child2(), ManualOperandSpeculation),
1949 lowJSValue(m_node->child3(), ManualOperandSpeculation));
1952 Int64, m_out.operation(operationStrCat2), m_callFrame,
1953 lowJSValue(m_node->child1(), ManualOperandSpeculation),
1954 lowJSValue(m_node->child2(), ManualOperandSpeculation));
// Lowers ArithAdd/ArithSub (and the untyped fall-through to a sub math IC).
// Int32/Int52 paths use checked or unchecked adds/subs per the arith mode;
// doubles never need overflow checks.
1959 void compileArithAddOrSub()
1961 bool isSub = m_node->op() == ArithSub;
1962 switch (m_node->binaryUseKind()) {
1964 LValue left = lowInt32(m_node->child1());
1965 LValue right = lowInt32(m_node->child2());
// Unchecked arithmetic when the arith mode says overflow can't matter.
1967 if (!shouldCheckOverflow(m_node->arithMode())) {
1968 setInt32(isSub ? m_out.sub(left, right) : m_out.add(left, right));
1972 CheckValue* result =
1973 isSub ? m_out.speculateSub(left, right) : m_out.speculateAdd(left, right);
1974 blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
// Int52 path: if neither operand can actually need all 52 bits, the add/sub
// cannot overflow the representation and no check is emitted.
1980 if (!abstractValue(m_node->child1()).couldBeType(SpecInt52Only)
1981 && !abstractValue(m_node->child2()).couldBeType(SpecInt52Only)) {
1983 LValue left = lowWhicheverInt52(m_node->child1(), kind);
1984 LValue right = lowInt52(m_node->child2(), kind);
1985 setInt52(isSub ? m_out.sub(left, right) : m_out.add(left, right), kind);
1989 LValue left = lowInt52(m_node->child1());
1990 LValue right = lowInt52(m_node->child2());
1991 CheckValue* result =
1992 isSub ? m_out.speculateSub(left, right) : m_out.speculateAdd(left, right);
1993 blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
1998 case DoubleRepUse: {
1999 LValue C1 = lowDouble(m_node->child1());
2000 LValue C2 = lowDouble(m_node->child2());
2002 setDouble(isSub ? m_out.doubleSub(C1, C2) : m_out.doubleAdd(C1, C2));
// Untyped add is not expected here (ValueAdd handles it) — crash.
2008 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Untyped subtract goes through a JITSubIC, mirroring compileValueAdd.
2012 ArithProfile* arithProfile = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic)->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
2013 JITSubIC* subIC = codeBlock()->addJITSubIC(arithProfile);
2014 auto repatchingFunction = operationValueSubOptimize;
2015 auto nonRepatchingFunction = operationValueSub;
2016 compileMathIC(subIC, repatchingFunction, nonRepatchingFunction);
2021 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers ArithClz32: count-leading-zeros directly for int32 operands, or via
// the operationArithClz32 VM call for untyped operands.
2026 void compileArithClz32()
2028 if (m_node->child1().useKind() == Int32Use || m_node->child1().useKind() == KnownInt32Use) {
2029 LValue operand = lowInt32(m_node->child1());
2030 setInt32(m_out.ctlz32(operand));
2033 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2034 LValue argument = lowJSValue(m_node->child1());
2035 LValue result = vmCall(Int32, m_out.operation(operationArithClz32), m_callFrame, argument);
// Lowers ArithMul. Int32/Int52 paths optionally speculate on overflow and on
// negative zero (a zero result with a negative operand must bail, since
// int multiplication can't represent -0). Doubles multiply directly; untyped
// goes through a JITMulIC.
2039 void compileArithMul()
2041 switch (m_node->binaryUseKind()) {
2043 LValue left = lowInt32(m_node->child1());
2044 LValue right = lowInt32(m_node->child2());
2048 if (!shouldCheckOverflow(m_node->arithMode()))
2049 result = m_out.mul(left, right);
2051 CheckValue* speculation = m_out.speculateMul(left, right);
2052 blessSpeculation(speculation, Overflow, noValue(), nullptr, m_origin);
2053 result = speculation;
// Negative zero: only a zero product can be -0, so the check is off the
// common path; either operand being negative then triggers a speculation failure.
2056 if (shouldCheckNegativeZero(m_node->arithMode())) {
2057 LBasicBlock slowCase = m_out.newBlock();
2058 LBasicBlock continuation = m_out.newBlock();
2061 m_out.notZero32(result), usually(continuation), rarely(slowCase));
2063 LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
2064 speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int32Zero));
2065 speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int32Zero));
2066 m_out.jump(continuation);
2067 m_out.appendTo(continuation, lastNext);
// Int52 path: operands are lowered in opposite shift kinds so the product
// lands in the right representation; presumably relies on lowWhicheverInt52's
// kind selection — structure matches the int32 path otherwise.
2076 LValue left = lowWhicheverInt52(m_node->child1(), kind);
2077 LValue right = lowInt52(m_node->child2(), opposite(kind));
2079 CheckValue* result = m_out.speculateMul(left, right);
2080 blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
2082 if (shouldCheckNegativeZero(m_node->arithMode())) {
2083 LBasicBlock slowCase = m_out.newBlock();
2084 LBasicBlock continuation = m_out.newBlock();
2087 m_out.notZero64(result), usually(continuation), rarely(slowCase));
2089 LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
2090 speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int64Zero));
2091 speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int64Zero));
2092 m_out.jump(continuation);
2093 m_out.appendTo(continuation, lastNext);
2100 case DoubleRepUse: {
2102 m_out.doubleMul(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
// Untyped multiply via a JITMulIC, mirroring compileValueAdd.
2107 ArithProfile* arithProfile = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic)->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
2108 JITMulIC* mulIC = codeBlock()->addJITMulIC(arithProfile);
2109 auto repatchingFunction = operationValueMulOptimize;
2110 auto nonRepatchingFunction = operationValueMul;
2111 compileMathIC(mulIC, repatchingFunction, nonRepatchingFunction);
2116 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers ArithDiv. The int32 path optionally speculates on negative zero
// (0 / negative) and on unsafe denominators (0 or the INT_MIN/-1 pair), then
// requires the division to be exact; otherwise it uses chillDiv. Doubles
// divide directly; untyped uses the JITDivGenerator snippet.
2121 void compileArithDiv()
2123 switch (m_node->binaryUseKind()) {
2125 LValue numerator = lowInt32(m_node->child1());
2126 LValue denominator = lowInt32(m_node->child2());
// -0 can only arise as 0 / negative, so the check is only on a zero numerator.
2128 if (shouldCheckNegativeZero(m_node->arithMode())) {
2129 LBasicBlock zeroNumerator = m_out.newBlock();
2130 LBasicBlock numeratorContinuation = m_out.newBlock();
2133 m_out.isZero32(numerator),
2134 rarely(zeroNumerator), usually(numeratorContinuation));
2136 LBasicBlock innerLastNext = m_out.appendTo(zeroNumerator, numeratorContinuation);
2139 NegativeZero, noValue(), 0, m_out.lessThan(denominator, m_out.int32Zero));
2141 m_out.jump(numeratorContinuation);
2143 m_out.appendTo(numeratorContinuation, innerLastNext);
2146 if (shouldCheckOverflow(m_node->arithMode())) {
2147 LBasicBlock unsafeDenominator = m_out.newBlock();
2148 LBasicBlock continuation = m_out.newBlock();
// denominator+1 > 1 (unsigned) iff denominator is neither 0 nor -1, the two
// values that can make idiv trap or overflow.
2150 LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
2152 m_out.above(adjustedDenominator, m_out.int32One),
2153 usually(continuation), rarely(unsafeDenominator));
2155 LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
2156 LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
2157 speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator));
2158 speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31));
2159 m_out.jump(continuation);
2161 m_out.appendTo(continuation, lastNext);
2162 LValue result = m_out.div(numerator, denominator);
// A non-exact division would produce a fractional JS result: bail to OSR.
2164 Overflow, noValue(), 0,
2165 m_out.notEqual(m_out.mul(result, denominator), numerator));
// Unchecked mode: chillDiv defines behavior for the unsafe cases.
2168 setInt32(m_out.chillDiv(numerator, denominator));
2173 case DoubleRepUse: {
2174 setDouble(m_out.doubleDiv(
2175 lowDouble(m_node->child1()), lowDouble(m_node->child2())));
2180 emitBinarySnippet<JITDivGenerator, NeedScratchFPR>(operationValueDiv);
2185 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers ArithMod. The int32 path mirrors compileArithDiv's unsafe-denominator
// speculation, then takes the remainder (mod or chillMod); a zero remainder
// with a negative numerator is -0 and must bail when negative zero matters.
2190 void compileArithMod()
2192 switch (m_node->binaryUseKind()) {
2194 LValue numerator = lowInt32(m_node->child1());
2195 LValue denominator = lowInt32(m_node->child2());
2198 if (shouldCheckOverflow(m_node->arithMode())) {
2199 LBasicBlock unsafeDenominator = m_out.newBlock();
2200 LBasicBlock continuation = m_out.newBlock();
// Same trick as ArithDiv: denominator+1 > 1 (unsigned) excludes 0 and -1.
2202 LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
2204 m_out.above(adjustedDenominator, m_out.int32One),
2205 usually(continuation), rarely(unsafeDenominator));
2207 LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
2208 LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
2209 speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator));
2210 speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31));
2211 m_out.jump(continuation);
2213 m_out.appendTo(continuation, lastNext);
2214 LValue result = m_out.mod(numerator, denominator);
// Unchecked mode: chillMod defines the unsafe cases.
2217 remainder = m_out.chillMod(numerator, denominator);
// JS: (-n) % d with zero remainder is -0; bail if we must distinguish it.
2219 if (shouldCheckNegativeZero(m_node->arithMode())) {
2220 LBasicBlock negativeNumerator = m_out.newBlock();
2221 LBasicBlock numeratorContinuation = m_out.newBlock();
2224 m_out.lessThan(numerator, m_out.int32Zero),
2225 unsure(negativeNumerator), unsure(numeratorContinuation));
2227 LBasicBlock innerLastNext = m_out.appendTo(negativeNumerator, numeratorContinuation);
2229 speculate(NegativeZero, noValue(), 0, m_out.isZero32(remainder));
2231 m_out.jump(numeratorContinuation);
2233 m_out.appendTo(numeratorContinuation, innerLastNext);
2236 setInt32(remainder);
2240 case DoubleRepUse: {
2242 m_out.doubleMod(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
2247 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers ArithMin/ArithMax. Int32 is a simple compare-and-select; the double
// path must additionally propagate NaN (any unordered comparison yields PNaN).
2252 void compileArithMinOrMax()
2254 switch (m_node->binaryUseKind()) {
2256 LValue left = lowInt32(m_node->child1());
2257 LValue right = lowInt32(m_node->child2());
2261 m_node->op() == ArithMin
2262 ? m_out.lessThan(left, right)
2263 : m_out.lessThan(right, left),
2268 case DoubleRepUse: {
2269 LValue left = lowDouble(m_node->child1());
2270 LValue right = lowDouble(m_node->child2());
2272 LBasicBlock notLessThan = m_out.newBlock();
2273 LBasicBlock continuation = m_out.newBlock();
2275 Vector<ValueFromBlock, 2> results;
// Fast case: left already wins the ordered comparison.
2277 results.append(m_out.anchor(left));
2279 m_node->op() == ArithMin
2280 ? m_out.doubleLessThan(left, right)
2281 : m_out.doubleGreaterThan(left, right),
2282 unsure(continuation), unsure(notLessThan));
2284 LBasicBlock lastNext = m_out.appendTo(notLessThan, continuation);
// Otherwise: right wins if the comparison is ordered; if unordered (NaN
// operand) the select falls through to PNaN.
2285 results.append(m_out.anchor(m_out.select(
2286 m_node->op() == ArithMin
2287 ? m_out.doubleGreaterThanOrEqual(left, right)
2288 : m_out.doubleLessThanOrEqual(left, right),
2289 right, m_out.constDouble(PNaN))));
2290 m_out.jump(continuation);
2292 m_out.appendTo(continuation, lastNext);
2293 setDouble(m_out.phi(Double, results));
2298 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Lowers ArithAbs. The int32 path uses the branchless mask trick
// (xor/add with the sign mask) and bails if the result is still negative
// (i.e. the input was INT_MIN) when overflow checking is on.
2303 void compileArithAbs()
2305 switch (m_node->child1().useKind()) {
2307 LValue value = lowInt32(m_node->child1());
// mask is all-ones for negative input, zero otherwise; (value + mask) ^ mask
// computes |value| without a branch.
2309 LValue mask = m_out.aShr(value, m_out.constInt32(31));
2310 LValue result = m_out.bitXor(mask, m_out.add(mask, value));
// abs(INT_MIN) overflows back to a negative value — speculate against it.
2312 if (shouldCheckOverflow(m_node->arithMode()))
2313 speculate(Overflow, noValue(), 0, m_out.lessThan(result, m_out.int32Zero));
2319 case DoubleRepUse: {
2320 setDouble(m_out.doubleAbs(lowDouble(m_node->child1())));
2325 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2326 LValue argument = lowJSValue(m_node->child1());
2327 LValue result = vmCall(Double, m_out.operation(operationArithAbs), m_callFrame, argument);
// Lowers ArithUnary (sin/cos/log-style ops selected by arithUnaryType):
// inline double op for DoubleRepUse, otherwise a VM call on the JSValue.
2334 void compileArithUnary()
2336 if (m_node->child1().useKind() == DoubleRepUse) {
2337 setDouble(m_out.doubleUnary(m_node->arithUnaryType(), lowDouble(m_node->child1())));
2340 LValue argument = lowJSValue(m_node->child1());
2341 LValue result = vmCall(Double, m_out.operation(DFG::arithUnaryOperation(m_node->arithUnaryType())), m_callFrame, argument);
// Lowers ArithPow. Small integer exponents use powi directly. The general
// double path implements ES pow edge cases inline before falling back to
// doublePow: NaN exponent, |base| == 1 with infinite exponent, and the
// exponent == +/-0.5 cases (which must use sqrt/1/sqrt with special handling
// of zero and infinite bases).
2345 void compileArithPow()
2347 if (m_node->child2().useKind() == Int32Use)
2348 setDouble(m_out.doublePowi(lowDouble(m_node->child1()), lowInt32(m_node->child2())));
2350 LValue base = lowDouble(m_node->child1());
2351 LValue exponent = lowDouble(m_node->child2());
// One block per edge-case test, in the order the tests are chained below.
2353 LBasicBlock integerExponentIsSmallBlock = m_out.newBlock();
2354 LBasicBlock integerExponentPowBlock = m_out.newBlock();
2355 LBasicBlock doubleExponentPowBlockEntry = m_out.newBlock();
2356 LBasicBlock nanExceptionBaseIsOne = m_out.newBlock();
2357 LBasicBlock nanExceptionExponentIsInfinity = m_out.newBlock();
2358 LBasicBlock testExponentIsOneHalf = m_out.newBlock();
2359 LBasicBlock handleBaseZeroExponentIsOneHalf = m_out.newBlock();
2360 LBasicBlock handleInfinityForExponentIsOneHalf = m_out.newBlock();
2361 LBasicBlock exponentIsOneHalfNormal = m_out.newBlock();
2362 LBasicBlock exponentIsOneHalfInfinity = m_out.newBlock();
2363 LBasicBlock testExponentIsNegativeOneHalf = m_out.newBlock();
2364 LBasicBlock testBaseZeroExponentIsNegativeOneHalf = m_out.newBlock();
2365 LBasicBlock handleBaseZeroExponentIsNegativeOneHalf = m_out.newBlock();
2366 LBasicBlock handleInfinityForExponentIsNegativeOneHalf = m_out.newBlock();
2367 LBasicBlock exponentIsNegativeOneHalfNormal = m_out.newBlock();
2368 LBasicBlock exponentIsNegativeOneHalfInfinity = m_out.newBlock();
2369 LBasicBlock powBlock = m_out.newBlock();
2370 LBasicBlock nanExceptionResultIsNaN = m_out.newBlock();
2371 LBasicBlock continuation = m_out.newBlock();
// If the exponent round-trips through int conversion unchanged, it is an
// integer and may be eligible for the fast powi path.
2373 LValue integerExponent = m_out.doubleToInt(exponent);
2374 LValue integerExponentConvertedToDouble = m_out.intToDouble(integerExponent);
2375 LValue exponentIsInteger = m_out.doubleEqual(exponent, integerExponentConvertedToDouble);
2376 m_out.branch(exponentIsInteger, unsure(integerExponentIsSmallBlock), unsure(doubleExponentPowBlockEntry));
2378 LBasicBlock lastNext = m_out.appendTo(integerExponentIsSmallBlock, integerExponentPowBlock);
2379 LValue integerExponentBelowMax = m_out.belowOrEqual(integerExponent, m_out.constInt32(maxExponentForIntegerMathPow));
2380 m_out.branch(integerExponentBelowMax, usually(integerExponentPowBlock), rarely(doubleExponentPowBlockEntry));
2382 m_out.appendTo(integerExponentPowBlock, doubleExponentPowBlockEntry);
2383 ValueFromBlock powDoubleIntResult = m_out.anchor(m_out.doublePowi(base, integerExponent));
2384 m_out.jump(continuation);
2386 // If y is NaN, the result is NaN.
2387 m_out.appendTo(doubleExponentPowBlockEntry, nanExceptionBaseIsOne);
2388 LValue exponentIsNaN;
// Only emit the NaN test when abstract interpretation says NaN is possible.
2389 if (provenType(m_node->child2()) & SpecDoubleNaN)
2390 exponentIsNaN = m_out.doubleNotEqualOrUnordered(exponent, exponent);
2392 exponentIsNaN = m_out.booleanFalse;
2393 m_out.branch(exponentIsNaN, rarely(nanExceptionResultIsNaN), usually(nanExceptionBaseIsOne));
2395 // If abs(x) is 1 and y is +infinity, the result is NaN.
2396 // If abs(x) is 1 and y is -infinity, the result is NaN.
2398 // Test if base == 1.
2399 m_out.appendTo(nanExceptionBaseIsOne, nanExceptionExponentIsInfinity);
2400 LValue absoluteBase = m_out.doubleAbs(base);
2401 LValue absoluteBaseIsOne = m_out.doubleEqual(absoluteBase, m_out.constDouble(1));
2402 m_out.branch(absoluteBaseIsOne, rarely(nanExceptionExponentIsInfinity), usually(testExponentIsOneHalf));
2404 // Test if abs(y) == Infinity.
2405 m_out.appendTo(nanExceptionExponentIsInfinity, testExponentIsOneHalf);
2406 LValue absoluteExponent = m_out.doubleAbs(exponent);
2407 LValue absoluteExponentIsInfinity = m_out.doubleEqual(absoluteExponent, m_out.constDouble(std::numeric_limits<double>::infinity()));
2408 m_out.branch(absoluteExponentIsInfinity, rarely(nanExceptionResultIsNaN), usually(testExponentIsOneHalf));
2410 // If y == 0.5 or y == -0.5, handle it through SQRT.
2411 // We have to be careful with -0 and -Infinity.
2414 m_out.appendTo(testExponentIsOneHalf, handleBaseZeroExponentIsOneHalf);
2415 LValue exponentIsOneHalf = m_out.doubleEqual(exponent, m_out.constDouble(0.5));
2416 m_out.branch(exponentIsOneHalf, rarely(handleBaseZeroExponentIsOneHalf), usually(testExponentIsNegativeOneHalf));
// base == 0 (or -0) with exponent 0.5 yields +0, not sqrt(-0) == -0.
2419 m_out.appendTo(handleBaseZeroExponentIsOneHalf, handleInfinityForExponentIsOneHalf);
2420 LValue baseIsZeroExponentIsOneHalf = m_out.doubleEqual(base, m_out.doubleZero);
2421 ValueFromBlock zeroResultExponentIsOneHalf = m_out.anchor(m_out.doubleZero);
2422 m_out.branch(baseIsZeroExponentIsOneHalf, rarely(continuation), usually(handleInfinityForExponentIsOneHalf));
2424 // Test if abs(x) == Infinity.
2425 m_out.appendTo(handleInfinityForExponentIsOneHalf, exponentIsOneHalfNormal);
2426 LValue absoluteBaseIsInfinityOneHalf = m_out.doubleEqual(absoluteBase, m_out.constDouble(std::numeric_limits<double>::infinity()));
2427 m_out.branch(absoluteBaseIsInfinityOneHalf, rarely(exponentIsOneHalfInfinity), usually(exponentIsOneHalfNormal));
2429 // The exponent is 0.5, the base is finite or NaN, we can use SQRT.
2430 m_out.appendTo(exponentIsOneHalfNormal, exponentIsOneHalfInfinity);
2431 ValueFromBlock sqrtResult = m_out.anchor(m_out.doubleSqrt(base));
2432 m_out.jump(continuation);
2434 // The exponent is 0.5, the base is infinite, the result is always infinite.
2435 m_out.appendTo(exponentIsOneHalfInfinity, testExponentIsNegativeOneHalf);
2436 ValueFromBlock sqrtInfinityResult = m_out.anchor(m_out.constDouble(std::numeric_limits<double>::infinity()));
2437 m_out.jump(continuation);
2439 // Test if y == -0.5
2440 m_out.appendTo(testExponentIsNegativeOneHalf, testBaseZeroExponentIsNegativeOneHalf);
2441 LValue exponentIsNegativeOneHalf = m_out.doubleEqual(exponent, m_out.constDouble(-0.5));
2442 m_out.branch(exponentIsNegativeOneHalf, rarely(testBaseZeroExponentIsNegativeOneHalf), usually(powBlock));
// base == 0 with exponent -0.5 is 1/sqrt(0) == Infinity.
2445 m_out.appendTo(testBaseZeroExponentIsNegativeOneHalf, handleBaseZeroExponentIsNegativeOneHalf);
2446 LValue baseIsZeroExponentIsNegativeOneHalf = m_out.doubleEqual(base, m_out.doubleZero);
2447 m_out.branch(baseIsZeroExponentIsNegativeOneHalf, rarely(handleBaseZeroExponentIsNegativeOneHalf), usually(handleInfinityForExponentIsNegativeOneHalf));
2449 m_out.appendTo(handleBaseZeroExponentIsNegativeOneHalf, handleInfinityForExponentIsNegativeOneHalf);
2450 ValueFromBlock oneOverSqrtZeroResult = m_out.anchor(m_out.constDouble(std::numeric_limits<double>::infinity()));
2451 m_out.jump(continuation);
2453 // Test if abs(x) == Infinity.
2454 m_out.appendTo(handleInfinityForExponentIsNegativeOneHalf, exponentIsNegativeOneHalfNormal);
2455 LValue absoluteBaseIsInfinityNegativeOneHalf = m_out.doubleEqual(absoluteBase, m_out.constDouble(std::numeric_limits<double>::infinity()));
2456 m_out.branch(absoluteBaseIsInfinityNegativeOneHalf, rarely(exponentIsNegativeOneHalfInfinity), usually(exponentIsNegativeOneHalfNormal));
2458 // The exponent is -0.5, the base is finite or NaN, we can use 1/SQRT.
2459 m_out.appendTo(exponentIsNegativeOneHalfNormal, exponentIsNegativeOneHalfInfinity);
2460 LValue sqrtBase = m_out.doubleSqrt(base);
2461 ValueFromBlock oneOverSqrtResult = m_out.anchor(m_out.div(m_out.constDouble(1.), sqrtBase));
2462 m_out.jump(continuation);
2464 // The exponent is -0.5, the base is infinite, the result is always zero.
2465 m_out.appendTo(exponentIsNegativeOneHalfInfinity, powBlock);
2466 ValueFromBlock oneOverSqrtInfinityResult = m_out.anchor(m_out.doubleZero);
2467 m_out.jump(continuation);
// General case: no edge case matched; defer to the pow implementation.
2469 m_out.appendTo(powBlock, nanExceptionResultIsNaN);
2470 ValueFromBlock powResult = m_out.anchor(m_out.doublePow(base, exponent));
2471 m_out.jump(continuation);
2473 m_out.appendTo(nanExceptionResultIsNaN, continuation);
2474 ValueFromBlock pureNan = m_out.anchor(m_out.constDouble(PNaN));
2475 m_out.jump(continuation);
2477 m_out.appendTo(continuation, lastNext);
2478 setDouble(m_out.phi(Double, powDoubleIntResult, zeroResultExponentIsOneHalf, sqrtResult, sqrtInfinityResult, oneOverSqrtZeroResult, oneOverSqrtResult, oneOverSqrtInfinityResult, powResult, pureNan));
2482 void compileArithRandom()
2484 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
2486 // Inlined WeakRandom::advance().
2487 // uint64_t x = m_low;
2488 void* lowAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset();
2489 LValue low = m_out.load64(m_out.absolute(lowAddress));
2490 // uint64_t y = m_high;
2491 void* highAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset();
2492 LValue high = m_out.load64(m_out.absolute(highAddress));
2494 m_out.store64(high, m_out.absolute(lowAddress));
2497 LValue phase1 = m_out.bitXor(m_out.shl(low, m_out.constInt64(23)), low);
2500 LValue phase2 = m_out.bitXor(m_out.lShr(phase1, m_out.constInt64(17)), phase1);
2502 // x ^= y ^ (y >> 26);
2503 LValue phase3 = m_out.bitXor(m_out.bitXor(high, m_out.lShr(high, m_out.constInt64(26))), phase2);
2506 m_out.store64(phase3, m_out.absolute(highAddress));
2509 LValue random64 = m_out.add(phase3, high);
2511 // Extract random 53bit. [0, 53] bit is safe integer number ranges in double representation.
2512 LValue random53 = m_out.bitAnd(random64, m_out.constInt64((1ULL << 53) - 1));
2514 LValue double53Integer = m_out.intToDouble(random53);
2516 // Convert `(53bit double integer value) / (1 << 53)` to `(53bit double integer value) * (1.0 / (1 << 53))`.
2517 // In latter case, `1.0 / (1 << 53)` will become a double value represented as (mantissa = 0 & exp = 970, it means 1e-(2**54)).
2518 static const double scale = 1.0 / (1ULL << 53);
2520 // Multiplying 1e-(2**54) with the double integer does not change anything of the mantissa part of the double integer.
2521 // It just reduces the exp part of the given 53bit double integer.
2522 // (Except for 0.0. This is specially handled and in this case, exp just becomes 0.)
2523 // Now we get 53bit precision random double value in [0, 1).
2524 LValue result = m_out.doubleMul(double53Integer, m_out.constDouble(scale));
2529 void compileArithRound()
2531 if (m_node->child1().useKind() == DoubleRepUse) {
2532 LValue result = nullptr;
2533 if (producesInteger(m_node->arithRoundingMode()) && !shouldCheckNegativeZero(m_node->arithRoundingMode())) {
2534 LValue value = lowDouble(m_node->child1());
2535 result = m_out.doubleFloor(m_out.doubleAdd(value, m_out.constDouble(0.5)));
2537 LBasicBlock realPartIsMoreThanHalf = m_out.newBlock();
2538 LBasicBlock continuation = m_out.newBlock();
2540 LValue value = lowDouble(m_node->child1());
2541 LValue integerValue = m_out.doubleCeil(value);
2542 ValueFromBlock integerValueResult = m_out.anchor(integerValue);
2544 LValue realPart = m_out.doubleSub(integerValue, value);
2546 m_out.branch(m_out.doubleGreaterThanOrUnordered(realPart, m_out.constDouble(0.5)), unsure(realPartIsMoreThanHalf), unsure(continuation));
2548 LBasicBlock lastNext = m_out.appendTo(realPartIsMoreThanHalf, continuation);
2549 LValue integerValueRoundedDown = m_out.doubleSub(integerValue, m_out.constDouble(1));
2550 ValueFromBlock integerValueRoundedDownResult = m_out.anchor(integerValueRoundedDown);
2551 m_out.jump(continuation);
2552 m_out.appendTo(continuation, lastNext);
2554 result = m_out.phi(Double, integerValueResult, integerValueRoundedDownResult);
2557 if (producesInteger(m_node->arithRoundingMode())) {
2558 LValue integerValue = convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode()));
2559 setInt32(integerValue);
2565 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2566 LValue argument = lowJSValue(m_node->child1());
2567 setJSValue(vmCall(Int64, m_out.operation(operationArithRound), m_callFrame, argument));
2570 void compileArithFloor()
2572 if (m_node->child1().useKind() == DoubleRepUse) {
2573 LValue value = lowDouble(m_node->child1());
2574 LValue integerValue = m_out.doubleFloor(value);
2575 if (producesInteger(m_node->arithRoundingMode()))
2576 setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode())));
2578 setDouble(integerValue);
2581 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2582 LValue argument = lowJSValue(m_node->child1());
2583 setJSValue(vmCall(Int64, m_out.operation(operationArithFloor), m_callFrame, argument));
2586 void compileArithCeil()
2588 if (m_node->child1().useKind() == DoubleRepUse) {
2589 LValue value = lowDouble(m_node->child1());
2590 LValue integerValue = m_out.doubleCeil(value);
2591 if (producesInteger(m_node->arithRoundingMode()))
2592 setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode())));
2594 setDouble(integerValue);
2597 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2598 LValue argument = lowJSValue(m_node->child1());
2599 setJSValue(vmCall(Int64, m_out.operation(operationArithCeil), m_callFrame, argument));
2602 void compileArithTrunc()
2604 if (m_node->child1().useKind() == DoubleRepUse) {
2605 LValue value = lowDouble(m_node->child1());
2606 LValue result = m_out.doubleTrunc(value);
2607 if (producesInteger(m_node->arithRoundingMode()))
2608 setInt32(convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode())));
2613 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2614 LValue argument = lowJSValue(m_node->child1());
2615 setJSValue(vmCall(Int64, m_out.operation(operationArithTrunc), m_callFrame, argument));
2618 void compileArithSqrt()
2620 if (m_node->child1().useKind() == DoubleRepUse) {
2621 setDouble(m_out.doubleSqrt(lowDouble(m_node->child1())));
2624 LValue argument = lowJSValue(m_node->child1());
2625 LValue result = vmCall(Double, m_out.operation(operationArithSqrt), m_callFrame, argument);
2629 void compileArithFRound()
2631 if (m_node->child1().useKind() == DoubleRepUse) {
2632 setDouble(m_out.fround(lowDouble(m_node->child1())));
2635 LValue argument = lowJSValue(m_node->child1());
2636 LValue result = vmCall(Double, m_out.operation(operationArithFRound), m_callFrame, argument);
2640 void compileArithNegate()
2642 switch (m_node->child1().useKind()) {
2644 LValue value = lowInt32(m_node->child1());
2647 if (!shouldCheckOverflow(m_node->arithMode()))
2648 result = m_out.neg(value);
2649 else if (!shouldCheckNegativeZero(m_node->arithMode())) {
2650 CheckValue* check = m_out.speculateSub(m_out.int32Zero, value);
2651 blessSpeculation(check, Overflow, noValue(), nullptr, m_origin);
2654 speculate(Overflow, noValue(), 0, m_out.testIsZero32(value, m_out.constInt32(0x7fffffff)));
2655 result = m_out.neg(value);
2663 if (!abstractValue(m_node->child1()).couldBeType(SpecInt52Only)) {
2665 LValue value = lowWhicheverInt52(m_node->child1(), kind);
2666 LValue result = m_out.neg(value);
2667 if (shouldCheckNegativeZero(m_node->arithMode()))
2668 speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
2669 setInt52(result, kind);
2673 LValue value = lowInt52(m_node->child1());
2674 CheckValue* result = m_out.speculateSub(m_out.int64Zero, value);
2675 blessSpeculation(result, Int52Overflow, noValue(), nullptr, m_origin);
2676 if (shouldCheckNegativeZero(m_node->arithMode()))
2677 speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
2682 case DoubleRepUse: {
2683 setDouble(m_out.doubleNeg(lowDouble(m_node->child1())));
2688 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
2689 ArithProfile* arithProfile = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic)->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex);
2690 JITNegIC* negIC = codeBlock()->addJITNegIC(arithProfile);
2691 auto repatchingFunction = operationArithNegateOptimize;
2692 auto nonRepatchingFunction = operationArithNegate;
2693 compileMathIC(negIC, repatchingFunction, nonRepatchingFunction);
2698 void compileBitAnd()
2700 if (m_node->isBinaryUseKind(UntypedUse)) {
2701 emitBinaryBitOpSnippet<JITBitAndGenerator>(operationValueBitAnd);
2704 setInt32(m_out.bitAnd(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
2709 if (m_node->isBinaryUseKind(UntypedUse)) {
2710 emitBinaryBitOpSnippet<JITBitOrGenerator>(operationValueBitOr);
2713 setInt32(m_out.bitOr(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
2716 void compileBitXor()
2718 if (m_node->isBinaryUseKind(UntypedUse)) {
2719 emitBinaryBitOpSnippet<JITBitXorGenerator>(operationValueBitXor);
2722 setInt32(m_out.bitXor(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
2725 void compileBitRShift()
2727 if (m_node->isBinaryUseKind(UntypedUse)) {
2728 emitRightShiftSnippet(JITRightShiftGenerator::SignedShift);
2731 setInt32(m_out.aShr(
2732 lowInt32(m_node->child1()),
2733 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
2736 void compileBitLShift()
2738 if (m_node->isBinaryUseKind(UntypedUse)) {
2739 emitBinaryBitOpSnippet<JITLeftShiftGenerator>(operationValueBitLShift);
2743 lowInt32(m_node->child1()),
2744 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
2747 void compileBitURShift()
2749 if (m_node->isBinaryUseKind(UntypedUse)) {
2750 emitRightShiftSnippet(JITRightShiftGenerator::UnsignedShift);
2753 setInt32(m_out.lShr(
2754 lowInt32(m_node->child1()),
2755 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
2758 void compileUInt32ToNumber()
2760 LValue value = lowInt32(m_node->child1());
2762 if (doesOverflow(m_node->arithMode())) {
2763 setStrictInt52(m_out.zeroExtPtr(value));
2767 speculate(Overflow, noValue(), 0, m_out.lessThan(value, m_out.int32Zero));
2771 void compileCheckStructure()
2774 if (m_node->child1()->hasConstant())
2775 exitKind = BadConstantCache;
2777 exitKind = BadCache;
2779 switch (m_node->child1().useKind()) {
2781 case KnownCellUse: {
2782 LValue cell = lowCell(m_node->child1());
2785 m_out.load32(cell, m_heaps.JSCell_structureID), jsValueValue(cell),
2786 exitKind, m_node->structureSet(),
2787 [&] (RegisteredStructure structure) {
2788 return weakStructureID(structure);
2793 case CellOrOtherUse: {
2794 LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);
2796 LBasicBlock cellCase = m_out.newBlock();
2797 LBasicBlock notCellCase = m_out.newBlock();
2798 LBasicBlock continuation = m_out.newBlock();
2801 isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase));
2803 LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
2805 m_out.load32(value, m_heaps.JSCell_structureID), jsValueValue(value),
2806 exitKind, m_node->structureSet(),
2807 [&] (RegisteredStructure structure) {
2808 return weakStructureID(structure);
2810 m_out.jump(continuation);
2812 m_out.appendTo(notCellCase, continuation);
2813 FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecCell | SpecOther, isNotOther(value));
2814 m_out.jump(continuation);
2816 m_out.appendTo(continuation, lastNext);
2821 DFG_CRASH(m_graph, m_node, "Bad use kind");
2826 void compileCheckStructureOrEmpty()
2829 if (m_node->child1()->hasConstant())
2830 exitKind = BadConstantCache;
2832 exitKind = BadCache;
2834 LValue cell = lowCell(m_node->child1());
2835 bool maySeeEmptyValue = m_interpreter.forNode(m_node->child1()).m_type & SpecEmpty;
2836 LBasicBlock notEmpty;
2837 LBasicBlock continuation;
2838 LBasicBlock lastNext;
2839 if (maySeeEmptyValue) {
2840 notEmpty = m_out.newBlock();
2841 continuation = m_out.newBlock();
2842 m_out.branch(m_out.isZero64(cell), unsure(continuation), unsure(notEmpty));
2843 lastNext = m_out.appendTo(notEmpty, continuation);
2847 m_out.load32(cell, m_heaps.JSCell_structureID), jsValueValue(cell),
2848 exitKind, m_node->structureSet(),
2849 [&] (RegisteredStructure structure) {
2850 return weakStructureID(structure);
2853 if (maySeeEmptyValue) {
2854 m_out.jump(continuation);
2855 m_out.appendTo(continuation, lastNext);
2859 void compileCheckCell()
2861 LValue cell = lowCell(m_node->child1());
2864 BadCell, jsValueValue(cell), m_node->child1().node(),
2865 m_out.notEqual(cell, weakPointer(m_node->cellOperand()->cell())));
2868 void compileCheckBadCell()
2873 void compileCheckNotEmpty()
2875 speculate(TDZFailure, noValue(), nullptr, m_out.isZero64(lowJSValue(m_node->child1())));
2878 void compileCheckStringIdent()
2880 UniquedStringImpl* uid = m_node->uidOperand();
2881 LValue stringImpl = lowStringIdent(m_node->child1());
2882 speculate(BadIdent, noValue(), nullptr, m_out.notEqual(stringImpl, m_out.constIntPtr(uid)));
2885 void compileGetExecutable()
2887 LValue cell = lowCell(m_node->child1());
2888 speculateFunction(m_node->child1(), cell);
2889 setJSValue(m_out.loadPtr(cell, m_heaps.JSFunction_executable));
2892 void compileArrayify()
2894 LValue cell = lowCell(m_node->child1());
2895 LValue property = !!m_node->child2() ? lowInt32(m_node->child2()) : 0;
2897 LBasicBlock unexpectedStructure = m_out.newBlock();
2898 LBasicBlock continuation = m_out.newBlock();
2900 auto isUnexpectedArray = [&] (LValue cell) {
2901 if (m_node->op() == Arrayify)
2902 return m_out.logicalNot(isArrayTypeForArrayify(cell, m_node->arrayMode()));
2904 ASSERT(m_node->op() == ArrayifyToStructure);
2905 return m_out.notEqual(m_out.load32(cell, m_heaps.JSCell_structureID), weakStructureID(m_node->structure()));
2908 m_out.branch(isUnexpectedArray(cell), rarely(unexpectedStructure), usually(continuation));
2910 LBasicBlock lastNext = m_out.appendTo(unexpectedStructure, continuation);
2913 switch (m_node->arrayMode().type()) {
2916 case Array::Contiguous:
2918 Uncountable, noValue(), 0,
2919 m_out.aboveOrEqual(property, m_out.constInt32(MIN_SPARSE_ARRAY_INDEX)));
2926 switch (m_node->arrayMode().type()) {
2928 vmCall(Void, m_out.operation(operationEnsureInt32), m_callFrame, cell);
2931 vmCall(Void, m_out.operation(operationEnsureDouble), m_callFrame, cell);
2933 case Array::Contiguous:
2934 vmCall(Void, m_out.operation(operationEnsureContiguous), m_callFrame, cell);
2936 case Array::ArrayStorage:
2937 case Array::SlowPutArrayStorage:
2938 vmCall(Void, m_out.operation(operationEnsureArrayStorage), m_callFrame, cell);
2941 DFG_CRASH(m_graph, m_node, "Bad array type");
2945 speculate(BadIndexingType, jsValueValue(cell), 0, isUnexpectedArray(cell));
2946 m_out.jump(continuation);
2948 m_out.appendTo(continuation, lastNext);
2951 void compilePutStructure()
2953 m_ftlState.jitCode->common.notifyCompilingStructureTransition(m_graph.m_plan, codeBlock(), m_node);
2955 RegisteredStructure oldStructure = m_node->transition()->previous;
2956 RegisteredStructure newStructure = m_node->transition()->next;
2957 ASSERT_UNUSED(oldStructure, oldStructure->indexingType() == newStructure->indexingType());
2958 ASSERT(oldStructure->typeInfo().inlineTypeFlags() == newStructure->typeInfo().inlineTypeFlags());
2959 ASSERT(oldStructure->typeInfo().type() == newStructure->typeInfo().type());
2961 LValue cell = lowCell(m_node->child1());
2963 weakStructureID(newStructure),
2964 cell, m_heaps.JSCell_structureID);
2967 void compileGetById(AccessType type)
2969 ASSERT(type == AccessType::Get || type == AccessType::TryGet);
2970 switch (m_node->child1().useKind()) {
2972 setJSValue(getById(lowCell(m_node->child1()), type));
2977 // This is pretty weird, since we duplicate the slow path both here and in the
2978 // code generated by the IC. We should investigate making this less bad.
2979 // https://bugs.webkit.org/show_bug.cgi?id=127830
2980 LValue value = lowJSValue(m_node->child1());
2982 LBasicBlock cellCase = m_out.newBlock();
2983 LBasicBlock notCellCase = m_out.newBlock();
2984 LBasicBlock continuation = m_out.newBlock();
2987 isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase));
2989 LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
2990 ValueFromBlock cellResult = m_out.anchor(getById(value, type));
2991 m_out.jump(continuation);
2993 J_JITOperation_EJI getByIdFunction;
2994 if (type == AccessType::Get)
2995 getByIdFunction = operationGetByIdGeneric;
2997 getByIdFunction = operationTryGetByIdGeneric;
2999 m_out.appendTo(notCellCase, continuation);
3000 ValueFromBlock notCellResult = m_out.anchor(vmCall(
3001 Int64, m_out.operation(getByIdFunction),
3003 m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
3004 m_out.jump(continuation);
3006 m_out.appendTo(continuation, lastNext);
3007 setJSValue(m_out.phi(Int64, cellResult, notCellResult));
3012 DFG_CRASH(m_graph, m_node, "Bad use kind");
3017 void compileGetByIdWithThis()
3019 if (m_node->child1().useKind() == CellUse && m_node->child2().useKind() == CellUse)
3020 setJSValue(getByIdWithThis(lowCell(m_node->child1()), lowCell(m_node->child2())));
3022 LValue base = lowJSValue(m_node->child1());
3023 LValue thisValue = lowJSValue(m_node->child2());
3025 LBasicBlock baseCellCase = m_out.newBlock();
3026 LBasicBlock notCellCase = m_out.newBlock();
3027 LBasicBlock thisValueCellCase = m_out.newBlock();
3028 LBasicBlock continuation = m_out.newBlock();
3031 isCell(base, provenType(m_node->child1())), unsure(baseCellCase), unsure(notCellCase));
3033 LBasicBlock lastNext = m_out.appendTo(baseCellCase, thisValueCellCase);
3036 isCell(thisValue, provenType(m_node->child2())), unsure(thisValueCellCase), unsure(notCellCase));
3038 m_out.appendTo(thisValueCellCase, notCellCase);
3039 ValueFromBlock cellResult = m_out.anchor(getByIdWithThis(base, thisValue));
3040 m_out.jump(continuation);
3042 m_out.appendTo(notCellCase, continuation);
3043 ValueFromBlock notCellResult = m_out.anchor(vmCall(
3044 Int64, m_out.operation(operationGetByIdWithThis),
3045 m_callFrame, base, thisValue,
3046 m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
3047 m_out.jump(continuation);
3049 m_out.appendTo(continuation, lastNext);
3050 setJSValue(m_out.phi(Int64, cellResult, notCellResult));
3055 void compileGetByValWithThis()
3057 LValue base = lowJSValue(m_node->child1());
3058 LValue thisValue = lowJSValue(m_node->child2());
3059 LValue subscript = lowJSValue(m_node->child3());
3061 LValue result = vmCall(Int64, m_out.operation(operationGetByValWithThis), m_callFrame, base, thisValue, subscript);
3065 void compilePutByIdWithThis()
3067 LValue base = lowJSValue(m_node->child1());
3068 LValue thisValue = lowJSValue(m_node->child2());
3069 LValue value = lowJSValue(m_node->child3());
3071 vmCall(Void, m_out.operation(m_graph.isStrictModeFor(m_node->origin.semantic) ? operationPutByIdWithThisStrict : operationPutByIdWithThis),
3072 m_callFrame, base, thisValue, value, m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()]));
3075 void compilePutByValWithThis()
3077 LValue base = lowJSValue(m_graph.varArgChild(m_node, 0));
3078 LValue thisValue = lowJSValue(m_graph.varArgChild(m_node, 1));
3079 LValue property = lowJSValue(m_graph.varArgChild(m_node, 2));
3080 LValue value = lowJSValue(m_graph.varArgChild(m_node, 3));
3082 vmCall(Void, m_out.operation(m_graph.isStrictModeFor(m_node->origin.semantic) ? operationPutByValWithThisStrict : operationPutByValWithThis),
3083 m_callFrame, base, thisValue, property, value);
3086 void compileAtomicsReadModifyWrite()
3088 TypedArrayType type = m_node->arrayMode().typedArrayType();
3089 unsigned numExtraArgs = numExtraAtomicsArgs(m_node->op());
3090 Edge baseEdge = m_graph.child(m_node, 0);
3091 Edge indexEdge = m_graph.child(m_node, 1);
3092 Edge argEdges[maxNumExtraAtomicsArgs];
3093 for (unsigned i = numExtraArgs; i--;)
3094 argEdges[i] = m_graph.child(m_node, 2 + i);
3095 Edge storageEdge = m_graph.child(m_node, 2 + numExtraArgs);
3097 auto operation = [&] () -> LValue {
3098 switch (m_node->op()) {
3100 return m_out.operation(operationAtomicsAdd);
3102 return m_out.operation(operationAtomicsAnd);
3103 case AtomicsCompareExchange:
3104 return m_out.operation(operationAtomicsCompareExchange);
3105 case AtomicsExchange:
3106 return m_out.operation(operationAtomicsExchange);
3108 return m_out.operation(operationAtomicsLoad);
3110 return m_out.operation(operationAtomicsOr);
3112 return m_out.operation(operationAtomicsStore);
3114 return m_out.operation(operationAtomicsSub);
3116 return m_out.operation(operationAtomicsXor);
3118 RELEASE_ASSERT_NOT_REACHED();
3124 Vector<LValue> args;
3125 args.append(m_callFrame);
3126 args.append(lowJSValue(baseEdge));
3127 args.append(lowJSValue(indexEdge));
3128 for (unsigned i = 0; i < numExtraArgs; ++i)
3129 args.append(lowJSValue(argEdges[i]));
3130 LValue result = vmCall(Int64, operation(), args);
3135 LValue index = lowInt32(indexEdge);
3137 for (unsigned i = numExtraArgs; i--;)
3138 args[i] = getIntTypedArrayStoreOperand(argEdges[i]);
3139 LValue storage = lowStorage(storageEdge);
3141 TypedPointer pointer = pointerIntoTypedArray(storage, index, type);
3142 Width width = widthForBytes(elementSize(type));
3147 auto sanitizeResult&n