2 * Copyright (C) 2013-2019 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "FTLLowerDFGToB3.h"
32 #include "AirGenerationContext.h"
33 #include "AllowMacroScratchRegisterUsage.h"
34 #include "AllowMacroScratchRegisterUsageIf.h"
35 #include "AtomicsObject.h"
36 #include "B3CheckValue.h"
37 #include "B3FenceValue.h"
38 #include "B3PatchpointValue.h"
39 #include "B3SlotBaseValue.h"
40 #include "B3StackmapGenerationParams.h"
41 #include "B3ValueInlines.h"
42 #include "CallFrameShuffler.h"
43 #include "CodeBlockWithJITType.h"
44 #include "DFGAbstractInterpreterInlines.h"
45 #include "DFGCapabilities.h"
46 #include "DFGDoesGC.h"
47 #include "DFGDominators.h"
48 #include "DFGInPlaceAbstractState.h"
49 #include "DFGLivenessAnalysisPhase.h"
50 #include "DFGMayExit.h"
51 #include "DFGOSRAvailabilityAnalysisPhase.h"
52 #include "DFGOSRExitFuzz.h"
53 #include "DirectArguments.h"
54 #include "FTLAbstractHeapRepository.h"
55 #include "FTLAvailableRecovery.h"
56 #include "FTLExceptionTarget.h"
57 #include "FTLForOSREntryJITCode.h"
58 #include "FTLFormattedValue.h"
59 #include "FTLLazySlowPathCall.h"
60 #include "FTLLoweredNodeValue.h"
61 #include "FTLOperations.h"
62 #include "FTLOutput.h"
63 #include "FTLPatchpointExceptionHandle.h"
64 #include "FTLSnippetParams.h"
65 #include "FTLThunks.h"
66 #include "FTLWeightedTarget.h"
67 #include "JITAddGenerator.h"
68 #include "JITBitAndGenerator.h"
69 #include "JITBitOrGenerator.h"
70 #include "JITBitXorGenerator.h"
71 #include "JITDivGenerator.h"
72 #include "JITInlineCacheGenerator.h"
73 #include "JITLeftShiftGenerator.h"
74 #include "JITMathIC.h"
75 #include "JITMulGenerator.h"
76 #include "JITRightShiftGenerator.h"
77 #include "JITSubGenerator.h"
78 #include "JSAsyncFunction.h"
79 #include "JSAsyncGeneratorFunction.h"
80 #include "JSCInlines.h"
81 #include "JSGeneratorFunction.h"
82 #include "JSImmutableButterfly.h"
83 #include "JSLexicalEnvironment.h"
85 #include "OperandsInlines.h"
86 #include "ProbeContext.h"
87 #include "RegExpObject.h"
88 #include "ScopedArguments.h"
89 #include "ScopedArgumentsTable.h"
90 #include "ScratchRegisterAllocator.h"
91 #include "SetupVarargsFrame.h"
92 #include "ShadowChicken.h"
93 #include "StructureStubInfo.h"
94 #include "SuperSampler.h"
95 #include "ThunkGenerators.h"
96 #include "VirtualRegister.h"
100 #include <wtf/Gigacage.h>
101 #include <wtf/RecursableLambda.h>
102 #include <wtf/StdUnorderedSet.h>
// NOTE(review): RELEASE_ASSERT is redefined locally so that a failing assertion in this
// file reports the file/line/function/expression via WTFReportAssertionFailure. Only the
// reporting half of the macro is visible here; the crash half is presumably below — confirm
// against the full file.
104 #undef RELEASE_ASSERT
105 #define RELEASE_ASSERT(assertion) do { \
106 if (!(assertion)) { \
107 WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion); \
112 namespace JSC { namespace FTL {
// Monotonic counter used to build unique dump names ("jsBody_<n>_...") when verbose
// compilation logging is enabled; atomic, presumably because FTL compilations can run
// concurrently on multiple threads — confirm against the compiler-thread setup.
119 std::atomic<int> compileCounter;
// Crash handler invoked from FTL-generated code that was believed to be unreachable.
// Logs which code block and basic block (and, when known, which node) was reached;
// NO_RETURN_DUE_TO_CRASH marks that control never returns from here.
122 NO_RETURN_DUE_TO_CRASH static void ftlUnreachable(
123 CodeBlock* codeBlock, BlockIndex blockIndex, unsigned nodeIndex)
125 dataLog("Crashing in thought-to-be-unreachable FTL-generated code for ", pointerDump(codeBlock), " at basic block #", blockIndex);
// nodeIndex == UINT_MAX encodes "no specific node to report".
126 if (nodeIndex != UINT_MAX)
127 dataLog(", node @", nodeIndex);
133 // Using this instead of typeCheck() helps to reduce the load on B3, by creating
134 // significantly less dead code.
// The macro evaluates its arguments exactly once, asks the abstract interpreter whether
// a check is actually needed for this edge/type combination, and only emits the
// typeCheck() (an OSR-exit guard) when it is. FTL_TYPE_CHECK is the common BadType case.
135 #define FTL_TYPE_CHECK_WITH_EXIT_KIND(exitKind, lowValue, highValue, typesPassedThrough, failCondition) do { \
136 FormattedValue _ftc_lowValue = (lowValue); \
137 Edge _ftc_highValue = (highValue); \
138 SpeculatedType _ftc_typesPassedThrough = (typesPassedThrough); \
139 if (!m_interpreter.needsTypeCheck(_ftc_highValue, _ftc_typesPassedThrough)) \
141 typeCheck(_ftc_lowValue, _ftc_highValue, _ftc_typesPassedThrough, (failCondition), exitKind); \
144 #define FTL_TYPE_CHECK(lowValue, highValue, typesPassedThrough, failCondition) \
145 FTL_TYPE_CHECK_WITH_EXIT_KIND(BadType, lowValue, highValue, typesPassedThrough, failCondition)
148 WTF_MAKE_NONCOPYABLE(LowerDFGToB3);
// Constructs the DFG->B3 lowering pass over the given compilation State.
// Index masking (Spectre mitigation) is decided once here from Options.
// When abstract-interpreter-state validation is enabled, precomputes the
// per-node live-in sets (m_liveInToNode) used later by validateAIState().
150 LowerDFGToB3(State& state)
151 : m_graph(state.graph)
154 , m_proc(*state.proc)
155 , m_availabilityCalculator(m_graph)
156 , m_state(state.graph)
157 , m_interpreter(state.graph, m_state)
158 , m_indexMaskingMode(Options::enableSpectreMitigations() ? IndexMaskingEnabled : IndexMaskingDisabled)
160 if (Options::validateAbstractInterpreterState()) {
161 performLivenessAnalysis(m_graph);
163 // We only use node liveness here, not combined liveness, as we only track
164 // AI state for live nodes.
165 for (DFG::BasicBlock* block : m_graph.blocksInNaturalOrder()) {
// Seed the live set with the primary projections live at the block's tail.
168 for (NodeFlowProjection node : block->ssa->liveAtTail) {
169 if (node.kind() == NodeFlowProjection::Primary)
170 live.addVoid(node.node());
// Walk the block backwards, recording the set of nodes live just before each node.
173 for (unsigned i = block->size(); i--; ) {
174 Node* node = block->at(i);
176 m_graph.doToChildren(node, [&] (Edge child) {
177 live.addVoid(child.node());
179 m_liveInToNode.add(node, live);
// NOTE(review): this span is the body of lower() — the main entry point that builds the
// whole B3 procedure for the function. Its declaration line is not visible in this
// extract; confirm against the full file. The visible flow: set up entrypoints and the
// catch prologue, emit the stack-overflow check, wire up argument speculations and the
// exception-handler block, lower each DFG block, then finalize the procedure.
187 State* state = &m_ftlState;
190 if (verboseCompilationEnabled()) {
192 "jsBody_", ++compileCounter, "_", codeBlock()->inferredName(),
193 "_", codeBlock()->hash());
194 m_proc.setNumEntrypoints(m_graph.m_numberOfEntrypoints);
199 CodeBlock* codeBlock = m_graph.m_codeBlock;
// Shared prologue for catch (OSR-entry) entrypoints: materialize the frame, optionally
// zero it, save callee-saves, and store the CodeBlock into the call frame header.
201 Ref<B3::Air::PrologueGenerator> catchPrologueGenerator = createSharedTask<B3::Air::PrologueGeneratorFunction>(
202 [codeBlock] (CCallHelpers& jit, B3::Air::Code& code) {
203 AllowMacroScratchRegisterUsage allowScratch(jit);
204 jit.addPtr(CCallHelpers::TrustedImm32(-code.frameSize()), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
205 if (Options::zeroStackFrame())
206 jit.clearStackFrame(GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister, GPRInfo::regT0, code.frameSize());
208 jit.emitSave(code.calleeSaveRegisterAtOffsetList());
209 jit.emitPutToCallFrameHeader(codeBlock, CallFrameSlot::codeBlock);
// Entrypoint 0 is the normal entry; only catch entrypoints get the custom prologue.
212 for (unsigned catchEntrypointIndex : m_graph.m_entrypointIndexToCatchBytecodeOffset.keys()) {
213 RELEASE_ASSERT(catchEntrypointIndex != 0);
214 m_proc.code().setPrologueForEntrypoint(catchEntrypointIndex, catchPrologueGenerator.copyRef());
217 if (m_graph.m_maxLocalsForCatchOSREntry) {
218 uint32_t numberOfLiveLocals = std::max(*m_graph.m_maxLocalsForCatchOSREntry, 1u); // Make sure we always allocate a non-null catchOSREntryBuffer.
219 m_ftlState.jitCode->common.catchOSREntryBuffer = m_graph.m_vm.scratchBufferForSize(sizeof(JSValue) * numberOfLiveLocals);
223 m_graph.ensureSSADominators();
225 if (verboseCompilationEnabled())
226 dataLog("Function ready, beginning lowering.\n");
228 m_out.initialize(m_heaps);
230 // We use prologue frequency for all of the initialization code.
231 m_out.setFrequency(1);
233 bool hasMultipleEntrypoints = m_graph.m_numberOfEntrypoints > 1;
235 LBasicBlock prologue = m_out.newBlock();
236 LBasicBlock callEntrypointArgumentSpeculations = hasMultipleEntrypoints ? m_out.newBlock() : nullptr;
237 m_handleExceptions = m_out.newBlock();
// Create one low (B3) block per high (DFG) block, carrying over execution frequency.
239 for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
240 m_highBlock = m_graph.block(blockIndex);
243 m_out.setFrequency(m_highBlock->executionCount);
244 m_blocks.add(m_highBlock, m_out.newBlock());
247 // Back to prologue frequency for any bocks that get sneakily created in the initialization code.
248 m_out.setFrequency(1);
250 m_out.appendTo(prologue, hasMultipleEntrypoints ? callEntrypointArgumentSpeculations : m_handleExceptions);
251 m_out.initializeConstants(m_proc, prologue);
252 createPhiVariables();
// Reserve a stack slot region for OSR-exit "captured" values; m_captured points at
// its end so exit code can index backwards into it.
254 size_t sizeOfCaptured = sizeof(JSValue) * m_graph.m_nextMachineLocal;
255 B3::SlotBaseValue* capturedBase = m_out.lockedStackSlot(sizeOfCaptured);
256 m_captured = m_out.add(capturedBase, m_out.constIntPtr(sizeOfCaptured));
257 state->capturedValue = capturedBase->slot();
259 auto preOrder = m_graph.blocksInPreOrder();
261 m_callFrame = m_out.framePointer();
262 m_tagTypeNumber = m_out.constInt64(TagTypeNumber);
263 m_tagMask = m_out.constInt64(TagMask);
265 // Make sure that B3 knows that we really care about the mask registers. This forces the
266 // constants to be materialized in registers.
267 m_proc.addFastConstant(m_tagTypeNumber->key());
268 m_proc.addFastConstant(m_tagMask->key());
270 // We don't want the CodeBlock to have a weak pointer to itself because
271 // that would cause it to always get collected.
272 m_out.storePtr(m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())), addressFor(CallFrameSlot::codeBlock));
274 VM* vm = &this->vm();
276 // Stack Overflow Check.
277 unsigned exitFrameSize = m_graph.requiredRegisterCountForExit() * sizeof(Register);
278 MacroAssembler::AbsoluteAddress addressOfStackLimit(vm->addressOfSoftStackLimit());
279 PatchpointValue* stackOverflowHandler = m_out.patchpoint(Void);
280 CallSiteIndex callSiteIndex = callSiteIndexForCodeOrigin(m_ftlState, CodeOrigin(0));
281 stackOverflowHandler->appendSomeRegister(m_callFrame);
282 stackOverflowHandler->clobber(RegisterSet::macroScratchRegisters());
283 stackOverflowHandler->numGPScratchRegisters = 1;
284 stackOverflowHandler->setGenerator(
285 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
286 AllowMacroScratchRegisterUsage allowScratch(jit);
287 GPRReg fp = params[0].gpr();
288 GPRReg scratch = params.gpScratch(0);
// Check the larger of the exit frame and the FTL frame against the soft stack limit.
290 unsigned ftlFrameSize = params.proc().frameSize();
291 unsigned maxFrameSize = std::max(exitFrameSize, ftlFrameSize);
293 jit.addPtr(MacroAssembler::TrustedImm32(-maxFrameSize), fp, scratch);
294 MacroAssembler::JumpList stackOverflow;
// Guard against wrap-around when the frame is bigger than the reserved zone.
295 if (UNLIKELY(maxFrameSize > Options::reservedZoneSize()))
296 stackOverflow.append(jit.branchPtr(MacroAssembler::Above, scratch, fp));
297 stackOverflow.append(jit.branchPtr(MacroAssembler::Above, addressOfStackLimit, scratch));
// Slow path: restore callee-saves, record the call site, then throw the
// stack-overflow error and jump to the exception handler.
299 params.addLatePath([=] (CCallHelpers& jit) {
300 AllowMacroScratchRegisterUsage allowScratch(jit);
302 stackOverflow.link(&jit);
304 // FIXME: We would not have to do this if the stack check was part of the Air
305 // prologue. Then, we would know that there is no way for the callee-saves to
307 // https://bugs.webkit.org/show_bug.cgi?id=172456
308 jit.emitRestore(params.proc().calleeSaveRegisterAtOffsetList());
311 MacroAssembler::TrustedImm32(callSiteIndex.bits()),
312 CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
313 jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm->topEntryFrame);
315 jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
316 jit.move(CCallHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::argumentGPR1);
317 CCallHelpers::Call throwCall = jit.call(OperationPtrTag);
319 jit.move(CCallHelpers::TrustedImmPtr(vm), GPRInfo::argumentGPR0);
320 jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
321 CCallHelpers::Call lookupExceptionHandlerCall = jit.call(OperationPtrTag);
322 jit.jumpToExceptionHandler(*vm);
325 [=] (LinkBuffer& linkBuffer) {
326 linkBuffer.link(throwCall, FunctionPtr<OperationPtrTag>(operationThrowStackOverflowError));
327 linkBuffer.link(lookupExceptionHandlerCall, FunctionPtr<OperationPtrTag>(lookupExceptionHandlerFromCallerFrame));
332 LBasicBlock firstDFGBasicBlock = lowBlock(m_graph.block(0));
// With multiple entrypoints, an EntrySwitch routes entrypoint 0 to the argument
// speculation block and catch entrypoints straight to the first DFG block.
335 if (hasMultipleEntrypoints) {
336 Vector<LBasicBlock> successors(m_graph.m_numberOfEntrypoints);
337 successors[0] = callEntrypointArgumentSpeculations;
338 for (unsigned i = 1; i < m_graph.m_numberOfEntrypoints; ++i) {
339 // Currently, the only other entrypoint is an op_catch entrypoint.
340 // We do OSR entry at op_catch, and we prove argument formats before
341 // jumping to FTL code, so we don't need to check argument types here
342 // for these entrypoints.
343 successors[i] = firstDFGBasicBlock;
346 m_out.entrySwitch(successors);
347 m_out.appendTo(callEntrypointArgumentSpeculations, m_handleExceptions);
351 m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
// All arguments are flushed as JSValues at entry; record that availability.
354 availabilityMap().clear();
355 availabilityMap().m_locals = Operands<Availability>(codeBlock()->numParameters(), 0);
356 for (unsigned i = codeBlock()->numParameters(); i--;) {
357 availabilityMap().m_locals.argument(i) =
358 Availability(FlushedAt(FlushedJSValue, virtualRegisterForArgument(i)));
// Speculate on each argument's proven format; a failed check OSR-exits with BadType.
361 for (unsigned i = codeBlock()->numParameters(); i--;) {
362 MethodOfGettingAValueProfile profile(&m_graph.m_profiledBlock->valueProfileForArgument(i));
363 VirtualRegister operand = virtualRegisterForArgument(i);
364 LValue jsValue = m_out.load64(addressFor(operand));
366 switch (m_graph.m_argumentFormats[0][i]) {
368 speculate(BadType, jsValueValue(jsValue), profile, isNotInt32(jsValue));
371 speculate(BadType, jsValueValue(jsValue), profile, isNotBoolean(jsValue));
374 speculate(BadType, jsValueValue(jsValue), profile, isNotCell(jsValue));
379 DFG_CRASH(m_graph, nullptr, "Bad flush format for argument");
383 m_out.jump(firstDFGBasicBlock);
// The shared exception-handling block is just a jump, linked after code
// generation to the label the State records for the exception handler.
387 m_out.appendTo(m_handleExceptions, firstDFGBasicBlock);
388 Box<CCallHelpers::Label> exceptionHandler = state->exceptionHandler;
389 m_out.patchpoint(Void)->setGenerator(
390 [=] (CCallHelpers& jit, const StackmapGenerationParams&) {
391 CCallHelpers::Jump jump = jit.jump();
393 [=] (LinkBuffer& linkBuffer) {
394 linkBuffer.link(jump, linkBuffer.locationOf<ExceptionHandlerPtrTag>(*exceptionHandler));
// Lower every reachable DFG block in pre-order.
399 for (DFG::BasicBlock* block : preOrder)
402 // Make sure everything is decorated. This does a bunch of deferred decorating. This has
403 // to happen last because our abstract heaps are generated lazily. They have to be
404 // generated lazily because we have an infinite number of numbered, indexed, and
405 // absolute heaps. We only become aware of the ones we actually mention while lowering.
406 m_heaps.computeRangesAndDecorateInstructions();
408 // We create all Phi's up front, but we may then decide not to compile the basic block
409 // that would have contained one of them. So this creates orphans, which triggers B3
410 // validation failures. Calling this fixes the issue.
412 // Note that you should avoid the temptation to make this call conditional upon
413 // validation being enabled. B3 makes no guarantees of any kind of correctness when
414 // dealing with IR that would have failed validation. For example, it would be valid to
415 // write a B3 phase that so aggressively assumes the lack of orphans that it would crash
416 // if any orphans were around. We might even have such phases already.
417 m_proc.deleteOrphans();
419 // We put the blocks into the B3 procedure in a super weird order. Now we reorder them.
420 m_out.applyBlockOrder();
// Pre-creates a B3 Phi value for every DFG Phi node, choosing the B3 type from the
// node's result kind, and records the mapping in m_phis so Upsilons can find them later.
425 void createPhiVariables()
427 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
428 DFG::BasicBlock* block = m_graph.block(blockIndex);
431 for (unsigned nodeIndex = block->size(); nodeIndex--;) {
432 Node* node = block->at(nodeIndex);
// Only DFG Phi nodes get a B3 Phi; everything else is skipped.
433 if (node->op() != DFG::Phi)
436 switch (node->flags() & NodeResultMask) {
437 case NodeResultDouble:
440 case NodeResultInt32:
443 case NodeResultInt52:
446 case NodeResultBoolean:
453 DFG_CRASH(m_graph, node, "Bad Phi node result type");
456 m_phis.add(node, m_proc.add<Value>(B3::Phi, type, Origin(node)));
// Lowers a single DFG basic block into its pre-created B3 block: finds the next
// compiled block (to keep IR in roughly program order), bails if CFA never reached
// this block, then compiles each node in order until one terminates the block.
461 void compileBlock(DFG::BasicBlock* block)
466 if (verboseCompilationEnabled())
467 dataLog("Compiling block ", *block, "\n");
471 // Make sure that any blocks created while lowering code in the high block have the frequency of
472 // the high block. This is appropriate because B3 doesn't need precise frequencies. It just needs
473 // something roughly approximate for things like register allocation.
474 m_out.setFrequency(m_highBlock->executionCount);
476 LBasicBlock lowBlock = m_blocks.get(m_highBlock);
// Scan forward for the next high block so IR can be appended before it.
479 for (BlockIndex nextBlockIndex = m_highBlock->index + 1; nextBlockIndex < m_graph.numBlocks(); ++nextBlockIndex) {
480 m_nextHighBlock = m_graph.block(nextBlockIndex);
484 m_nextLowBlock = m_nextHighBlock ? m_blocks.get(m_nextHighBlock) : 0;
486 // All of this effort to find the next block gives us the ability to keep the
487 // generated IR in roughly program order. This ought not affect the performance
488 // of the generated code (since we expect B3 to reorder things) but it will
489 // make IR dumps easier to read.
490 m_out.appendTo(lowBlock, m_nextLowBlock);
492 if (Options::ftlCrashes())
// CFA proved this block unreachable — emit a crash instead of lowering it.
495 if (!m_highBlock->cfaHasVisited) {
496 if (verboseCompilationEnabled())
497 dataLog("Bailing because CFA didn't reach.\n");
498 crash(m_highBlock, nullptr);
502 m_aiCheckedNodes.clear();
504 m_availabilityCalculator.beginBlock(m_highBlock);
507 m_state.beginBasicBlock(m_highBlock);
// compileNode returning false ends this block's lowering early.
509 for (m_nodeIndex = 0; m_nodeIndex < m_highBlock->size(); ++m_nodeIndex) {
510 if (!compileNode(m_nodeIndex))
// Called when the abstract interpreter state becomes invalid mid-block: marks every
// block dominated by the current one as not-CFA-visited so they bail out too instead
// of consuming values that were never defined.
515 void safelyInvalidateAfterTermination()
517 if (verboseCompilationEnabled())
518 dataLog("Bailing.\n");
521 // Invalidate dominated blocks. Under normal circumstances we would expect
522 // them to be invalidated already. But you can have the CFA become more
523 // precise over time because the structures of objects change on the main
524 // thread. Failing to do this would result in weird crashes due to a value
525 // being used but not defined. Race conditions FTW!
526 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
527 DFG::BasicBlock* target = m_graph.block(blockIndex);
530 if (m_graph.m_ssaDominators->dominates(m_highBlock, target)) {
531 if (verboseCompilationEnabled())
532 dataLog("Block ", *target, " will bail also.\n");
533 target->cfaHasVisited = false;
// Debug validation (behind Options::validateAbstractInterpreterState()): before
// executing `node`, emits runtime probes that check every live value against the
// abstract interpreter's claimed AbstractValue, dumping the graph and crashing on
// mismatch. Sampled via a hash of the node index against the configured probability.
// NOTE(review): several lines of this function are not visible in this extract
// (e.g. where `input`, `type` and `doubleInput` are declared) — confirm details
// against the full file.
538 void validateAIState(Node* node)
// Lazily capture a one-time dump of the graph for diagnostics on failure.
541 StringPrintStream out;
543 m_graphDump = out.toString();
546 switch (node->op()) {
562 // Before we execute node.
563 NodeSet& live = m_liveInToNode.find(node)->value;
564 unsigned highParentIndex = node->index();
// Probabilistic sampling: only validate a fraction of nodes, chosen by hashing the index.
566 uint64_t hash = WTF::intHash(highParentIndex);
567 if (hash >= static_cast<uint64_t>((static_cast<double>(std::numeric_limits<unsigned>::max()) + 1) * Options::validateAbstractInterpreterStateProbability()))
571 for (Node* node : live) {
// Phantom allocations and CheckInBounds have no materialized value to check.
572 if (node->isPhantomAllocation())
575 if (node->op() == CheckInBounds)
578 AbstractValue value = m_interpreter.forNode(node);
// Skip nodes whose AI value has not changed since the last check (non-cell only,
// per the SpecCell test below).
580 auto iter = m_aiCheckedNodes.find(node);
581 if (iter != m_aiCheckedNodes.end()) {
582 AbstractValue checkedValue = iter->value;
583 if (checkedValue == value) {
584 if (!(value.m_type & SpecCell))
588 m_aiCheckedNodes.set(node, value);
// Pick the lowering and flush format matching the node's result kind.
591 FlushFormat flushFormat;
593 if (node->hasJSResult()) {
594 input = lowJSValue(Edge(node, UntypedUse));
595 flushFormat = FlushedJSValue;
596 } else if (node->hasDoubleResult()) {
597 input = lowDouble(Edge(node, DoubleRepUse));
598 flushFormat = FlushedDouble;
599 } else if (node->hasInt52Result()) {
600 input = strictInt52ToJSValue(lowStrictInt52(Edge(node, Int52RepUse)));
601 flushFormat = FlushedInt52;
605 unsigned highChildIndex = node->index();
607 String graphDump = m_graphDump;
// The patchpoint claims only local-state writes so it does not inhibit optimization,
// and probes the actual register contents at runtime.
609 PatchpointValue* patchpoint = m_out.patchpoint(Void);
610 patchpoint->effects = Effects::none();
611 patchpoint->effects.writesLocalState = true;
612 patchpoint->appendSomeRegister(input);
613 patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
614 GPRReg reg = InvalidGPRReg;
615 FPRReg fpReg = InvalidFPRReg;
616 if (flushFormat == FlushedDouble)
617 fpReg = params[0].fpr();
619 reg = params[0].gpr();
620 jit.probe([=] (Probe::Context& context) {
// On mismatch: report both node indices, the expected AbstractValue, the actual
// value, and the full graph dump before crashing.
624 auto dumpAndCrash = [&] {
625 dataLogLn("Validation failed at node: @", highParentIndex);
626 dataLogLn("Failed validating live value: @", highChildIndex);
628 dataLogLn("Expected AI value = ", value);
629 if (flushFormat != FlushedDouble)
630 dataLogLn("Unexpected value = ", input);
632 dataLogLn("Unexpected double value = ", doubleInput);
634 dataLogLn(graphDump);
// Doubles: classify the runtime value (number vs impure/pure NaN) and check the
// AI value could hold that speculated type.
638 if (flushFormat == FlushedDouble) {
639 doubleInput = context.fpr(fpReg);
641 if (!std::isnan(doubleInput))
642 type = speculationFromValue(jsDoubleNumber(doubleInput));
643 else if (isImpureNaN(doubleInput))
644 type = SpecDoubleImpureNaN;
646 type = SpecDoublePureNaN;
648 if (!value.couldBeType(type))
// GP-register values: decode the JSValue (Int52 is re-boxed as a double first)
// and validate against the AI value using the OSR-entry rules.
651 input = JSValue::decode(context.gpr(reg));
652 if (flushFormat == FlushedInt52) {
653 RELEASE_ASSERT(input.isAnyInt());
654 input = jsDoubleNumber(input.asAnyInt());
656 if (!value.validateOSREntryValue(input, flushFormat))
// Lowers the DFG node at nodeIndex in the current high block to B3 IR.
// NOTE(review): only a sampled subset of this function is visible here — return
// statements, `break;`s and many case labels/bodies were dropped by the extraction.
// The visible shape: bail (via safelyInvalidateAfterTermination) when the abstract
// interpreter state is invalid, set up per-node origin/logging/AI bookkeeping, then
// dispatch on the opcode to one compileXXX() helper per DFG node type.
665 bool compileNode(unsigned nodeIndex)
667 if (!m_state.isValid()) {
668 safelyInvalidateAfterTermination();
672 m_node = m_highBlock->at(nodeIndex);
673 m_origin = m_node->origin;
674 m_out.setOrigin(m_node);
676 if (verboseCompilationEnabled())
677 dataLog("Lowering ", m_node, "\n");
679 m_interpreter.startExecuting();
680 m_interpreter.executeKnownEdgeTypes(m_node);
682 if (Options::validateAbstractInterpreterState())
683 validateAIState(m_node);
// When validateDFGDoesGC is on, record whether this node is expected to GC so the
// runtime can check collections against that expectation.
685 if (validateDFGDoesGC) {
686 bool expectDoesGC = doesGC(m_graph, m_node);
687 m_out.store(m_out.constBool(expectDoesGC), m_out.absolute(vm().heap.addressOfExpectDoesGC()));
// Opcode dispatch. Case labels for many helpers are among the lines dropped by the
// extraction; each helper lowers exactly one DFG node kind.
690 switch (m_node->op()) {
700 compileDoubleConstant();
703 compileInt52Constant();
706 compileLazyJSConstant();
712 compileDoubleAsInt32();
721 compileValueToInt32();
723 case BooleanToNumber:
724 compileBooleanToNumber();
726 case ExtractOSREntryLocal:
727 compileExtractOSREntryLocal();
729 case ExtractCatchLocal:
730 compileExtractCatchLocal();
732 case ClearCatchLocals:
733 compileClearCatchLocals();
746 case CallObjectConstructor:
747 compileToObjectOrCallObjectConstructor();
753 compileValueNegate();
769 compileArithAddOrSub();
791 compileArithMinOrMax();
803 compileArithRandom();
821 compileArithFRound();
824 compileArithNegate();
830 compileValueBitNot();
833 compileArithBitNot();
836 compileValueBitAnd();
839 compileArithBitAnd();
848 compileArithBitXor();
851 compileValueBitXor();
863 compileUInt32ToNumber();
866 compileCheckStructure();
868 case CheckStructureOrEmpty:
869 compileCheckStructureOrEmpty();
875 compileCheckNotEmpty();
878 compileAssertNotEmpty();
881 compileCheckBadCell();
883 case CheckStringIdent:
884 compileCheckStringIdent();
887 compileGetExecutable();
890 case ArrayifyToStructure:
894 compilePutStructure();
897 compileGetById(AccessType::TryGet);
901 compileGetById(AccessType::Get);
903 case GetByIdWithThis:
904 compileGetByIdWithThis();
907 case GetByIdDirectFlush:
908 compileGetById(AccessType::GetDirect);
917 compileHasOwnProperty();
924 case PutByIdWithThis:
925 compilePutByIdWithThis();
929 compilePutAccessorById();
931 case PutGetterSetterById:
932 compilePutGetterSetterById();
936 compilePutAccessorByVal();
942 compileDeleteByVal();
945 compileGetButterfly();
947 case ConstantStoragePointer:
948 compileConstantStoragePointer();
950 case GetIndexedPropertyStorage:
951 compileGetIndexedPropertyStorage();
957 compileGetArrayLength();
959 case GetVectorLength:
960 compileGetVectorLength();
963 compileCheckInBounds();
968 case GetMyArgumentByVal:
969 case GetMyArgumentByValOutOfBounds:
970 compileGetMyArgumentByVal();
972 case GetByValWithThis:
973 compileGetByValWithThis();
980 case PutByValWithThis:
981 compilePutByValWithThis();
985 case AtomicsCompareExchange:
986 case AtomicsExchange:
992 compileAtomicsReadModifyWrite();
994 case AtomicsIsLockFree:
995 compileAtomicsIsLockFree();
997 case DefineDataProperty:
998 compileDefineDataProperty();
1000 case DefineAccessorProperty:
1001 compileDefineAccessorProperty();
1010 compileArraySlice();
1013 compileArrayIndexOf();
1015 case CreateActivation:
1016 compileCreateActivation();
1019 compilePushWithScope();
1022 case NewGeneratorFunction:
1023 case NewAsyncGeneratorFunction:
1024 case NewAsyncFunction:
1025 compileNewFunction();
1027 case CreateDirectArguments:
1028 compileCreateDirectArguments();
1030 case CreateScopedArguments:
1031 compileCreateScopedArguments();
1033 case CreateClonedArguments:
1034 compileCreateClonedArguments();
1037 compileObjectCreate();
1040 compileObjectKeys();
1045 case NewStringObject:
1046 compileNewStringObject();
1054 case NewArrayWithSpread:
1055 compileNewArrayWithSpread();
1058 compileCreateThis();
1063 case NewArrayBuffer:
1064 compileNewArrayBuffer();
1066 case NewArrayWithSize:
1067 compileNewArrayWithSize();
1070 compileNewTypedArray();
1072 case GetTypedArrayByteOffset:
1073 compileGetTypedArrayByteOffset();
1075 case GetPrototypeOf:
1076 compileGetPrototypeOf();
1078 case AllocatePropertyStorage:
1079 compileAllocatePropertyStorage();
1081 case ReallocatePropertyStorage:
1082 compileReallocatePropertyStorage();
1084 case NukeStructureAndSetButterfly:
1085 compileNukeStructureAndSetButterfly();
1091 case CallStringConstructor:
1093 compileToStringOrCallStringConstructorOrStringValueOf();
1096 compileToPrimitive();
1102 compileStringCharAt();
1104 case StringCharCodeAt:
1105 compileStringCharCodeAt();
1107 case StringFromCharCode:
1108 compileStringFromCharCode();
1111 case GetGetterSetterByOffset:
1112 compileGetByOffset();
1120 case MultiGetByOffset:
1121 compileMultiGetByOffset();
1124 compilePutByOffset();
1126 case MultiPutByOffset:
1127 compileMultiPutByOffset();
1129 case MatchStructure:
1130 compileMatchStructure();
1133 case GetGlobalLexicalVariable:
1134 compileGetGlobalVariable();
1136 case PutGlobalVariable:
1137 compilePutGlobalVariable();
1140 compileNotifyWrite();
1148 case GetArgumentCountIncludingThis:
1149 compileGetArgumentCountIncludingThis();
1151 case SetArgumentCountIncludingThis:
1152 compileSetArgumentCountIncludingThis();
1160 case GetGlobalObject:
1161 compileGetGlobalObject();
1164 compileGetGlobalThis();
1167 compileGetClosureVar();
1170 compilePutClosureVar();
1172 case GetFromArguments:
1173 compileGetFromArguments();
1175 case PutToArguments:
1176 compilePutToArguments();
1179 compileGetArgument();
1184 case CompareStrictEq:
1185 compileCompareStrictEq();
1188 compileCompareLess();
1191 compileCompareLessEq();
1193 case CompareGreater:
1194 compileCompareGreater();
1196 case CompareGreaterEq:
1197 compileCompareGreaterEq();
1200 compileCompareBelow();
1202 case CompareBelowEq:
1203 compileCompareBelowEq();
1206 compileCompareEqPtr();
1212 compileLogicalNot();
1215 case TailCallInlinedCaller:
1217 compileCallOrConstruct();
1220 case DirectTailCallInlinedCaller:
1221 case DirectConstruct:
1222 case DirectTailCall:
1223 compileDirectCallOrConstruct();
1229 case CallForwardVarargs:
1230 case TailCallVarargs:
1231 case TailCallVarargsInlinedCaller:
1232 case TailCallForwardVarargs:
1233 case TailCallForwardVarargsInlinedCaller:
1234 case ConstructVarargs:
1235 case ConstructForwardVarargs:
1236 compileCallOrConstructVarargs();
1242 compileLoadVarargs();
1244 case ForwardVarargs:
1245 compileForwardVarargs();
1256 case DFG::EntrySwitch:
1257 compileEntrySwitch();
1263 compileForceOSRExit();
1267 compileCPUIntrinsic();
1269 RELEASE_ASSERT_NOT_REACHED();
1275 case ThrowStaticError:
1276 compileThrowStaticError();
1278 case InvalidationPoint:
1279 compileInvalidationPoint();
1285 compileIsUndefined();
1287 case IsUndefinedOrNull:
1288 compileIsUndefinedOrNull();
1296 case NumberIsInteger:
1297 compileNumberIsInteger();
1299 case IsCellWithType:
1300 compileIsCellWithType();
1305 case NormalizeMapKey:
1306 compileNormalizeMapKey();
1309 compileGetMapBucket();
1311 case GetMapBucketHead:
1312 compileGetMapBucketHead();
1314 case GetMapBucketNext:
1315 compileGetMapBucketNext();
1317 case LoadKeyFromMapBucket:
1318 compileLoadKeyFromMapBucket();
1320 case LoadValueFromMapBucket:
1321 compileLoadValueFromMapBucket();
1323 case ExtractValueFromWeakMapGet:
1324 compileExtractValueFromWeakMapGet();
1333 compileWeakMapGet();
1336 compileWeakSetAdd();
1339 compileWeakMapSet();
1344 case IsObjectOrNull:
1345 compileIsObjectOrNull();
1348 compileIsFunction();
1350 case IsTypedArrayView:
1351 compileIsTypedArrayView();
1359 case CheckTypeInfoFlags:
1360 compileCheckTypeInfoFlags();
1362 case OverridesHasInstance:
1363 compileOverridesHasInstance();
1366 compileInstanceOf();
1368 case InstanceOfCustom:
1369 compileInstanceOfCustom();
1371 case CountExecution:
1372 compileCountExecution();
1374 case SuperSamplerBegin:
1375 compileSuperSamplerBegin();
1377 case SuperSamplerEnd:
1378 compileSuperSamplerEnd();
1381 case FencedStoreBarrier:
1382 compileStoreBarrier();
1384 case HasIndexedProperty:
1385 compileHasIndexedProperty();
1387 case HasGenericProperty:
1388 compileHasGenericProperty();
1390 case HasStructureProperty:
1391 compileHasStructureProperty();
1393 case GetDirectPname:
1394 compileGetDirectPname();
1396 case GetEnumerableLength:
1397 compileGetEnumerableLength();
1399 case GetPropertyEnumerator:
1400 compileGetPropertyEnumerator();
1402 case GetEnumeratorStructurePname:
1403 compileGetEnumeratorStructurePname();
1405 case GetEnumeratorGenericPname:
1406 compileGetEnumeratorGenericPname();
1409 compileToIndexString();
1411 case CheckStructureImmediate:
1412 compileCheckStructureImmediate();
1414 case MaterializeNewObject:
1415 compileMaterializeNewObject();
1417 case MaterializeCreateActivation:
1418 compileMaterializeCreateActivation();
1421 compileCheckTraps();
1424 compileCreateRest();
1427 compileGetRestLength();
1430 compileRegExpExec();
1432 case RegExpExecNonGlobalOrSticky:
1433 compileRegExpExecNonGlobalOrSticky();
1436 compileRegExpTest();
1438 case RegExpMatchFast:
1439 compileRegExpMatchFast();
1441 case RegExpMatchFastGlobal:
1442 compileRegExpMatchFastGlobal();
1447 case SetFunctionName:
1448 compileSetFunctionName();
1451 case StringReplaceRegExp:
1452 compileStringReplace();
1454 case GetRegExpObjectLastIndex:
1455 compileGetRegExpObjectLastIndex();
1457 case SetRegExpObjectLastIndex:
1458 compileSetRegExpObjectLastIndex();
1460 case LogShadowChickenPrologue:
1461 compileLogShadowChickenPrologue();
1463 case LogShadowChickenTail:
1464 compileLogShadowChickenTail();
1466 case RecordRegExpCachedResult:
1467 compileRecordRegExpCachedResult();
1469 case ResolveScopeForHoistingFuncDeclInEval:
1470 compileResolveScopeForHoistingFuncDeclInEval();
1473 compileResolveScope();
1476 compileGetDynamicVar();
1479 compilePutDynamicVar();
1482 compileUnreachable();
1485 compileStringSlice();
1488 compileToLowerCase();
1490 case NumberToStringWithRadix:
1491 compileNumberToStringWithRadix();
1493 case NumberToStringWithValidRadixConstant:
1494 compileNumberToStringWithValidRadixConstant();
1497 compileCheckSubClass();
1503 compileCallDOMGetter();
1505 case FilterCallLinkStatus:
1506 case FilterGetByIdStatus:
1507 case FilterPutByIdStatus:
1508 case FilterInByIdStatus:
1509 compileFilterICStatus();
1511 case DataViewGetInt:
1512 case DataViewGetFloat:
1513 compileDataViewGet();
1516 compileDataViewSet();
// Phantom/bookkeeping nodes generate no code at all.
1524 case PhantomNewObject:
1525 case PhantomNewFunction:
1526 case PhantomNewGeneratorFunction:
1527 case PhantomNewAsyncGeneratorFunction:
1528 case PhantomNewAsyncFunction:
1529 case PhantomCreateActivation:
1530 case PhantomDirectArguments:
1531 case PhantomCreateRest:
1533 case PhantomNewArrayWithSpread:
1534 case PhantomNewArrayBuffer:
1535 case PhantomClonedArguments:
1536 case PhantomNewRegexp:
1540 case InitializeEntrypointArguments:
1543 DFG_CRASH(m_graph, m_node, "Unrecognized node in FTL backend");
// Post-node bookkeeping: terminals end the block; if lowering invalidated the AI
// state, invalidate dominated blocks; otherwise advance availability tracking and
// execute this node's effects in the abstract interpreter.
1547 if (m_node->isTerminal())
1550 if (!m_state.isValid()) {
1551 safelyInvalidateAfterTermination();
1555 m_availabilityCalculator.executeNode(m_node);
1556 m_interpreter.executeEffects(nodeIndex);
1561 void compileUpsilon()
// Lowers an Upsilon: computes the value flowing into the matching Phi,
// choosing the lowering based on the child's use kind, then wires it into
// the B3 phi created for the DFG Phi node.
1563 LValue upsilonValue = nullptr;
1564 switch (m_node->child1().useKind()) {
1566 upsilonValue = lowDouble(m_node->child1());
1570 upsilonValue = lowInt32(m_node->child1());
1573 upsilonValue = lowInt52(m_node->child1());
1576 case KnownBooleanUse:
1577 upsilonValue = lowBoolean(m_node->child1());
1581 upsilonValue = lowCell(m_node->child1());
1584 upsilonValue = lowJSValue(m_node->child1());
1587 DFG_CRASH(m_graph, m_node, "Bad use kind");
// Anchor the lowered value in the current block and register it as an
// incoming value of the phi associated with m_node->phi().
1590 ValueFromBlock upsilon = m_out.anchor(upsilonValue);
1591 LValue phiNode = m_phis.get(m_node->phi());
1592 m_out.addIncomingToPhi(phiNode, upsilon);
// NOTE(review): the function header is not visible in this excerpt; from the
// surrounding code this appears to be the body of compilePhi. It appends the
// pre-created B3 phi to the current block and publishes the result under the
// representation dictated by the node's result flags.
1597 LValue phi = m_phis.get(m_node);
1598 m_out.m_block->append(phi);
1600 switch (m_node->flags() & NodeResultMask) {
1601 case NodeResultDouble:
1604 case NodeResultInt32:
1607 case NodeResultInt52:
1610 case NodeResultBoolean:
// Any other result kind is a lowering bug.
1617 DFG_CRASH(m_graph, m_node, "Bad result type");
1622 void compileDoubleConstant()
// Materializes the node's numeric constant as a B3 double constant.
1624 setDouble(m_out.constDouble(m_node->asNumber()));
1627 void compileInt52Constant()
1629 int64_t value = m_node->asAnyInt();
// Int52 values are kept pre-shifted by int52ShiftAmount; also record the
// unshifted ("strict") form so later code can pick whichever it needs.
1631 setInt52(m_out.constInt64(value << JSValue::int52ShiftAmount));
1632 setStrictInt52(m_out.constInt64(value));
1635 void compileLazyJSConstant()
// Emits a patchpoint whose generator materializes the lazily-computed JS
// value into the result register at code-generation time. The patchpoint is
// marked effect-free so B3 may freely reorder or CSE it.
1637 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
1638 LazyJSValue value = m_node->lazyJSValue();
1639 patchpoint->setGenerator(
1640 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
1641 value.emit(jit, JSValueRegs(params[0].gpr()));
1643 patchpoint->effects = Effects::none();
1644 setJSValue(patchpoint);
1647 void compileDoubleRep()
// Converts the child to the DoubleRep (unboxed double) representation.
// The strategy depends on the child's use kind: RealNumberUse assumes the
// value is already a real number, NumberUse/NotCellUse handle the general
// boxed case, and Int52 uses convert from the strict-int52 form.
1649 switch (m_node->child1().useKind()) {
1650 case RealNumberUse: {
1651 LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);
1653 LValue doubleValue = unboxDouble(value);
1655 LBasicBlock intCase = m_out.newBlock();
1656 LBasicBlock continuation = m_out.newBlock();
// Fast path: if unboxing as double did not produce NaN, the value really
// was a boxed double (NaN here means the bits were not a double payload).
1658 ValueFromBlock fastResult = m_out.anchor(doubleValue);
1660 m_out.doubleEqual(doubleValue, doubleValue),
1661 usually(continuation), rarely(intCase));
1663 LBasicBlock lastNext = m_out.appendTo(intCase, continuation);
// Slow path: speculate the value is an int32 (the only remaining legal
// real-number encoding) and convert it to double.
1666 jsValueValue(value), m_node->child1(), SpecBytecodeRealNumber,
1667 isNotInt32(value, provenType(m_node->child1()) & ~SpecDoubleReal));
1668 ValueFromBlock slowResult = m_out.anchor(m_out.intToDouble(unboxInt32(value)));
1669 m_out.jump(continuation);
1671 m_out.appendTo(continuation, lastNext);
1673 setDouble(m_out.phi(Double, fastResult, slowResult));
// NotCellUse additionally converts undefined/null/booleans to numbers;
// plain NumberUse type-checks instead.
1679 bool shouldConvertNonNumber = m_node->child1().useKind() == NotCellUse;
1681 LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);
1683 LBasicBlock intCase = m_out.newBlock();
1684 LBasicBlock doubleTesting = m_out.newBlock();
1685 LBasicBlock doubleCase = m_out.newBlock();
1686 LBasicBlock nonDoubleCase = m_out.newBlock();
1687 LBasicBlock continuation = m_out.newBlock();
1690 isNotInt32(value, provenType(m_node->child1())),
1691 unsure(doubleTesting), unsure(intCase));
// Int32 path: unbox and widen to double.
1693 LBasicBlock lastNext = m_out.appendTo(intCase, doubleTesting);
1695 ValueFromBlock intToDouble = m_out.anchor(
1696 m_out.intToDouble(unboxInt32(value)));
1697 m_out.jump(continuation);
1699 m_out.appendTo(doubleTesting, doubleCase);
1700 LValue valueIsNumber = isNumber(value, provenType(m_node->child1()));
1701 m_out.branch(valueIsNumber, usually(doubleCase), rarely(nonDoubleCase));
1703 m_out.appendTo(doubleCase, nonDoubleCase);
1704 ValueFromBlock unboxedDouble = m_out.anchor(unboxDouble(value));
1705 m_out.jump(continuation);
1707 if (shouldConvertNonNumber) {
// NotCellUse: map undefined -> NaN, null -> 0, true -> 1, false -> 0,
// deoptimizing (type check) if the value turns out to be a cell.
1708 LBasicBlock undefinedCase = m_out.newBlock();
1709 LBasicBlock testNullCase = m_out.newBlock();
1710 LBasicBlock nullCase = m_out.newBlock();
1711 LBasicBlock testBooleanTrueCase = m_out.newBlock();
1712 LBasicBlock convertBooleanTrueCase = m_out.newBlock();
1713 LBasicBlock convertBooleanFalseCase = m_out.newBlock();
1715 m_out.appendTo(nonDoubleCase, undefinedCase);
1716 LValue valueIsUndefined = m_out.equal(value, m_out.constInt64(ValueUndefined));
1717 m_out.branch(valueIsUndefined, unsure(undefinedCase), unsure(testNullCase));
1719 m_out.appendTo(undefinedCase, testNullCase);
1720 ValueFromBlock convertedUndefined = m_out.anchor(m_out.constDouble(PNaN));
1721 m_out.jump(continuation);
1723 m_out.appendTo(testNullCase, nullCase);
1724 LValue valueIsNull = m_out.equal(value, m_out.constInt64(ValueNull));
1725 m_out.branch(valueIsNull, unsure(nullCase), unsure(testBooleanTrueCase));
1727 m_out.appendTo(nullCase, testBooleanTrueCase);
1728 ValueFromBlock convertedNull = m_out.anchor(m_out.constDouble(0));
1729 m_out.jump(continuation);
1731 m_out.appendTo(testBooleanTrueCase, convertBooleanTrueCase);
1732 LValue valueIsBooleanTrue = m_out.equal(value, m_out.constInt64(ValueTrue));
1733 m_out.branch(valueIsBooleanTrue, unsure(convertBooleanTrueCase), unsure(convertBooleanFalseCase));
1735 m_out.appendTo(convertBooleanTrueCase, convertBooleanFalseCase);
1736 ValueFromBlock convertedTrue = m_out.anchor(m_out.constDouble(1));
1737 m_out.jump(continuation);
1739 m_out.appendTo(convertBooleanFalseCase, continuation);
// Anything that is not false at this point must be a cell: type check.
1741 LValue valueIsNotBooleanFalse = m_out.notEqual(value, m_out.constInt64(ValueFalse));
1742 FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), ~SpecCellCheck, valueIsNotBooleanFalse);
1743 ValueFromBlock convertedFalse = m_out.anchor(m_out.constDouble(0));
1744 m_out.jump(continuation);
1746 m_out.appendTo(continuation, lastNext);
1747 setDouble(m_out.phi(Double, intToDouble, unboxedDouble, convertedUndefined, convertedNull, convertedTrue, convertedFalse));
// NumberUse: a non-number here fails the speculation unconditionally.
1750 m_out.appendTo(nonDoubleCase, continuation);
1751 FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecBytecodeNumber, m_out.booleanTrue);
1752 m_out.unreachable();
1754 m_out.appendTo(continuation, lastNext);
1756 setDouble(m_out.phi(Double, intToDouble, unboxedDouble));
// Int52 case: convert the strict (unshifted) int52 to double.
1761 setDouble(strictInt52ToDouble(lowStrictInt52(m_node->child1())));
1766 DFG_CRASH(m_graph, m_node, "Bad use kind");
1770 void compileDoubleAsInt32()
// Converts a double to int32, optionally checking for negative zero
// depending on the node's arith mode.
1772 LValue integerValue = convertDoubleToInt32(lowDouble(m_node->child1()), shouldCheckNegativeZero(m_node->arithMode()));
1773 setInt32(integerValue);
1776 void compileValueRep()
// Converts the child to the boxed JSValue representation.
1778 switch (m_node->child1().useKind()) {
1779 case DoubleRepUse: {
1780 LValue value = lowDouble(m_node->child1());
// Impure NaNs must be normalized to the canonical PNaN before boxing,
// since arbitrary NaN bit patterns would corrupt the JSValue encoding.
1782 if (m_interpreter.needsTypeCheck(m_node->child1(), ~SpecDoubleImpureNaN)) {
1783 value = m_out.select(
1784 m_out.doubleEqual(value, value), value, m_out.constDouble(PNaN));
1787 setJSValue(boxDouble(value));
// Int52 case: box the strict int52 as a JSValue.
1792 setJSValue(strictInt52ToJSValue(lowStrictInt52(m_node->child1())));
1797 DFG_CRASH(m_graph, m_node, "Bad use kind");
1801 void compileInt52Rep()
// Converts the child to the strict Int52 representation, from int32
// (sign extension), boxed JSValue, or double, per use kind.
1803 switch (m_node->child1().useKind()) {
1805 setStrictInt52(m_out.signExt32To64(lowInt32(m_node->child1())));
1810 jsValueToStrictInt52(
1811 m_node->child1(), lowJSValue(m_node->child1(), ManualOperandSpeculation)));
1814 case DoubleRepAnyIntUse:
1816 doubleToStrictInt52(
1817 m_node->child1(), lowDouble(m_node->child1())));
1821 RELEASE_ASSERT_NOT_REACHED();
1825 void compileValueToInt32()
// Converts the child to int32 following ToInt32 semantics appropriate to
// its use kind, reusing an already-lowered representation when available.
1827 switch (m_node->child1().useKind()) {
1829 setInt32(m_out.castToInt32(lowStrictInt52(m_node->child1())));
1833 setInt32(doubleToInt32(lowDouble(m_node->child1())));
// Prefer an existing int32 lowering of the child if one was computed.
1838 LoweredNodeValue value = m_int32Values.get(m_node->child1().node());
1839 if (isValid(value)) {
1840 setInt32(value.value());
// Otherwise fall back to an existing boxed JSValue lowering.
1844 value = m_jsValueValues.get(m_node->child1().node());
1845 if (isValid(value)) {
1846 setInt32(numberOrNotCellToInt32(m_node->child1(), value.value()));
1850 // We'll basically just get here for constants. But it's good to have this
1851 // catch-all since we often add new representations into the mix.
1853 numberOrNotCellToInt32(
1855 lowJSValue(m_node->child1(), ManualOperandSpeculation)));
1860 DFG_CRASH(m_graph, m_node, "Bad use kind");
1865 void compileBooleanToNumber()
// Converts a boolean to a number: true -> 1, false -> 0. Non-boolean
// inputs (in the untyped case) pass through unchanged.
1867 switch (m_node->child1().useKind()) {
1869 setInt32(m_out.zeroExt(lowBoolean(m_node->child1()), Int32));
1874 LValue value = lowJSValue(m_node->child1());
// If the value is provably a boolean or bool-ish int32, the low bit of
// the payload is already the numeric result.
1876 if (!m_interpreter.needsTypeCheck(m_node->child1(), SpecBoolInt32 | SpecBoolean)) {
1877 setInt32(m_out.bitAnd(m_out.castToInt32(value), m_out.int32One));
1881 LBasicBlock booleanCase = m_out.newBlock();
1882 LBasicBlock continuation = m_out.newBlock();
// Non-booleans flow through untouched.
1884 ValueFromBlock notBooleanResult = m_out.anchor(value);
1886 isBoolean(value, provenType(m_node->child1())),
1887 unsure(booleanCase), unsure(continuation));
// Booleans: unbox to 0/1 and re-tag as a boxed int32 number.
1889 LBasicBlock lastNext = m_out.appendTo(booleanCase, continuation);
1890 ValueFromBlock booleanResult = m_out.anchor(m_out.bitOr(
1891 m_out.zeroExt(unboxBoolean(value), Int64), m_tagTypeNumber));
1892 m_out.jump(continuation);
1894 m_out.appendTo(continuation, lastNext);
1895 setJSValue(m_out.phi(Int64, booleanResult, notBooleanResult));
1900 RELEASE_ASSERT_NOT_REACHED();
1905 void compileExtractOSREntryLocal()
// Loads a local's value out of the OSR-entry scratch buffer that was
// populated before entering FTL code.
1907 EncodedJSValue* buffer = static_cast<EncodedJSValue*>(
1908 m_ftlState.jitCode->ftlForOSREntry()->entryBuffer()->dataBuffer());
1909 setJSValue(m_out.load64(m_out.absolute(buffer + m_node->unlinkedLocal().toLocal())));
1912 void compileExtractCatchLocal()
// Loads a value from the catch OSR-entry buffer at the node's index.
1914 EncodedJSValue* buffer = static_cast<EncodedJSValue*>(m_ftlState.jitCode->common.catchOSREntryBuffer->dataBuffer());
1915 setJSValue(m_out.load64(m_out.absolute(buffer + m_node->catchOSREntryIndex())));
1918 void compileClearCatchLocals()
// Marks the catch OSR-entry buffer as inactive by zeroing its active
// length, so its contents are no longer considered live.
1920 ScratchBuffer* scratchBuffer = m_ftlState.jitCode->common.catchOSREntryBuffer;
1921 ASSERT(scratchBuffer);
1922 m_out.storePtr(m_out.constIntPtr(0), m_out.absolute(scratchBuffer->addressOfActiveLength()));
1925 void compileGetStack()
// Loads a flushed local from the stack using the format recorded in the
// node's StackAccessData.
1927 StackAccessData* data = m_node->stackAccessData();
1928 AbstractValue& value = m_state.operand(data->local);
1930 DFG_ASSERT(m_graph, m_node, isConcrete(data->format), data->format);
1932 switch (data->format) {
1934 setDouble(m_out.loadDouble(addressFor(data->machineLocal)));
1937 setInt52(m_out.load64(addressFor(data->machineLocal)));
// JSValue format: if the abstract value proves int32, load just the
// payload; otherwise load the full 64-bit boxed value.
1940 if (isInt32Speculation(value.m_type))
1941 setInt32(m_out.load32(payloadFor(data->machineLocal)));
1943 setJSValue(m_out.load64(addressFor(data->machineLocal)));
1948 void compilePutStack()
// Stores the child's value into a stack slot, using the store width and
// representation dictated by the flush format.
1950 StackAccessData* data = m_node->stackAccessData();
1951 switch (data->format) {
1952 case FlushedJSValue: {
1953 LValue value = lowJSValue(m_node->child1());
1954 m_out.store64(value, addressFor(data->machineLocal));
1958 case FlushedDouble: {
1959 LValue value = lowDouble(m_node->child1());
1960 m_out.storeDouble(value, addressFor(data->machineLocal));
1964 case FlushedInt32: {
1965 LValue value = lowInt32(m_node->child1());
1966 m_out.store32(value, payloadFor(data->machineLocal));
1970 case FlushedInt52: {
1971 LValue value = lowInt52(m_node->child1());
1972 m_out.store64(value, addressFor(data->machineLocal));
1977 LValue value = lowCell(m_node->child1());
1978 m_out.store64(value, addressFor(data->machineLocal));
1982 case FlushedBoolean: {
// Booleans are speculated explicitly, then stored as full JSValues.
1983 speculateBoolean(m_node->child1());
1985 lowJSValue(m_node->child1(), ManualOperandSpeculation),
1986 addressFor(data->machineLocal));
1991 DFG_CRASH(m_graph, m_node, "Bad flush format");
// NOTE(review): the enclosing function header is not visible in this excerpt;
// presumably this is compileNoOp, which still performs the node's speculation
// checks on each child even though the node itself produces nothing.
1998 DFG_NODE_DO_TO_CHILDREN(m_graph, m_node, speculate);
2001 void compileToObjectOrCallObjectConstructor()
// Implements ToObject / CallObjectConstructor: objects pass through on the
// fast path; everything else goes to a slow-path VM call.
2003 LValue value = lowJSValue(m_node->child1());
2005 LBasicBlock isCellCase = m_out.newBlock();
2006 LBasicBlock slowCase = m_out.newBlock();
2007 LBasicBlock continuation = m_out.newBlock();
2009 m_out.branch(isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));
2011 LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
2012 ValueFromBlock fastResult = m_out.anchor(value);
2013 m_out.branch(isObject(value), usually(continuation), rarely(slowCase));
2015 m_out.appendTo(slowCase, continuation);
2017 ValueFromBlock slowResult;
// ToObject passes the identifier (for error messages); the constructor
// variant passes the frozen constructor cell instead.
2018 if (m_node->op() == ToObject) {
2019 auto* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
2020 slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationToObject), m_callFrame, weakPointer(globalObject), value, m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
2022 slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationCallObjectConstructor), m_callFrame, frozenPointer(m_node->cellOperand()), value));
2023 m_out.jump(continuation);
2025 m_out.appendTo(continuation, lastNext);
2026 setJSValue(m_out.phi(Int64, fastResult, slowResult));
2029 void compileToThis()
// Implements ToThis: cells that do not override ToThis pass through; all
// other cases call the strict or sloppy runtime helper.
2031 LValue value = lowJSValue(m_node->child1());
2033 LBasicBlock isCellCase = m_out.newBlock();
2034 LBasicBlock slowCase = m_out.newBlock();
2035 LBasicBlock continuation = m_out.newBlock();
2038 isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));
2040 LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
2041 ValueFromBlock fastResult = m_out.anchor(value);
// Check the cell's type-info flags for the OverridesToThis bit.
2044 m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoFlags),
2045 m_out.constInt32(OverridesToThis)),
2046 usually(continuation), rarely(slowCase));
2048 m_out.appendTo(slowCase, continuation);
// Strictness of the calling code selects which runtime operation to use.
2049 J_JITOperation_EJ function;
2050 if (m_graph.isStrictModeFor(m_node->origin.semantic))
2051 function = operationToThisStrict;
2053 function = operationToThis;
2054 ValueFromBlock slowResult = m_out.anchor(
2055 vmCall(Int64, m_out.operation(function), m_callFrame, value));
2056 m_out.jump(continuation);
2058 m_out.appendTo(continuation, lastNext);
2059 setJSValue(m_out.phi(Int64, fastResult, slowResult));
2062 void compileValueAdd()
// ValueAdd: BigInts go straight to the runtime; everything else uses the
// add math IC keyed off the baseline arith profile.
2064 if (m_node->isBinaryUseKind(BigIntUse)) {
2065 LValue left = lowBigInt(m_node->child1());
2066 LValue right = lowBigInt(m_node->child2());
2068 LValue result = vmCall(pointerType(), m_out.operation(operationAddBigInt), m_callFrame, left, right);
2073 CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
2074 unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
2075 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
2076 auto repatchingFunction = operationValueAddOptimize;
2077 auto nonRepatchingFunction = operationValueAdd;
2078 compileBinaryMathIC<JITAddGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
2081 void compileValueSub()
// ValueSub: BigInts call the runtime; otherwise use the sub math IC with
// the baseline arith profile (mirrors compileValueAdd).
2083 if (m_node->isBinaryUseKind(BigIntUse)) {
2084 LValue left = lowBigInt(m_node->child1());
2085 LValue right = lowBigInt(m_node->child2());
2087 LValue result = vmCall(pointerType(), m_out.operation(operationSubBigInt), m_callFrame, left, right);
2092 CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
2093 unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
2094 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
2095 auto repatchingFunction = operationValueSubOptimize;
2096 auto nonRepatchingFunction = operationValueSub;
2097 compileBinaryMathIC<JITSubGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
2100 void compileValueMul()
// ValueMul: BigInts call the runtime; otherwise use the mul math IC with
// the baseline arith profile (mirrors compileValueAdd/Sub).
2102 if (m_node->isBinaryUseKind(BigIntUse)) {
2103 LValue left = lowBigInt(m_node->child1());
2104 LValue right = lowBigInt(m_node->child2());
2106 LValue result = vmCall(Int64, m_out.operation(operationMulBigInt), m_callFrame, left, right);
2111 CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
2112 unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
2113 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
2114 auto repatchingFunction = operationValueMulOptimize;
2115 auto nonRepatchingFunction = operationValueMul;
2116 compileBinaryMathIC<JITMulGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
2119 template <typename Generator, typename Func1, typename Func2,
2120 typename = std::enable_if_t<std::is_function<typename std::remove_pointer<Func1>::type>::value && std::is_function<typename std::remove_pointer<Func2>::type>::value>>
// Emits a unary math inline cache via a patchpoint: the IC's fast path is
// generated inline, a late-bound slow path calls either the repatching
// operation (so the IC can specialize itself) or the plain operation.
2121 void compileUnaryMathIC(ArithProfile* arithProfile, Func1 repatchingFunction, Func2 nonRepatchingFunction)
2123 Node* node = m_node;
2125 LValue operand = lowJSValue(node->child1());
2127 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
2128 patchpoint->appendSomeRegister(operand);
// The IC code relies on the tag registers holding their canonical values.
2129 patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
2130 patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
2131 RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
2132 patchpoint->numGPScratchRegisters = 1;
2133 patchpoint->clobber(RegisterSet::macroScratchRegisters());
2134 State* state = &m_ftlState;
2135 patchpoint->setGenerator(
2136 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
2137 AllowMacroScratchRegisterUsage allowScratch(jit);
2139 Box<CCallHelpers::JumpList> exceptions =
2140 exceptionHandle->scheduleExitCreation(params)->jumps(jit);
2142 #if ENABLE(MATH_IC_STATS)
2143 auto inlineStart = jit.label();
2146 Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
2147 JITUnaryMathIC<Generator>* mathIC = jit.codeBlock()->addMathIC<Generator>(arithProfile);
2148 mathIC->m_generator = Generator(JSValueRegs(params[0].gpr()), JSValueRegs(params[1].gpr()), params.gpScratch(0));
// Profiling is not emitted here; the FTL relies on the baseline profile.
2150 bool shouldEmitProfiling = false;
2151 bool generatedInline = mathIC->generateInline(jit, *mathICGenerationState, shouldEmitProfiling);
2153 if (generatedInline) {
2154 ASSERT(!mathICGenerationState->slowPathJumps.empty());
2155 auto done = jit.label();
// Slow path is emitted late so it stays out of the inline fast path.
2156 params.addLatePath([=] (CCallHelpers& jit) {
2157 AllowMacroScratchRegisterUsage allowScratch(jit);
2158 mathICGenerationState->slowPathJumps.link(&jit);
2159 mathICGenerationState->slowPathStart = jit.label();
2160 #if ENABLE(MATH_IC_STATS)
2161 auto slowPathStart = jit.label();
2164 if (mathICGenerationState->shouldSlowPathRepatch) {
2165 SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
2166 repatchingFunction, params[0].gpr(), params[1].gpr(), CCallHelpers::TrustedImmPtr(mathIC));
2167 mathICGenerationState->slowPathCall = call.call();
2169 SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic,
2170 exceptions.get(), nonRepatchingFunction, params[0].gpr(), params[1].gpr());
2171 mathICGenerationState->slowPathCall = call.call();
2173 jit.jump().linkTo(done, &jit);
2175 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
2176 mathIC->finalizeInlineCode(*mathICGenerationState, linkBuffer);
2179 #if ENABLE(MATH_IC_STATS)
2180 auto slowPathEnd = jit.label();
2181 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
2182 size_t size = linkBuffer.locationOf(slowPathEnd).executableAddress<char*>() - linkBuffer.locationOf(slowPathStart).executableAddress<char*>();
2183 mathIC->m_generatedCodeSize += size;
// If the IC could not generate an inline fast path, fall back to an
// unconditional call to the non-repatching operation.
2189 *state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
2190 nonRepatchingFunction, params[0].gpr(), params[1].gpr());
2193 #if ENABLE(MATH_IC_STATS)
2194 auto inlineEnd = jit.label();
2195 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
2196 size_t size = linkBuffer.locationOf(inlineEnd).executableAddress<char*>() - linkBuffer.locationOf(inlineStart).executableAddress<char*>();
2197 mathIC->m_generatedCodeSize += size;
2202 setJSValue(patchpoint);
2205 template <typename Generator, typename Func1, typename Func2,
2206 typename = std::enable_if_t<std::is_function<typename std::remove_pointer<Func1>::type>::value && std::is_function<typename std::remove_pointer<Func2>::type>::value>>
// Binary analogue of compileUnaryMathIC: emits a two-operand math inline
// cache (add/sub/mul) through a patchpoint, with a late slow path that
// calls the repatching or plain runtime operation.
2207 void compileBinaryMathIC(ArithProfile* arithProfile, Func1 repatchingFunction, Func2 nonRepatchingFunction)
2209 Node* node = m_node;
2211 LValue left = lowJSValue(node->child1());
2212 LValue right = lowJSValue(node->child2());
// Result-type hints from the abstract interpreter guide IC codegen.
2214 SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
2215 SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());
2217 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
2218 patchpoint->appendSomeRegister(left);
2219 patchpoint->appendSomeRegister(right);
2220 patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
2221 patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
2222 RefPtr<PatchpointExceptionHandle> exceptionHandle =
2223 preparePatchpointForExceptions(patchpoint);
2224 patchpoint->numGPScratchRegisters = 1;
2225 patchpoint->numFPScratchRegisters = 2;
2226 patchpoint->clobber(RegisterSet::macroScratchRegisters());
2227 State* state = &m_ftlState;
2228 patchpoint->setGenerator(
2229 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
2230 AllowMacroScratchRegisterUsage allowScratch(jit);
2233 Box<CCallHelpers::JumpList> exceptions =
2234 exceptionHandle->scheduleExitCreation(params)->jumps(jit);
2236 #if ENABLE(MATH_IC_STATS)
2237 auto inlineStart = jit.label();
2240 Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
2241 JITBinaryMathIC<Generator>* mathIC = jit.codeBlock()->addMathIC<Generator>(arithProfile);
2242 mathIC->m_generator = Generator(leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
2243 JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), params.fpScratch(0),
2244 params.fpScratch(1), params.gpScratch(0), InvalidFPRReg);
// Profiling is not emitted here; the FTL relies on the baseline profile.
2246 bool shouldEmitProfiling = false;
2247 bool generatedInline = mathIC->generateInline(jit, *mathICGenerationState, shouldEmitProfiling);
2249 if (generatedInline) {
2250 ASSERT(!mathICGenerationState->slowPathJumps.empty());
2251 auto done = jit.label();
// Slow path is emitted late so it stays out of the inline fast path.
2252 params.addLatePath([=] (CCallHelpers& jit) {
2253 AllowMacroScratchRegisterUsage allowScratch(jit);
2254 mathICGenerationState->slowPathJumps.link(&jit);
2255 mathICGenerationState->slowPathStart = jit.label();
2256 #if ENABLE(MATH_IC_STATS)
2257 auto slowPathStart = jit.label();
2260 if (mathICGenerationState->shouldSlowPathRepatch) {
2261 SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
2262 repatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr(), CCallHelpers::TrustedImmPtr(mathIC));
2263 mathICGenerationState->slowPathCall = call.call();
2265 SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic,
2266 exceptions.get(), nonRepatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr());
2267 mathICGenerationState->slowPathCall = call.call();
2269 jit.jump().linkTo(done, &jit);
2271 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
2272 mathIC->finalizeInlineCode(*mathICGenerationState, linkBuffer);
2275 #if ENABLE(MATH_IC_STATS)
2276 auto slowPathEnd = jit.label();
2277 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
2278 size_t size = linkBuffer.locationOf(slowPathEnd).executableAddress<char*>() - linkBuffer.locationOf(slowPathStart).executableAddress<char*>();
2279 mathIC->m_generatedCodeSize += size;
// No inline fast path: fall back to the non-repatching operation call.
2285 *state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
2286 nonRepatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr());
2289 #if ENABLE(MATH_IC_STATS)
2290 auto inlineEnd = jit.label();
2291 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
2292 size_t size = linkBuffer.locationOf(inlineEnd).executableAddress<char*>() - linkBuffer.locationOf(inlineStart).executableAddress<char*>();
2293 mathIC->m_generatedCodeSize += size;
2298 setJSValue(patchpoint);
2301 void compileStrCat()
// String concatenation via runtime call: picks the 3-operand or 2-operand
// helper depending on whether child3 is present.
2304 if (m_node->child3()) {
2306 Int64, m_out.operation(operationStrCat3), m_callFrame,
2307 lowJSValue(m_node->child1(), ManualOperandSpeculation),
2308 lowJSValue(m_node->child2(), ManualOperandSpeculation),
2309 lowJSValue(m_node->child3(), ManualOperandSpeculation));
2312 Int64, m_out.operation(operationStrCat2), m_callFrame,
2313 lowJSValue(m_node->child1(), ManualOperandSpeculation),
2314 lowJSValue(m_node->child2(), ManualOperandSpeculation));
2319 void compileArithAddOrSub()
// Shared lowering for ArithAdd/ArithSub across int32, int52, double, and
// the untyped (math IC) cases.
2321 bool isSub = m_node->op() == ArithSub;
2322 switch (m_node->binaryUseKind()) {
2324 LValue left = lowInt32(m_node->child1());
2325 LValue right = lowInt32(m_node->child2());
// Unchecked mode: plain wrap-around arithmetic is acceptable.
2327 if (!shouldCheckOverflow(m_node->arithMode())) {
2328 setInt32(isSub ? m_out.sub(left, right) : m_out.add(left, right));
2332 CheckValue* result =
2333 isSub ? m_out.speculateSub(left, right) : m_out.speculateAdd(left, right);
2334 blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
// Int52: if neither operand can exceed int32 range, the sum/difference
// cannot overflow int52, so no check is needed.
2340 if (!abstractValue(m_node->child1()).couldBeType(SpecNonInt32AsInt52)
2341 && !abstractValue(m_node->child2()).couldBeType(SpecNonInt32AsInt52)) {
2343 LValue left = lowWhicheverInt52(m_node->child1(), kind);
2344 LValue right = lowInt52(m_node->child2(), kind);
2345 setInt52(isSub ? m_out.sub(left, right) : m_out.add(left, right), kind);
2349 LValue left = lowInt52(m_node->child1());
2350 LValue right = lowInt52(m_node->child2());
2351 CheckValue* result =
2352 isSub ? m_out.speculateSub(left, right) : m_out.speculateAdd(left, right);
2353 blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
2358 case DoubleRepUse: {
2359 LValue C1 = lowDouble(m_node->child1());
2360 LValue C2 = lowDouble(m_node->child2());
2362 setDouble(isSub ? m_out.doubleSub(C1, C2) : m_out.doubleAdd(C1, C2));
// Untyped: only ArithSub reaches here; route through the sub math IC.
2368 DFG_CRASH(m_graph, m_node, "Bad use kind");
2372 CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
2373 unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
2374 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
2375 auto repatchingFunction = operationValueSubOptimize;
2376 auto nonRepatchingFunction = operationValueSub;
2377 compileBinaryMathIC<JITSubGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
2382 DFG_CRASH(m_graph, m_node, "Bad use kind");
2387 void compileArithClz32()
// Math.clz32: count-leading-zeros directly for int32 inputs, runtime call
// for untyped inputs.
2389 if (m_node->child1().useKind() == Int32Use || m_node->child1().useKind() == KnownInt32Use) {
2390 LValue operand = lowInt32(m_node->child1());
2391 setInt32(m_out.ctlz32(operand));
2394 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
2395 LValue argument = lowJSValue(m_node->child1());
2396 LValue result = vmCall(Int32, m_out.operation(operationArithClz32), m_callFrame, argument);
2400 void compileArithMul()
// ArithMul over int32, int52, and double representations, with optional
// overflow and negative-zero speculation per arith mode.
2402 switch (m_node->binaryUseKind()) {
2404 LValue left = lowInt32(m_node->child1());
2405 LValue right = lowInt32(m_node->child2());
2409 if (!shouldCheckOverflow(m_node->arithMode()))
2410 result = m_out.mul(left, right);
2412 CheckValue* speculation = m_out.speculateMul(left, right);
2413 blessSpeculation(speculation, Overflow, noValue(), nullptr, m_origin);
2414 result = speculation;
// Negative zero: a zero product with a negative operand would have been
// -0 as a double, which int32 cannot represent, so deoptimize.
2417 if (shouldCheckNegativeZero(m_node->arithMode())) {
2418 LBasicBlock slowCase = m_out.newBlock();
2419 LBasicBlock continuation = m_out.newBlock();
2422 m_out.notZero32(result), usually(continuation), rarely(slowCase));
2424 LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
2425 speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int32Zero));
2426 speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int32Zero));
2427 m_out.jump(continuation);
2428 m_out.appendTo(continuation, lastNext);
// Int52: operands use opposite shift kinds so the product lands in the
// right representation.
2437 LValue left = lowWhicheverInt52(m_node->child1(), kind);
2438 LValue right = lowInt52(m_node->child2(), opposite(kind));
2440 CheckValue* result = m_out.speculateMul(left, right);
2441 blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
2443 if (shouldCheckNegativeZero(m_node->arithMode())) {
2444 LBasicBlock slowCase = m_out.newBlock();
2445 LBasicBlock continuation = m_out.newBlock();
2448 m_out.notZero64(result), usually(continuation), rarely(slowCase));
2450 LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
2451 speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int64Zero));
2452 speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int64Zero));
2453 m_out.jump(continuation);
2454 m_out.appendTo(continuation, lastNext);
2461 case DoubleRepUse: {
2463 m_out.doubleMul(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
2468 DFG_CRASH(m_graph, m_node, "Bad use kind");
2473 void compileValueDiv()
// ValueDiv: BigInts call the runtime; otherwise emit the generic binary
// division snippet (which needs a scratch FPR).
2475 if (m_node->isBinaryUseKind(BigIntUse)) {
2476 LValue left = lowBigInt(m_node->child1());
2477 LValue right = lowBigInt(m_node->child2());
2479 LValue result = vmCall(pointerType(), m_out.operation(operationDivBigInt), m_callFrame, left, right);
2484 emitBinarySnippet<JITDivGenerator, NeedScratchFPR>(operationValueDiv);
2487 void compileArithDiv()
// ArithDiv over int32 and double. The int32 path speculates on negative
// zero, division overflow (INT_MIN / -1), divide-by-zero, and inexact
// results, falling back to a chill (non-trapping) div when unchecked.
2489 switch (m_node->binaryUseKind()) {
2491 LValue numerator = lowInt32(m_node->child1());
2492 LValue denominator = lowInt32(m_node->child2());
// 0 / negative would produce -0; deoptimize in that case.
2494 if (shouldCheckNegativeZero(m_node->arithMode())) {
2495 LBasicBlock zeroNumerator = m_out.newBlock();
2496 LBasicBlock numeratorContinuation = m_out.newBlock();
2499 m_out.isZero32(numerator),
2500 rarely(zeroNumerator), usually(numeratorContinuation));
2502 LBasicBlock innerLastNext = m_out.appendTo(zeroNumerator, numeratorContinuation);
2505 NegativeZero, noValue(), 0, m_out.lessThan(denominator, m_out.int32Zero));
2507 m_out.jump(numeratorContinuation);
2509 m_out.appendTo(numeratorContinuation, innerLastNext);
2512 if (shouldCheckOverflow(m_node->arithMode())) {
2513 LBasicBlock unsafeDenominator = m_out.newBlock();
2514 LBasicBlock continuation = m_out.newBlock();
// denominator+1 > 1 (unsigned) iff denominator is neither 0 nor -1 —
// the two values that can trap or overflow hardware division.
2516 LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
2518 m_out.above(adjustedDenominator, m_out.int32One),
2519 usually(continuation), rarely(unsafeDenominator));
2521 LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
2522 LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
2523 speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator));
2524 speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31));
2525 m_out.jump(continuation);
2527 m_out.appendTo(continuation, lastNext);
2528 LValue result = m_out.div(numerator, denominator);
// Deoptimize if the division was not exact (result * denom != numer).
2530 Overflow, noValue(), 0,
2531 m_out.notEqual(m_out.mul(result, denominator), numerator));
2534 setInt32(m_out.chillDiv(numerator, denominator));
2539 case DoubleRepUse: {
2540 setDouble(m_out.doubleDiv(
2541 lowDouble(m_node->child1()), lowDouble(m_node->child2())));
2546 DFG_CRASH(m_graph, m_node, "Bad use kind");
2551 void compileValueMod()
// ValueMod: BigInts call the BigInt runtime helper; otherwise the only
// remaining case is untyped, which calls the generic runtime operation.
2553 if (m_node->binaryUseKind() == BigIntUse) {
2554 LValue left = lowBigInt(m_node->child1());
2555 LValue right = lowBigInt(m_node->child2());
2557 LValue result = vmCall(pointerType(), m_out.operation(operationModBigInt), m_callFrame, left, right);
2562 DFG_ASSERT(m_graph, m_node, m_node->binaryUseKind() == UntypedUse, m_node->binaryUseKind());
2563 LValue left = lowJSValue(m_node->child1());
2564 LValue right = lowJSValue(m_node->child2());
2565 LValue result = vmCall(Int64, m_out.operation(operationValueMod), m_callFrame, left, right);
2569 void compileArithMod()
// ArithMod over int32 and double. The int32 path guards the unsafe
// denominators (0 and -1 with INT_MIN numerator) when checking overflow,
// and speculates on negative zero (negative numerator, zero remainder).
2571 switch (m_node->binaryUseKind()) {
2573 LValue numerator = lowInt32(m_node->child1());
2574 LValue denominator = lowInt32(m_node->child2());
2577 if (shouldCheckOverflow(m_node->arithMode())) {
2578 LBasicBlock unsafeDenominator = m_out.newBlock();
2579 LBasicBlock continuation = m_out.newBlock();
// denominator+1 > 1 (unsigned) iff denominator is neither 0 nor -1.
2581 LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
2583 m_out.above(adjustedDenominator, m_out.int32One),
2584 usually(continuation), rarely(unsafeDenominator));
2586 LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
2587 LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
2588 speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator));
2589 speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31));
2590 m_out.jump(continuation);
2592 m_out.appendTo(continuation, lastNext);
2593 LValue result = m_out.mod(numerator, denominator);
// Unchecked mode: chill mod never traps on 0 or INT_MIN % -1.
2596 remainder = m_out.chillMod(numerator, denominator);
// A zero remainder from a negative numerator would be -0 as a double;
// int32 cannot represent that, so deoptimize.
2598 if (shouldCheckNegativeZero(m_node->arithMode())) {
2599 LBasicBlock negativeNumerator = m_out.newBlock();
2600 LBasicBlock numeratorContinuation = m_out.newBlock();
2603 m_out.lessThan(numerator, m_out.int32Zero),
2604 unsure(negativeNumerator), unsure(numeratorContinuation));
2606 LBasicBlock innerLastNext = m_out.appendTo(negativeNumerator, numeratorContinuation);
2608 speculate(NegativeZero, noValue(), 0, m_out.isZero32(remainder));
2610 m_out.jump(numeratorContinuation);
2612 m_out.appendTo(numeratorContinuation, innerLastNext);
2615 setInt32(remainder);
2619 case DoubleRepUse: {
2621 m_out.doubleMod(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
2626 DFG_CRASH(m_graph, m_node, "Bad use kind");
2631 void compileArithMinOrMax()
2633 switch (m_node->binaryUseKind()) {
2635 LValue left = lowInt32(m_node->child1());
2636 LValue right = lowInt32(m_node->child2());
2640 m_node->op() == ArithMin
2641 ? m_out.lessThan(left, right)
2642 : m_out.lessThan(right, left),
2647 case DoubleRepUse: {
2648 LValue left = lowDouble(m_node->child1());
2649 LValue right = lowDouble(m_node->child2());
2651 LBasicBlock notLessThan = m_out.newBlock();
2652 LBasicBlock continuation = m_out.newBlock();
2654 Vector<ValueFromBlock, 2> results;
2656 results.append(m_out.anchor(left));
2658 m_node->op() == ArithMin
2659 ? m_out.doubleLessThan(left, right)
2660 : m_out.doubleGreaterThan(left, right),
2661 unsure(continuation), unsure(notLessThan));
2663 LBasicBlock lastNext = m_out.appendTo(notLessThan, continuation);
2664 results.append(m_out.anchor(m_out.select(
2665 m_node->op() == ArithMin
2666 ? m_out.doubleGreaterThanOrEqual(left, right)
2667 : m_out.doubleLessThanOrEqual(left, right),
2668 right, m_out.constDouble(PNaN))));
2669 m_out.jump(continuation);
2671 m_out.appendTo(continuation, lastNext);
2672 setDouble(m_out.phi(Double, results));
2677 DFG_CRASH(m_graph, m_node, "Bad use kind");
2682 void compileArithAbs()
2684 switch (m_node->child1().useKind()) {
2686 LValue value = lowInt32(m_node->child1());
2688 LValue mask = m_out.aShr(value, m_out.constInt32(31));
2689 LValue result = m_out.bitXor(mask, m_out.add(mask, value));
2691 if (shouldCheckOverflow(m_node->arithMode()))
2692 speculate(Overflow, noValue(), 0, m_out.lessThan(result, m_out.int32Zero));
2698 case DoubleRepUse: {
2699 setDouble(m_out.doubleAbs(lowDouble(m_node->child1())));
2704 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
2705 LValue argument = lowJSValue(m_node->child1());
2706 LValue result = vmCall(Double, m_out.operation(operationArithAbs), m_callFrame, argument);
2713 void compileArithUnary()
2715 if (m_node->child1().useKind() == DoubleRepUse) {
2716 setDouble(m_out.doubleUnary(m_node->arithUnaryType(), lowDouble(m_node->child1())));
2719 LValue argument = lowJSValue(m_node->child1());
2720 LValue result = vmCall(Double, m_out.operation(DFG::arithUnaryOperation(m_node->arithUnaryType())), m_callFrame, argument);
2724 void compileValuePow()
2726 if (m_node->isBinaryUseKind(BigIntUse)) {
2727 LValue base = lowBigInt(m_node->child1());
2728 LValue exponent = lowBigInt(m_node->child2());
2730 LValue result = vmCall(pointerType(), m_out.operation(operationPowBigInt), m_callFrame, base, exponent);
2735 LValue base = lowJSValue(m_node->child1());
2736 LValue exponent = lowJSValue(m_node->child2());
2737 LValue result = vmCall(Int64, m_out.operation(operationValuePow), m_callFrame, base, exponent);
2741 void compileArithPow()
2743 if (m_node->child2().useKind() == Int32Use)
2744 setDouble(m_out.doublePowi(lowDouble(m_node->child1()), lowInt32(m_node->child2())));
2746 LValue base = lowDouble(m_node->child1());
2747 LValue exponent = lowDouble(m_node->child2());
2749 LBasicBlock integerExponentIsSmallBlock = m_out.newBlock();
2750 LBasicBlock integerExponentPowBlock = m_out.newBlock();
2751 LBasicBlock doubleExponentPowBlockEntry = m_out.newBlock();
2752 LBasicBlock nanExceptionBaseIsOne = m_out.newBlock();
2753 LBasicBlock nanExceptionExponentIsInfinity = m_out.newBlock();
2754 LBasicBlock testExponentIsOneHalf = m_out.newBlock();
2755 LBasicBlock handleBaseZeroExponentIsOneHalf = m_out.newBlock();
2756 LBasicBlock handleInfinityForExponentIsOneHalf = m_out.newBlock();
2757 LBasicBlock exponentIsOneHalfNormal = m_out.newBlock();
2758 LBasicBlock exponentIsOneHalfInfinity = m_out.newBlock();
2759 LBasicBlock testExponentIsNegativeOneHalf = m_out.newBlock();
2760 LBasicBlock testBaseZeroExponentIsNegativeOneHalf = m_out.newBlock();
2761 LBasicBlock handleBaseZeroExponentIsNegativeOneHalf = m_out.newBlock();
2762 LBasicBlock handleInfinityForExponentIsNegativeOneHalf = m_out.newBlock();
2763 LBasicBlock exponentIsNegativeOneHalfNormal = m_out.newBlock();
2764 LBasicBlock exponentIsNegativeOneHalfInfinity = m_out.newBlock();
2765 LBasicBlock powBlock = m_out.newBlock();
2766 LBasicBlock nanExceptionResultIsNaN = m_out.newBlock();
2767 LBasicBlock continuation = m_out.newBlock();
2769 LValue integerExponent = m_out.doubleToInt(exponent);
2770 LValue integerExponentConvertedToDouble = m_out.intToDouble(integerExponent);
2771 LValue exponentIsInteger = m_out.doubleEqual(exponent, integerExponentConvertedToDouble);
2772 m_out.branch(exponentIsInteger, unsure(integerExponentIsSmallBlock), unsure(doubleExponentPowBlockEntry));
2774 LBasicBlock lastNext = m_out.appendTo(integerExponentIsSmallBlock, integerExponentPowBlock);
2775 LValue integerExponentBelowMax = m_out.belowOrEqual(integerExponent, m_out.constInt32(maxExponentForIntegerMathPow));
2776 m_out.branch(integerExponentBelowMax, usually(integerExponentPowBlock), rarely(doubleExponentPowBlockEntry));
2778 m_out.appendTo(integerExponentPowBlock, doubleExponentPowBlockEntry);
2779 ValueFromBlock powDoubleIntResult = m_out.anchor(m_out.doublePowi(base, integerExponent));
2780 m_out.jump(continuation);
2782 // If y is NaN, the result is NaN.
2783 m_out.appendTo(doubleExponentPowBlockEntry, nanExceptionBaseIsOne);
2784 LValue exponentIsNaN;
2785 if (provenType(m_node->child2()) & SpecDoubleNaN)
2786 exponentIsNaN = m_out.doubleNotEqualOrUnordered(exponent, exponent);
2788 exponentIsNaN = m_out.booleanFalse;
2789 m_out.branch(exponentIsNaN, rarely(nanExceptionResultIsNaN), usually(nanExceptionBaseIsOne));
2791 // If abs(x) is 1 and y is +infinity, the result is NaN.
2792 // If abs(x) is 1 and y is -infinity, the result is NaN.
2794 // Test if base == 1.
2795 m_out.appendTo(nanExceptionBaseIsOne, nanExceptionExponentIsInfinity);
2796 LValue absoluteBase = m_out.doubleAbs(base);
2797 LValue absoluteBaseIsOne = m_out.doubleEqual(absoluteBase, m_out.constDouble(1));
2798 m_out.branch(absoluteBaseIsOne, rarely(nanExceptionExponentIsInfinity), usually(testExponentIsOneHalf));
2800 // Test if abs(y) == Infinity.
2801 m_out.appendTo(nanExceptionExponentIsInfinity, testExponentIsOneHalf);
2802 LValue absoluteExponent = m_out.doubleAbs(exponent);
2803 LValue absoluteExponentIsInfinity = m_out.doubleEqual(absoluteExponent, m_out.constDouble(std::numeric_limits<double>::infinity()));
2804 m_out.branch(absoluteExponentIsInfinity, rarely(nanExceptionResultIsNaN), usually(testExponentIsOneHalf));
2806 // If y == 0.5 or y == -0.5, handle it through SQRT.
2807 // We have be carefuly with -0 and -Infinity.
2810 m_out.appendTo(testExponentIsOneHalf, handleBaseZeroExponentIsOneHalf);
2811 LValue exponentIsOneHalf = m_out.doubleEqual(exponent, m_out.constDouble(0.5));
2812 m_out.branch(exponentIsOneHalf, rarely(handleBaseZeroExponentIsOneHalf), usually(testExponentIsNegativeOneHalf));
2815 m_out.appendTo(handleBaseZeroExponentIsOneHalf, handleInfinityForExponentIsOneHalf);
2816 LValue baseIsZeroExponentIsOneHalf = m_out.doubleEqual(base, m_out.doubleZero);
2817 ValueFromBlock zeroResultExponentIsOneHalf = m_out.anchor(m_out.doubleZero);
2818 m_out.branch(baseIsZeroExponentIsOneHalf, rarely(continuation), usually(handleInfinityForExponentIsOneHalf));
2820 // Test if abs(x) == Infinity.
2821 m_out.appendTo(handleInfinityForExponentIsOneHalf, exponentIsOneHalfNormal);
2822 LValue absoluteBaseIsInfinityOneHalf = m_out.doubleEqual(absoluteBase, m_out.constDouble(std::numeric_limits<double>::infinity()));
2823 m_out.branch(absoluteBaseIsInfinityOneHalf, rarely(exponentIsOneHalfInfinity), usually(exponentIsOneHalfNormal));
2825 // The exponent is 0.5, the base is finite or NaN, we can use SQRT.
2826 m_out.appendTo(exponentIsOneHalfNormal, exponentIsOneHalfInfinity);
2827 ValueFromBlock sqrtResult = m_out.anchor(m_out.doubleSqrt(base));
2828 m_out.jump(continuation);
2830 // The exponent is 0.5, the base is infinite, the result is always infinite.
2831 m_out.appendTo(exponentIsOneHalfInfinity, testExponentIsNegativeOneHalf);
2832 ValueFromBlock sqrtInfinityResult = m_out.anchor(m_out.constDouble(std::numeric_limits<double>::infinity()));
2833 m_out.jump(continuation);
2835 // Test if y == -0.5
2836 m_out.appendTo(testExponentIsNegativeOneHalf, testBaseZeroExponentIsNegativeOneHalf);
2837 LValue exponentIsNegativeOneHalf = m_out.doubleEqual(exponent, m_out.constDouble(-0.5));
2838 m_out.branch(exponentIsNegativeOneHalf, rarely(testBaseZeroExponentIsNegativeOneHalf), usually(powBlock));
2841 m_out.appendTo(testBaseZeroExponentIsNegativeOneHalf, handleBaseZeroExponentIsNegativeOneHalf);
2842 LValue baseIsZeroExponentIsNegativeOneHalf = m_out.doubleEqual(base, m_out.doubleZero);
2843 m_out.branch(baseIsZeroExponentIsNegativeOneHalf, rarely(handleBaseZeroExponentIsNegativeOneHalf), usually(handleInfinityForExponentIsNegativeOneHalf));
2845 m_out.appendTo(handleBaseZeroExponentIsNegativeOneHalf, handleInfinityForExponentIsNegativeOneHalf);
2846 ValueFromBlock oneOverSqrtZeroResult = m_out.anchor(m_out.constDouble(std::numeric_limits<double>::infinity()));
2847 m_out.jump(continuation);
2849 // Test if abs(x) == Infinity.
2850 m_out.appendTo(handleInfinityForExponentIsNegativeOneHalf, exponentIsNegativeOneHalfNormal);
2851 LValue absoluteBaseIsInfinityNegativeOneHalf = m_out.doubleEqual(absoluteBase, m_out.constDouble(std::numeric_limits<double>::infinity()));
2852 m_out.branch(absoluteBaseIsInfinityNegativeOneHalf, rarely(exponentIsNegativeOneHalfInfinity), usually(exponentIsNegativeOneHalfNormal));
2854 // The exponent is -0.5, the base is finite or NaN, we can use 1/SQRT.
2855 m_out.appendTo(exponentIsNegativeOneHalfNormal, exponentIsNegativeOneHalfInfinity);
2856 LValue sqrtBase = m_out.doubleSqrt(base);
2857 ValueFromBlock oneOverSqrtResult = m_out.anchor(m_out.div(m_out.constDouble(1.), sqrtBase));
2858 m_out.jump(continuation);
2860 // The exponent is -0.5, the base is infinite, the result is always zero.
2861 m_out.appendTo(exponentIsNegativeOneHalfInfinity, powBlock);
2862 ValueFromBlock oneOverSqrtInfinityResult = m_out.anchor(m_out.doubleZero);
2863 m_out.jump(continuation);
2865 m_out.appendTo(powBlock, nanExceptionResultIsNaN);
2866 ValueFromBlock powResult = m_out.anchor(m_out.doublePow(base, exponent));
2867 m_out.jump(continuation);
2869 m_out.appendTo(nanExceptionResultIsNaN, continuation);
2870 ValueFromBlock pureNan = m_out.anchor(m_out.constDouble(PNaN));
2871 m_out.jump(continuation);
2873 m_out.appendTo(continuation, lastNext);
2874 setDouble(m_out.phi(Double, powDoubleIntResult, zeroResultExponentIsOneHalf, sqrtResult, sqrtInfinityResult, oneOverSqrtZeroResult, oneOverSqrtResult, oneOverSqrtInfinityResult, powResult, pureNan));
2878 void compileArithRandom()
2880 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
2882 // Inlined WeakRandom::advance().
2883 // uint64_t x = m_low;
2884 void* lowAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset();
2885 LValue low = m_out.load64(m_out.absolute(lowAddress));
2886 // uint64_t y = m_high;
2887 void* highAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset();
2888 LValue high = m_out.load64(m_out.absolute(highAddress));
2890 m_out.store64(high, m_out.absolute(lowAddress));
2893 LValue phase1 = m_out.bitXor(m_out.shl(low, m_out.constInt64(23)), low);
2896 LValue phase2 = m_out.bitXor(m_out.lShr(phase1, m_out.constInt64(17)), phase1);
2898 // x ^= y ^ (y >> 26);
2899 LValue phase3 = m_out.bitXor(m_out.bitXor(high, m_out.lShr(high, m_out.constInt64(26))), phase2);
2902 m_out.store64(phase3, m_out.absolute(highAddress));
2905 LValue random64 = m_out.add(phase3, high);
2907 // Extract random 53bit. [0, 53] bit is safe integer number ranges in double representation.
2908 LValue random53 = m_out.bitAnd(random64, m_out.constInt64((1ULL << 53) - 1));
2910 LValue double53Integer = m_out.intToDouble(random53);
2912 // Convert `(53bit double integer value) / (1 << 53)` to `(53bit double integer value) * (1.0 / (1 << 53))`.
2913 // In latter case, `1.0 / (1 << 53)` will become a double value represented as (mantissa = 0 & exp = 970, it means 1e-(2**54)).
2914 static const double scale = 1.0 / (1ULL << 53);
2916 // Multiplying 1e-(2**54) with the double integer does not change anything of the mantissa part of the double integer.
2917 // It just reduces the exp part of the given 53bit double integer.
2918 // (Except for 0.0. This is specially handled and in this case, exp just becomes 0.)
2919 // Now we get 53bit precision random double value in [0, 1).
2920 LValue result = m_out.doubleMul(double53Integer, m_out.constDouble(scale));
2925 void compileArithRound()
2927 if (m_node->child1().useKind() == DoubleRepUse) {
2928 LValue result = nullptr;
2929 if (producesInteger(m_node->arithRoundingMode()) && !shouldCheckNegativeZero(m_node->arithRoundingMode())) {
2930 LValue value = lowDouble(m_node->child1());
2931 result = m_out.doubleFloor(m_out.doubleAdd(value, m_out.constDouble(0.5)));
2933 LBasicBlock realPartIsMoreThanHalf = m_out.newBlock();
2934 LBasicBlock continuation = m_out.newBlock();
2936 LValue value = lowDouble(m_node->child1());
2937 LValue integerValue = m_out.doubleCeil(value);
2938 ValueFromBlock integerValueResult = m_out.anchor(integerValue);
2940 LValue realPart = m_out.doubleSub(integerValue, value);
2942 m_out.branch(m_out.doubleGreaterThanOrUnordered(realPart, m_out.constDouble(0.5)), unsure(realPartIsMoreThanHalf), unsure(continuation));
2944 LBasicBlock lastNext = m_out.appendTo(realPartIsMoreThanHalf, continuation);
2945 LValue integerValueRoundedDown = m_out.doubleSub(integerValue, m_out.constDouble(1));
2946 ValueFromBlock integerValueRoundedDownResult = m_out.anchor(integerValueRoundedDown);
2947 m_out.jump(continuation);
2948 m_out.appendTo(continuation, lastNext);
2950 result = m_out.phi(Double, integerValueResult, integerValueRoundedDownResult);
2953 if (producesInteger(m_node->arithRoundingMode())) {
2954 LValue integerValue = convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode()));
2955 setInt32(integerValue);
2961 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
2962 LValue argument = lowJSValue(m_node->child1());
2963 setJSValue(vmCall(Int64, m_out.operation(operationArithRound), m_callFrame, argument));
2966 void compileArithFloor()
2968 if (m_node->child1().useKind() == DoubleRepUse) {
2969 LValue value = lowDouble(m_node->child1());
2970 LValue integerValue = m_out.doubleFloor(value);
2971 if (producesInteger(m_node->arithRoundingMode()))
2972 setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode())));
2974 setDouble(integerValue);
2977 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
2978 LValue argument = lowJSValue(m_node->child1());
2979 setJSValue(vmCall(Int64, m_out.operation(operationArithFloor), m_callFrame, argument));
2982 void compileArithCeil()
2984 if (m_node->child1().useKind() == DoubleRepUse) {
2985 LValue value = lowDouble(m_node->child1());
2986 LValue integerValue = m_out.doubleCeil(value);
2987 if (producesInteger(m_node->arithRoundingMode()))
2988 setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode())));
2990 setDouble(integerValue);
2993 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
2994 LValue argument = lowJSValue(m_node->child1());
2995 setJSValue(vmCall(Int64, m_out.operation(operationArithCeil), m_callFrame, argument));
2998 void compileArithTrunc()
3000 if (m_node->child1().useKind() == DoubleRepUse) {
3001 LValue value = lowDouble(m_node->child1());
3002 LValue result = m_out.doubleTrunc(value);
3003 if (producesInteger(m_node->arithRoundingMode()))
3004 setInt32(convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode())));
3009 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
3010 LValue argument = lowJSValue(m_node->child1());
3011 setJSValue(vmCall(Int64, m_out.operation(operationArithTrunc), m_callFrame, argument));
3014 void compileArithSqrt()
3016 if (m_node->child1().useKind() == DoubleRepUse) {
3017 setDouble(m_out.doubleSqrt(lowDouble(m_node->child1())));
3020 LValue argument = lowJSValue(m_node->child1());
3021 LValue result = vmCall(Double, m_out.operation(operationArithSqrt), m_callFrame, argument);
3025 void compileArithFRound()
3027 if (m_node->child1().useKind() == DoubleRepUse) {
3028 setDouble(m_out.fround(lowDouble(m_node->child1())));
3031 LValue argument = lowJSValue(m_node->child1());
3032 LValue result = vmCall(Double, m_out.operation(operationArithFRound), m_callFrame, argument);
3036 void compileValueNegate()
3038 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
3039 CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
3040 unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
3041 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
3042 auto repatchingFunction = operationArithNegateOptimize;
3043 auto nonRepatchingFunction = operationArithNegate;
3044 compileUnaryMathIC<JITNegGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
3047 void compileArithNegate()
3049 switch (m_node->child1().useKind()) {
3051 LValue value = lowInt32(m_node->child1());
3054 if (!shouldCheckOverflow(m_node->arithMode()))
3055 result = m_out.neg(value);
3056 else if (!shouldCheckNegativeZero(m_node->arithMode())) {
3057 CheckValue* check = m_out.speculateSub(m_out.int32Zero, value);
3058 blessSpeculation(check, Overflow, noValue(), nullptr, m_origin);
3061 speculate(Overflow, noValue(), 0, m_out.testIsZero32(value, m_out.constInt32(0x7fffffff)));
3062 result = m_out.neg(value);
3070 if (!abstractValue(m_node->child1()).couldBeType(SpecNonInt32AsInt52)) {
3072 LValue value = lowWhicheverInt52(m_node->child1(), kind);
3073 LValue result = m_out.neg(value);
3074 if (shouldCheckNegativeZero(m_node->arithMode()))
3075 speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
3076 setInt52(result, kind);
3080 LValue value = lowInt52(m_node->child1());
3081 CheckValue* result = m_out.speculateSub(m_out.int64Zero, value);
3082 blessSpeculation(result, Int52Overflow, noValue(), nullptr, m_origin);
3083 if (shouldCheckNegativeZero(m_node->arithMode()))
3084 speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
3089 case DoubleRepUse: {
3090 setDouble(m_out.doubleNeg(lowDouble(m_node->child1())));
3095 DFG_CRASH(m_graph, m_node, "Bad use kind");
3100 void compileValueBitNot()
3102 if (m_node->child1().useKind() == BigIntUse) {
3103 LValue operand = lowBigInt(m_node->child1());
3104 LValue result = vmCall(pointerType(), m_out.operation(operationBitNotBigInt), m_callFrame, operand);
3109 LValue operand = lowJSValue(m_node->child1());
3110 LValue result = vmCall(Int64, m_out.operation(operationValueBitNot), m_callFrame, operand);
3114 void compileArithBitNot()
3116 setInt32(m_out.bitNot(lowInt32(m_node->child1())));
3119 void compileValueBitAnd()
3121 if (m_node->isBinaryUseKind(BigIntUse)) {
3122 LValue left = lowBigInt(m_node->child1());
3123 LValue right = lowBigInt(m_node->child2());
3125 LValue result = vmCall(pointerType(), m_out.operation(operationBitAndBigInt), m_callFrame, left, right);
3130 emitBinaryBitOpSnippet<JITBitAndGenerator>(operationValueBitAnd);
3133 void compileArithBitAnd()
3135 setInt32(m_out.bitAnd(lowInt32(m_node->child1()), lowInt32(m_node->child2())));