/*
 * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#if ENABLE(DFG_JIT)
30 #include "AssemblyHelpers.h"
31 #include "BytecodeLivenessAnalysisInlines.h"
32 #include "CodeBlock.h"
33 #include "DFGArgumentPosition.h"
34 #include "DFGBasicBlock.h"
35 #include "DFGFrozenValue.h"
36 #include "DFGLongLivedState.h"
38 #include "DFGNodeAllocator.h"
40 #include "DFGPropertyTypeKey.h"
41 #include "DFGScannable.h"
42 #include "FullBytecodeLiveness.h"
43 #include "MethodOfGettingAValueProfile.h"
44 #include <unordered_map>
45 #include <wtf/BitVector.h>
46 #include <wtf/HashMap.h>
47 #include <wtf/Vector.h>
48 #include <wtf/StdLibExtras.h>
namespace JSC { namespace DFG {

class BackwardsCFG;
class BackwardsDominators;
class CFG;
class ControlEquivalenceAnalysis;
class Dominators;
class FlowIndexing;
class NaturalLoops;
class PrePostNumbering;

template<typename> class FlowMap;
#define DFG_NODE_DO_TO_CHILDREN(graph, node, thingToDo) do {            \
        Node* _node = (node);                                           \
        if (_node->flags() & NodeHasVarArgs) {                          \
            for (unsigned _childIdx = _node->firstChild();              \
                _childIdx < _node->firstChild() + _node->numChildren(); \
                _childIdx++) {                                          \
                if (!!(graph).m_varArgChildren[_childIdx])              \
                    thingToDo(_node, (graph).m_varArgChildren[_childIdx]); \
            }                                                           \
        } else {                                                        \
            if (!_node->child1()) {                                     \
                ASSERT(                                                 \
                    !_node->child2()                                    \
                    && !_node->child3());                               \
                break;                                                  \
            }                                                           \
            thingToDo(_node, _node->child1());                          \
                                                                        \
            if (!_node->child2()) {                                     \
                ASSERT(!_node->child3());                               \
                break;                                                  \
            }                                                           \
            thingToDo(_node, _node->child2());                          \
                                                                        \
            if (!_node->child3())                                       \
                break;                                                  \
            thingToDo(_node, _node->child3());                          \
        }                                                               \
    } while (false)
#define DFG_ASSERT(graph, node, assertion) do {                         \
        if (!!(assertion))                                              \
            break;                                                      \
        (graph).handleAssertionFailure(                                 \
            (node), __FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion); \
    } while (false)

#define DFG_CRASH(graph, node, reason) do {                             \
        (graph).handleAssertionFailure(                                 \
            (node), __FILE__, __LINE__, WTF_PRETTY_FUNCTION, (reason)); \
    } while (false)
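
// Illustrative (hypothetical) uses of the macros above, as they might appear in a phase that
// holds a Graph& named m_graph: DFG_ASSERT checks an invariant and crashes with full graph
// context on failure; DFG_CRASH reports an unconditionally bad state.
//     DFG_ASSERT(m_graph, node, node->child1().useKind() == Int32Use);
//     DFG_CRASH(m_graph, node, "Unexpected node during lowering");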
struct InlineVariableData {
    InlineCallFrame* inlineCallFrame;
    unsigned argumentPositionStart;
    VariableAccessData* calleeVariable;
};

enum AddSpeculationMode {
    DontSpeculateInt32,
    SpeculateInt32AndTruncateConstants,
    SpeculateInt32
};
// The order may be significant for nodes with side-effects (property accesses, value conversions).
// Nodes that are 'dead' remain in the vector with refCount 0.
class Graph : public virtual Scannable {
public:
    Graph(VM&, Plan&, LongLivedState&);
    ~Graph();
    void changeChild(Edge& edge, Node* newNode)
    {
        edge.setNode(newNode);
    }
    
    void changeEdge(Edge& edge, Edge newEdge)
    {
        edge = newEdge;
    }
    
    void compareAndSwap(Edge& edge, Node* oldNode, Node* newNode)
    {
        if (edge.node() != oldNode)
            return;
        changeChild(edge, newNode);
    }
    
    void compareAndSwap(Edge& edge, Edge oldEdge, Edge newEdge)
    {
        if (edge != oldEdge)
            return;
        changeEdge(edge, newEdge);
    }
    
    void performSubstitution(Node* node)
    {
        if (node->flags() & NodeHasVarArgs) {
            for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++)
                performSubstitutionForEdge(m_varArgChildren[childIdx]);
        } else {
            performSubstitutionForEdge(node->child1());
            performSubstitutionForEdge(node->child2());
            performSubstitutionForEdge(node->child3());
        }
    }
    
    void performSubstitutionForEdge(Edge& child)
    {
        // Check if this operand is actually unused.
        if (!child)
            return;
        
        // Check if there is any replacement.
        Node* replacement = child->replacement();
        if (!replacement)
            return;
        
        child.setNode(replacement);
        
        // There is definitely a replacement. Assert that the replacement does not
        // have a replacement.
        ASSERT(!child->replacement());
    }
    
    template<typename... Params>
    Node* addNode(Params... params)
    {
        Node* node = new (m_allocator) Node(params...);
        addNodeToMapByIndex(node);
        return node;
    }
    
    template<typename... Params>
    Node* addNode(SpeculatedType type, Params... params)
    {
        Node* node = new (m_allocator) Node(params...);
        node->predict(type);
        addNodeToMapByIndex(node);
        return node;
    }
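    
    // A sketch of typical use (the opcode, origin, and operand here are illustrative):
    //     Node* constant = addNode(SpecNone, JSConstant, origin, OpInfo(freeze(value)));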
    void deleteNode(Node*);
    unsigned maxNodeCount() const { return m_nodesByIndex.size(); }
    Node* nodeAt(unsigned index) const { return m_nodesByIndex[index]; }
    void packNodeIndices();
    
    FrozenValue* freeze(JSValue); // We use weak freezing by default.
    FrozenValue* freezeStrong(JSValue); // Shorthand for freeze(value)->strengthenTo(StrongValue).
    
    void convertToConstant(Node* node, FrozenValue* value);
    void convertToConstant(Node* node, JSValue value);
    void convertToStrongConstant(Node* node, JSValue value);
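    
    // For example (illustrative), a pass that proves a node always yields the same value might
    // fold it to a constant whose liveness the GC will respect:
    //     convertToConstant(node, freeze(provenValue));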
    RegisteredStructure registerStructure(Structure* structure)
    {
        StructureRegistrationResult ignored;
        return registerStructure(structure, ignored);
    }
    RegisteredStructure registerStructure(Structure*, StructureRegistrationResult&);
    void assertIsRegistered(Structure* structure);
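    
    // Illustrative use: any Structure baked into the graph should be registered first, e.g.:
    //     RegisteredStructure structure = registerStructure(object->structure());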
    // CodeBlock is optional, but may allow additional information to be dumped (e.g. Identifier names).
    void dump(PrintStream& = WTF::dataFile(), DumpContext* = 0);
    
    bool terminalsAreValid();
    
    enum PhiNodeDumpMode { DumpLivePhisOnly, DumpAllPhis };
    void dumpBlockHeader(PrintStream&, const char* prefix, BasicBlock*, PhiNodeDumpMode, DumpContext*);
    void dump(PrintStream&, Edge);
    void dump(PrintStream&, const char* prefix, Node*, DumpContext* = 0);
    static int amountOfNodeWhiteSpace(Node*);
    static void printNodeWhiteSpace(PrintStream&, Node*);
    
    // Dump the code origin of the given node as a diff from the code origin of the
    // preceding node. Returns true if anything was printed.
    bool dumpCodeOrigin(PrintStream&, const char* prefix, Node*& previousNode, Node* currentNode, DumpContext*);
    AddSpeculationMode addSpeculationMode(Node* add, bool leftShouldSpeculateInt32, bool rightShouldSpeculateInt32, PredictionPass pass)
    {
        ASSERT(add->op() == ValueAdd || add->op() == ArithAdd || add->op() == ArithSub);
        
        RareCaseProfilingSource source = add->sourceFor(pass);
        
        Node* left = add->child1().node();
        Node* right = add->child2().node();
        
        if (left->hasConstant())
            return addImmediateShouldSpeculateInt32(add, rightShouldSpeculateInt32, right, left, source);
        if (right->hasConstant())
            return addImmediateShouldSpeculateInt32(add, leftShouldSpeculateInt32, left, right, source);
        
        return (leftShouldSpeculateInt32 && rightShouldSpeculateInt32 && add->canSpeculateInt32(source)) ? SpeculateInt32 : DontSpeculateInt32;
    }
    
    AddSpeculationMode valueAddSpeculationMode(Node* add, PredictionPass pass)
    {
        return addSpeculationMode(
            add,
            add->child1()->shouldSpeculateInt32OrBooleanExpectingDefined(),
            add->child2()->shouldSpeculateInt32OrBooleanExpectingDefined(),
            pass);
    }
    
    AddSpeculationMode arithAddSpeculationMode(Node* add, PredictionPass pass)
    {
        return addSpeculationMode(
            add,
            add->child1()->shouldSpeculateInt32OrBooleanForArithmetic(),
            add->child2()->shouldSpeculateInt32OrBooleanForArithmetic(),
            pass);
    }
    
    AddSpeculationMode addSpeculationMode(Node* add, PredictionPass pass)
    {
        if (add->op() == ValueAdd)
            return valueAddSpeculationMode(add, pass);
        
        return arithAddSpeculationMode(add, pass);
    }
    
    bool addShouldSpeculateInt32(Node* add, PredictionPass pass)
    {
        return addSpeculationMode(add, pass) != DontSpeculateInt32;
    }
    bool addShouldSpeculateAnyInt(Node* add)
    {
        if (!enableInt52())
            return false;
        
        Node* left = add->child1().node();
        Node* right = add->child2().node();
        
        if (hasExitSite(add, Int52Overflow))
            return false;
        
        if (Node::shouldSpeculateAnyInt(left, right))
            return true;
        
        auto shouldSpeculateAnyIntForAdd = [](Node* node) {
            auto isAnyIntSpeculationForAdd = [](SpeculatedType value) {
                return !!value && (value & (SpecAnyInt | SpecAnyIntAsDouble)) == value;
            };
            
            // When a DoubleConstant node appears, it means that the user explicitly wrote the
            // constant in double form instead of integer form (1.0 instead of 1). We honor that
            // decision: using it as an integer is not appropriate.
            if (node->op() == DoubleConstant)
                return false;
            
            return isAnyIntSpeculationForAdd(node->prediction());
        };
        
        // Allow an AnyInt ArithAdd only when at least one side of the binary operation should be
        // speculated AnyInt. This is somewhat conservative, because Double-to-Int52 conversion is
        // not cheap: frequent back-and-forth conversions between Double and Int52 hurt
        // performance. If one side of the operation is already Int52, constructing the ArithAdd
        // is cheap, since at most one Double-to-Int52 conversion is required.
        // This recovers some regression in assorted tests while keeping the kraken crypto improvements.
        if (!left->shouldSpeculateAnyInt() && !right->shouldSpeculateAnyInt())
            return false;
        
        auto usesAsNumbers = [](Node* node) {
            NodeFlags flags = node->flags() & NodeBytecodeBackPropMask;
            if (!flags)
                return false;
            return (flags & (NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex)) == flags;
        };
        
        // Boxing an Int52 as a JSValue is also not cheap. Thus, we allow Int52 addition only when
        // the node is used as a number.
        if (!usesAsNumbers(add))
            return false;
        
        return shouldSpeculateAnyIntForAdd(left) && shouldSpeculateAnyIntForAdd(right);
    }
    bool binaryArithShouldSpeculateInt32(Node* node, PredictionPass pass)
    {
        Node* left = node->child1().node();
        Node* right = node->child2().node();
        
        return Node::shouldSpeculateInt32OrBooleanForArithmetic(left, right)
            && node->canSpeculateInt32(node->sourceFor(pass));
    }
    
    bool binaryArithShouldSpeculateAnyInt(Node* node, PredictionPass pass)
    {
        if (!enableInt52())
            return false;
        
        Node* left = node->child1().node();
        Node* right = node->child2().node();
        
        return Node::shouldSpeculateAnyInt(left, right)
            && node->canSpeculateInt52(pass)
            && !hasExitSite(node, Int52Overflow);
    }
    
    bool unaryArithShouldSpeculateInt32(Node* node, PredictionPass pass)
    {
        return node->child1()->shouldSpeculateInt32OrBooleanForArithmetic()
            && node->canSpeculateInt32(pass);
    }
    
    bool unaryArithShouldSpeculateAnyInt(Node* node, PredictionPass pass)
    {
        if (!enableInt52())
            return false;
        
        return node->child1()->shouldSpeculateAnyInt()
            && node->canSpeculateInt52(pass)
            && !hasExitSite(node, Int52Overflow);
    }
    
    bool canOptimizeStringObjectAccess(const CodeOrigin&);
    
    bool getRegExpPrototypeProperty(JSObject* regExpPrototype, Structure* regExpPrototypeStructure, UniquedStringImpl* uid, JSValue& returnJSValue);
    
    bool roundShouldSpeculateInt32(Node* arithRound, PredictionPass pass)
    {
        ASSERT(arithRound->op() == ArithRound || arithRound->op() == ArithFloor || arithRound->op() == ArithCeil || arithRound->op() == ArithTrunc);
        return arithRound->canSpeculateInt32(pass) && !hasExitSite(arithRound->origin.semantic, Overflow) && !hasExitSite(arithRound->origin.semantic, NegativeZero);
    }
    static const char* opName(NodeType);
    RegisteredStructureSet* addStructureSet(const StructureSet& structureSet)
    {
        m_structureSets.append();
        RegisteredStructureSet* result = &m_structureSets.last();
        
        for (Structure* structure : structureSet)
            result->add(registerStructure(structure));
        
        return result;
    }
    
    RegisteredStructureSet* addStructureSet(const RegisteredStructureSet& structureSet)
    {
        m_structureSets.append();
        RegisteredStructureSet* result = &m_structureSets.last();
        
        for (RegisteredStructure structure : structureSet)
            result->add(structure);
        
        return result;
    }
    JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
    {
        return m_codeBlock->globalObjectFor(codeOrigin);
    }
    
    JSObject* globalThisObjectFor(CodeOrigin codeOrigin)
    {
        JSGlobalObject* object = globalObjectFor(codeOrigin);
        return jsCast<JSObject*>(object->methodTable()->toThis(object, object->globalExec(), NotStrictMode));
    }
    
    ScriptExecutable* executableFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_codeBlock->ownerScriptExecutable();
        
        return inlineCallFrame->baselineCodeBlock->ownerScriptExecutable();
    }
    
    ScriptExecutable* executableFor(const CodeOrigin& codeOrigin)
    {
        return executableFor(codeOrigin.inlineCallFrame);
    }
    
    CodeBlock* baselineCodeBlockFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_profiledBlock;
        return baselineCodeBlockForInlineCallFrame(inlineCallFrame);
    }
    
    CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin)
    {
        return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, m_profiledBlock);
    }
    
    bool isStrictModeFor(CodeOrigin codeOrigin)
    {
        if (!codeOrigin.inlineCallFrame)
            return m_codeBlock->isStrictMode();
        return codeOrigin.inlineCallFrame->isStrictMode();
    }
    
    ECMAMode ecmaModeFor(CodeOrigin codeOrigin)
    {
        return isStrictModeFor(codeOrigin) ? StrictMode : NotStrictMode;
    }
    
    bool masqueradesAsUndefinedWatchpointIsStillValid(const CodeOrigin& codeOrigin)
    {
        return globalObjectFor(codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid();
    }
    
    bool hasGlobalExitSite(const CodeOrigin& codeOrigin, ExitKind exitKind)
    {
        return baselineCodeBlockFor(codeOrigin)->hasExitSite(FrequentExitSite(exitKind));
    }
    
    bool hasExitSite(const CodeOrigin& codeOrigin, ExitKind exitKind)
    {
        return baselineCodeBlockFor(codeOrigin)->hasExitSite(FrequentExitSite(codeOrigin.bytecodeIndex, exitKind));
    }
    
    bool hasExitSite(Node* node, ExitKind exitKind)
    {
        return hasExitSite(node->origin.semantic, exitKind);
    }
    MethodOfGettingAValueProfile methodOfGettingAValueProfileFor(Node* currentNode, Node* operandNode);
    
    BlockIndex numBlocks() const { return m_blocks.size(); }
    BasicBlock* block(BlockIndex blockIndex) const { return m_blocks[blockIndex].get(); }
    BasicBlock* lastBlock() const { return block(numBlocks() - 1); }
    void appendBlock(Ref<BasicBlock>&& basicBlock)
    {
        basicBlock->index = m_blocks.size();
        m_blocks.append(WTFMove(basicBlock));
    }
    
    void killBlock(BlockIndex blockIndex)
    {
        m_blocks[blockIndex] = nullptr;
    }
    
    void killBlock(BasicBlock* basicBlock)
    {
        killBlock(basicBlock->index);
    }
    
    void killBlockAndItsContents(BasicBlock*);
    
    void killUnreachableBlocks();
    
    void determineReachability();
    void resetReachability();
    
    void computeRefCounts();
    unsigned varArgNumChildren(Node* node)
    {
        ASSERT(node->flags() & NodeHasVarArgs);
        return node->numChildren();
    }
    
    unsigned numChildren(Node* node)
    {
        if (node->flags() & NodeHasVarArgs)
            return varArgNumChildren(node);
        return AdjacencyList::Size;
    }
    
    Edge& varArgChild(Node* node, unsigned index)
    {
        ASSERT(node->flags() & NodeHasVarArgs);
        return m_varArgChildren[node->firstChild() + index];
    }
    
    Edge& child(Node* node, unsigned index)
    {
        if (node->flags() & NodeHasVarArgs)
            return varArgChild(node, index);
        return node->children.child(index);
    }
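    
    // Example (illustrative): numChildren()/child() let a pass visit children uniformly,
    // whether or not the node has varargs; doSomethingWith is hypothetical.
    //     for (unsigned i = 0; i < numChildren(node); ++i) {
    //         if (Edge edge = child(node, i))
    //             doSomethingWith(edge);
    //     }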
    void voteNode(Node* node, unsigned ballot, float weight = 1)
    {
        switch (node->op()) {
        case ValueToInt32:
        case UInt32ToNumber:
            node = node->child1().node();
            break;
        default:
            break;
        }
        
        if (node->op() == GetLocal)
            node->variableAccessData()->vote(ballot, weight);
    }
    
    void voteNode(Edge edge, unsigned ballot, float weight = 1)
    {
        voteNode(edge.node(), ballot, weight);
    }
    
    void voteChildren(Node* node, unsigned ballot, float weight = 1)
    {
        if (node->flags() & NodeHasVarArgs) {
            for (unsigned childIdx = node->firstChild();
                childIdx < node->firstChild() + node->numChildren();
                childIdx++) {
                if (!!m_varArgChildren[childIdx])
                    voteNode(m_varArgChildren[childIdx], ballot, weight);
            }
            return;
        }
        
        if (!node->child1())
            return;
        voteNode(node->child1(), ballot, weight);
        if (!node->child2())
            return;
        voteNode(node->child2(), ballot, weight);
        if (!node->child3())
            return;
        voteNode(node->child3(), ballot, weight);
    }
    template<typename T> // T = Node* or Edge
    void substitute(BasicBlock& block, unsigned startIndexInBlock, T oldThing, T newThing)
    {
        for (unsigned indexInBlock = startIndexInBlock; indexInBlock < block.size(); ++indexInBlock) {
            Node* node = block[indexInBlock];
            if (node->flags() & NodeHasVarArgs) {
                for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); ++childIdx) {
                    if (!!m_varArgChildren[childIdx])
                        compareAndSwap(m_varArgChildren[childIdx], oldThing, newThing);
                }
                continue;
            }
            if (!node->child1())
                continue;
            compareAndSwap(node->children.child1(), oldThing, newThing);
            if (!node->child2())
                continue;
            compareAndSwap(node->children.child2(), oldThing, newThing);
            if (!node->child3())
                continue;
            compareAndSwap(node->children.child3(), oldThing, newThing);
        }
    }
    // Use this if you introduce a new GetLocal and you know that you introduced it *before*
    // any GetLocals in the basic block.
    // FIXME: it may be appropriate, in the future, to generalize this to handle GetLocals
    // introduced anywhere in the basic block.
    void substituteGetLocal(BasicBlock& block, unsigned startIndexInBlock, VariableAccessData* variableAccessData, Node* newGetLocal);
    
    void invalidateCFG();
    void invalidateNodeLiveness();
    
    void clearFlagsOnAllNodes(NodeFlags);
    
    void clearReplacements();
    void clearEpochs();
    void initializeNodeOwners();
    
    BlockList blocksInPreOrder();
    BlockList blocksInPostOrder();
    class NaturalBlockIterable {
    public:
        NaturalBlockIterable()
            : m_graph(nullptr)
        {
        }
        
        NaturalBlockIterable(Graph& graph)
            : m_graph(&graph)
        {
        }
        
        class iterator {
        public:
            iterator()
                : m_graph(nullptr)
                , m_index(0)
            {
            }
            
            iterator(Graph& graph, BlockIndex index)
                : m_graph(&graph)
                , m_index(findNext(index))
            {
            }
            
            BasicBlock* operator*()
            {
                return m_graph->block(m_index);
            }
            
            iterator& operator++()
            {
                m_index = findNext(m_index + 1);
                return *this;
            }
            
            bool operator==(const iterator& other) const
            {
                return m_index == other.m_index;
            }
            
            bool operator!=(const iterator& other) const
            {
                return !(*this == other);
            }
            
        private:
            BlockIndex findNext(BlockIndex index)
            {
                while (index < m_graph->numBlocks() && !m_graph->block(index))
                    index++;
                return index;
            }
            
            Graph* m_graph;
            BlockIndex m_index;
        };
        
        iterator begin()
        {
            return iterator(*m_graph, 0);
        }
        
        iterator end()
        {
            return iterator(*m_graph, m_graph->numBlocks());
        }
        
    private:
        Graph* m_graph;
    };
    NaturalBlockIterable blocksInNaturalOrder()
    {
        return NaturalBlockIterable(*this);
    }
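    
    // Example: range-based iteration over all live blocks, skipping killed (null) entries:
    //     for (BasicBlock* block : blocksInNaturalOrder())
    //         dataLog("Block #", block->index, "\n");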
    template<typename ChildFunctor>
    void doToChildrenWithNode(Node* node, const ChildFunctor& functor)
    {
        DFG_NODE_DO_TO_CHILDREN(*this, node, functor);
    }
    
    template<typename ChildFunctor>
    void doToChildren(Node* node, const ChildFunctor& functor)
    {
        doToChildrenWithNode(
            node,
            [&functor] (Node*, Edge& edge) {
                functor(edge);
            });
    }
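    
    // A sketch of typical use; uses() below is a real client of this iteration:
    //     doToChildren(node, [&] (Edge edge) { dataLog(edge, "\n"); });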
    bool uses(Node* node, Node* child)
    {
        bool result = false;
        doToChildren(node, [&] (Edge edge) { result |= edge == child; });
        return result;
    }
    bool isWatchingHavingABadTimeWatchpoint(Node* node)
    {
        JSGlobalObject* globalObject = globalObjectFor(node->origin.semantic);
        return watchpoints().isWatched(globalObject->havingABadTimeWatchpoint());
    }
    
    bool isWatchingArrayIteratorProtocolWatchpoint(Node* node)
    {
        JSGlobalObject* globalObject = globalObjectFor(node->origin.semantic);
        InlineWatchpointSet& set = globalObject->arrayIteratorProtocolWatchpoint();
        if (watchpoints().isWatched(set))
            return true;
        
        if (set.isStillValid()) {
            // Since the global object owns this watchpoint, we take a weak reference to the
            // global object. If the global object were deallocated, it wouldn't fire the
            // watchpoint. It's unlikely that the global object would be deallocated without this
            // code ever getting thrown away; still, it's logically sounder to depend on the
            // global object's lifetime weakly.
            freeze(globalObject);
            watchpoints().addLazily(set);
            return true;
        }
        
        return false;
    }
    Profiler::Compilation* compilation() { return m_plan.compilation.get(); }
    
    DesiredIdentifiers& identifiers() { return m_plan.identifiers; }
    DesiredWatchpoints& watchpoints() { return m_plan.watchpoints; }
    
    // Returns false if the key is already invalid or unwatchable. If this is a Presence condition,
    // this also makes it cheap to query if the condition holds. Also makes sure that the GC knows
    // about this watchpoint.
    bool watchCondition(const ObjectPropertyCondition&);
    bool watchConditions(const ObjectPropertyConditionSet&);
    
    // Checks if it's known that loading from the given object at the given offset is fine. This is
    // computed by recording which conditions have been watched with watchCondition().
    bool isSafeToLoad(JSObject* base, PropertyOffset);
    void registerInferredType(const InferredType::Descriptor& type)
    {
        if (type.structure())
            registerStructure(type.structure());
    }
    
    // Tells us what inferred type we are able to prove the property to have now and in the future.
    InferredType::Descriptor inferredTypeFor(const PropertyTypeKey&);
    InferredType::Descriptor inferredTypeForProperty(Structure* structure, UniquedStringImpl* uid)
    {
        return inferredTypeFor(PropertyTypeKey(structure, uid));
    }
    AbstractValue inferredValueForProperty(
        const RegisteredStructureSet& base, UniquedStringImpl* uid, StructureClobberState = StructuresAreWatched);
    
    // This uses either constant property inference or property type inference to derive a good abstract
    // value for some property accessed with the given abstract value base.
    AbstractValue inferredValueForProperty(
        const AbstractValue& base, UniquedStringImpl* uid, PropertyOffset, StructureClobberState);
    
    FullBytecodeLiveness& livenessFor(CodeBlock*);
    FullBytecodeLiveness& livenessFor(InlineCallFrame*);
    
    // Quickly query if a single local is live at the given point. This is faster than calling
    // forAllLiveInBytecode() if you will only query one local. But, if you want to know all of the
    // locals live, then calling this for each local is much slower than forAllLiveInBytecode().
    bool isLiveInBytecode(VirtualRegister, CodeOrigin);
    
    // Quickly get all of the non-argument locals live at the given point. This doesn't give you
    // any arguments because those are all presumed live. You can call forAllLiveInBytecode() to
    // also get the arguments. This is much faster than calling isLiveInBytecode() for each local.
    template<typename Functor>
    void forAllLocalsLiveInBytecode(CodeOrigin codeOrigin, const Functor& functor)
    {
        // Support for not redundantly reporting arguments. Necessary because in case of a varargs
        // call, only the callee knows that arguments are live while in the case of a non-varargs
        // call, both callee and caller will see the variables live.
        VirtualRegister exclusionStart;
        VirtualRegister exclusionEnd;
        
        CodeOrigin* codeOriginPtr = &codeOrigin;
        
        for (;;) {
            InlineCallFrame* inlineCallFrame = codeOriginPtr->inlineCallFrame;
            VirtualRegister stackOffset(inlineCallFrame ? inlineCallFrame->stackOffset : 0);
            
            if (inlineCallFrame) {
                if (inlineCallFrame->isClosureCall)
                    functor(stackOffset + CallFrameSlot::callee);
                if (inlineCallFrame->isVarargs())
                    functor(stackOffset + CallFrameSlot::argumentCount);
            }
            
            CodeBlock* codeBlock = baselineCodeBlockFor(inlineCallFrame);
            FullBytecodeLiveness& fullLiveness = livenessFor(codeBlock);
            const FastBitVector& liveness = fullLiveness.getLiveness(codeOriginPtr->bytecodeIndex);
            for (unsigned relativeLocal = codeBlock->m_numCalleeLocals; relativeLocal--;) {
                VirtualRegister reg = stackOffset + virtualRegisterForLocal(relativeLocal);
                
                // Don't report if our callee already reported.
                if (reg >= exclusionStart && reg < exclusionEnd)
                    continue;
                
                if (liveness[relativeLocal])
                    functor(reg);
            }
            
            if (!inlineCallFrame)
                break;
            
            // Arguments are always live. This would be redundant if it weren't for our
            // op_call_varargs inlining. See the comment above.
            exclusionStart = stackOffset + CallFrame::argumentOffsetIncludingThis(0);
            exclusionEnd = stackOffset + CallFrame::argumentOffsetIncludingThis(inlineCallFrame->arguments.size());
            
            // We will always have a "this" argument, and exclusionStart should be a smaller stack
            // offset than exclusionEnd.
            ASSERT(exclusionStart < exclusionEnd);
            
            for (VirtualRegister reg = exclusionStart; reg < exclusionEnd; reg += 1)
                functor(reg);
            
            codeOriginPtr = inlineCallFrame->getCallerSkippingTailCalls();
            
            // The first inline call frame could be an inline tail call.
            if (!codeOriginPtr)
                break;
        }
    }
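    
    // A sketch of typical use (the collection and the choice of origin are illustrative):
    //     Vector<VirtualRegister> liveLocals;
    //     forAllLocalsLiveInBytecode(node->origin.forExit, [&] (VirtualRegister reg) {
    //         liveLocals.append(reg);
    //     });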
    // Get a BitVector of all of the non-argument locals live right now. This is mostly useful if
    // you want to compare two sets of live locals from two different CodeOrigins.
    BitVector localsLiveInBytecode(CodeOrigin);
    
    // Tells you all of the arguments and locals live at the given CodeOrigin. This is a small
    // extension to forAllLocalsLiveInBytecode(), since all arguments are always presumed live.
    template<typename Functor>
    void forAllLiveInBytecode(CodeOrigin codeOrigin, const Functor& functor)
    {
        forAllLocalsLiveInBytecode(codeOrigin, functor);
        
        // Report all arguments as being live.
        for (unsigned argument = block(0)->variablesAtHead.numberOfArguments(); argument--;)
            functor(virtualRegisterForArgument(argument));
    }
    BytecodeKills& killsFor(CodeBlock*);
    BytecodeKills& killsFor(InlineCallFrame*);
    
    static unsigned parameterSlotsForArgCount(unsigned);
    
    unsigned frameRegisterCount();
    unsigned stackPointerOffset();
    unsigned requiredRegisterCountForExit();
    unsigned requiredRegisterCountForExecutionAndExit();
    
    JSValue tryGetConstantProperty(JSValue base, const RegisteredStructureSet&, PropertyOffset);
    JSValue tryGetConstantProperty(JSValue base, Structure*, PropertyOffset);
    JSValue tryGetConstantProperty(JSValue base, const StructureAbstractValue&, PropertyOffset);
    JSValue tryGetConstantProperty(const AbstractValue&, PropertyOffset);
    
    JSValue tryGetConstantClosureVar(JSValue base, ScopeOffset);
    JSValue tryGetConstantClosureVar(const AbstractValue&, ScopeOffset);
    JSValue tryGetConstantClosureVar(Node*, ScopeOffset);
    
    JSArrayBufferView* tryGetFoldableView(JSValue);
    JSArrayBufferView* tryGetFoldableView(JSValue, ArrayMode arrayMode);
    
    void registerFrozenValues();
    
    void visitChildren(SlotVisitor&) override;
    
    NO_RETURN_DUE_TO_CRASH void handleAssertionFailure(
        std::nullptr_t, const char* file, int line, const char* function,
        const char* assertion);
    NO_RETURN_DUE_TO_CRASH void handleAssertionFailure(
        Node*, const char* file, int line, const char* function,
        const char* assertion);
    NO_RETURN_DUE_TO_CRASH void handleAssertionFailure(
        BasicBlock*, const char* file, int line, const char* function,
        const char* assertion);
    bool hasDebuggerEnabled() const { return m_hasDebuggerEnabled; }
    
    Dominators& ensureDominators();
    PrePostNumbering& ensurePrePostNumbering();
    NaturalLoops& ensureNaturalLoops();
    BackwardsCFG& ensureBackwardsCFG();
    BackwardsDominators& ensureBackwardsDominators();
    ControlEquivalenceAnalysis& ensureControlEquivalenceAnalysis();
    // This function only makes sense to call after bytecode parsing, because it queries the
    // m_hasExceptionHandlers boolean, whose value is only fully determined after bytecode parsing.
    bool willCatchExceptionInMachineFrame(CodeOrigin, CodeOrigin& opCatchOriginOut, HandlerInfo*& catchHandlerOut);
    bool needsScopeRegister() const { return m_hasDebuggerEnabled || m_codeBlock->usesEval(); }
    bool needsFlushedThis() const { return m_codeBlock->usesEval(); }
    
    VM& m_vm;
    Plan& m_plan;
    CodeBlock* m_codeBlock;
    CodeBlock* m_profiledBlock;
    
    NodeAllocator& m_allocator;
    
    Vector<RefPtr<BasicBlock>, 8> m_blocks;
    Vector<Edge, 16> m_varArgChildren;
    
    HashMap<EncodedJSValue, FrozenValue*, EncodedJSValueHash, EncodedJSValueHashTraits> m_frozenValueMap;
    Bag<FrozenValue> m_frozenValues;
    
    Vector<uint32_t> m_uint32ValuesInUse;
    
    Bag<StorageAccessData> m_storageAccessData;
    // In CPS, this is all of the SetArgument nodes for the arguments in the machine code block
    // that survived DCE. All of them except maybe "this" will survive DCE, because of the Flush
    // nodes.
    //
    // In SSA, this is all of the GetStack nodes for the arguments in the machine code block that
    // may have some speculation in the prologue and survived DCE. Note that to get the speculation
    // for an argument in SSA, you must use m_argumentFormats, since we still have to speculate
    // even if the argument got killed. For example:
    //
    //     function foo(x) {
    //         var tmp = x + 1;
    //     }
    //
    // Assume that x is always int during profiling. The ArithAdd for "x + 1" will be dead and will
    // have a proven check for the edge to "x". So, we will not insert a Check node and we will
    // kill the GetStack for "x". But, we must do the int check in the prologue, because that's the
    // thing we used to allow DCE of ArithAdd. Otherwise the add could be impure:
    //
    //     var o = {
    //         valueOf: function() { do side effects; }
    //     };
    //     foo(o);
    //
    // If we DCE the ArithAdd and we remove the int check on x, then this won't do the side
    // effects.
    Vector<Node*, 8> m_arguments;
    // In CPS, this is meaningless. In SSA, this is the argument speculation that we've locked in.
    Vector<FlushFormat> m_argumentFormats;
    
    SegmentedVector<VariableAccessData, 16> m_variableAccessData;
    SegmentedVector<ArgumentPosition, 8> m_argumentPositions;
    Bag<Transition> m_transitions;
    SegmentedVector<NewArrayBufferData, 4> m_newArrayBufferData;
    Bag<BranchData> m_branchData;
    Bag<SwitchData> m_switchData;
    Bag<MultiGetByOffsetData> m_multiGetByOffsetData;
    Bag<MultiPutByOffsetData> m_multiPutByOffsetData;
    Bag<ObjectMaterializationData> m_objectMaterializationData;
    Bag<CallVarargsData> m_callVarargsData;
    Bag<LoadVarargsData> m_loadVarargsData;
    Bag<StackAccessData> m_stackAccessData;
    Bag<LazyJSValue> m_lazyJSValues;
    Bag<CallDOMGetterData> m_callDOMGetterData;
    Bag<BitVector> m_bitVectors;
    Vector<InlineVariableData, 4> m_inlineVariableData;
    HashMap<CodeBlock*, std::unique_ptr<FullBytecodeLiveness>> m_bytecodeLiveness;
    HashMap<CodeBlock*, std::unique_ptr<BytecodeKills>> m_bytecodeKills;
    HashSet<std::pair<JSObject*, PropertyOffset>> m_safeToLoad;
    HashMap<PropertyTypeKey, InferredType::Descriptor> m_inferredTypes;
    Vector<RefPtr<DOMJIT::Patchpoint>> m_domJITPatchpoints;
    std::unique_ptr<Dominators> m_dominators;
    std::unique_ptr<PrePostNumbering> m_prePostNumbering;
    std::unique_ptr<NaturalLoops> m_naturalLoops;
    std::unique_ptr<CFG> m_cfg;
    std::unique_ptr<BackwardsCFG> m_backwardsCFG;
    std::unique_ptr<BackwardsDominators> m_backwardsDominators;
    std::unique_ptr<ControlEquivalenceAnalysis> m_controlEquivalenceAnalysis;
    unsigned m_localVars;
    unsigned m_nextMachineLocal;
    unsigned m_parameterSlots;
    
    HashSet<String> m_localStrings;
    HashMap<const StringImpl*, String> m_copiedStrings;
#if USE(JSVALUE32_64)
    std::unordered_map<int64_t, double*> m_doubleConstantsMap;
    std::unique_ptr<Bag<double>> m_doubleConstants;
#endif
    OptimizationFixpointState m_fixpointState;
    StructureRegistrationState m_structureRegistrationState;
    GraphForm m_form;
    UnificationState m_unificationState;
    PlanStage m_planStage { PlanStage::Initial };
    RefCountState m_refCountState;
    bool m_hasDebuggerEnabled;
    bool m_hasExceptionHandlers { false };
    std::unique_ptr<FlowIndexing> m_indexingCache;
    std::unique_ptr<FlowMap<AbstractValue>> m_abstractValuesCache;
    RegisteredStructure stringStructure;
    RegisteredStructure symbolStructure;
    
private:
    void addNodeToMapByIndex(Node*);
    
    bool isStringPrototypeMethodSane(JSGlobalObject*, UniquedStringImpl*);
    
    void handleSuccessor(Vector<BasicBlock*, 16>& worklist, BasicBlock*, BasicBlock* successor);
    AddSpeculationMode addImmediateShouldSpeculateInt32(Node* add, bool variableShouldSpeculateInt32, Node* operand, Node* immediate, RareCaseProfilingSource source)
    {
        ASSERT(immediate->hasConstant());
        
        JSValue immediateValue = immediate->asJSValue();
        if (!immediateValue.isNumber() && !immediateValue.isBoolean())
            return DontSpeculateInt32;
        
        if (!variableShouldSpeculateInt32)
            return DontSpeculateInt32;
        
        // Integer constants can be typed Double if they are written like a double in the source code (e.g. 42.0).
        // In that case, we stay conservative unless the other operand was explicitly typed as integer.
        NodeFlags operandResultType = operand->result();
        if (operandResultType != NodeResultInt32 && immediateValue.isDouble())
            return DontSpeculateInt32;
        
        if (immediateValue.isBoolean() || jsNumber(immediateValue.asNumber()).isInt32())
            return add->canSpeculateInt32(source) ? SpeculateInt32 : DontSpeculateInt32;
        
        double doubleImmediate = immediateValue.asDouble();
        const double twoToThe48 = 281474976710656.0;
        if (doubleImmediate < -twoToThe48 || doubleImmediate > twoToThe48)
            return DontSpeculateInt32;
        
        return bytecodeCanTruncateInteger(add->arithNodeFlags()) ? SpeculateInt32AndTruncateConstants : DontSpeculateInt32;
    }
    Vector<Node*, 0, UnsafeVectorOverflow> m_nodesByIndex;
    Vector<unsigned, 0, UnsafeVectorOverflow> m_nodeIndexFreeList;
    SegmentedVector<RegisteredStructureSet, 16> m_structureSets;
};

} } // namespace JSC::DFG

#endif