DFG SSA stack accesses shouldn't speak of VariableAccessDatas
Source/JavaScriptCore/dfg/DFGGraph.h
/*
 * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef DFGGraph_h
#define DFGGraph_h

#if ENABLE(DFG_JIT)

#include "AssemblyHelpers.h"
#include "CodeBlock.h"
#include "DFGArgumentPosition.h"
#include "DFGBasicBlock.h"
#include "DFGDominators.h"
#include "DFGFrozenValue.h"
#include "DFGLongLivedState.h"
#include "DFGNaturalLoops.h"
#include "DFGNode.h"
#include "DFGNodeAllocator.h"
#include "DFGPlan.h"
#include "DFGPrePostNumbering.h"
#include "DFGScannable.h"
#include "JSStack.h"
#include "MethodOfGettingAValueProfile.h"
#include <unordered_map>
#include <wtf/BitVector.h>
#include <wtf/HashMap.h>
#include <wtf/Vector.h>
#include <wtf/StdLibExtras.h>

namespace JSC {

class CodeBlock;
class ExecState;

namespace DFG {

#define DFG_NODE_DO_TO_CHILDREN(graph, node, thingToDo) do {            \
        Node* _node = (node);                                           \
        if (_node->flags() & NodeHasVarArgs) {                          \
            for (unsigned _childIdx = _node->firstChild();              \
                _childIdx < _node->firstChild() + _node->numChildren(); \
                _childIdx++) {                                          \
                if (!!(graph).m_varArgChildren[_childIdx])              \
                    thingToDo(_node, (graph).m_varArgChildren[_childIdx]); \
            }                                                           \
        } else {                                                        \
            if (!_node->child1()) {                                     \
                ASSERT(                                                 \
                    !_node->child2()                                    \
                    && !_node->child3());                               \
                break;                                                  \
            }                                                           \
            thingToDo(_node, _node->child1());                          \
                                                                        \
            if (!_node->child2()) {                                     \
                ASSERT(!_node->child3());                               \
                break;                                                  \
            }                                                           \
            thingToDo(_node, _node->child2());                          \
                                                                        \
            if (!_node->child3())                                       \
                break;                                                  \
            thingToDo(_node, _node->child3());                          \
        }                                                               \
    } while (false)

#define DFG_ASSERT(graph, node, assertion) do {                         \
        if (!!(assertion))                                              \
            break;                                                      \
        (graph).handleAssertionFailure(                                 \
            (node), __FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion); \
    } while (false)

#define DFG_CRASH(graph, node, reason)                                  \
    (graph).handleAssertionFailure(                                     \
        (node), __FILE__, __LINE__, WTF_PRETTY_FUNCTION, (reason));

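// Illustrative only: phases typically use these macros as, for example,
// DFG_ASSERT(m_graph, node, node->child1()) for checked invariants, and
// DFG_CRASH(m_graph, node, "explanation") when the failure is unconditional.
// The exact call sites live in the phase implementations, not in this header.
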
struct InlineVariableData {
    InlineCallFrame* inlineCallFrame;
    unsigned argumentPositionStart;
    VariableAccessData* calleeVariable;
};

enum AddSpeculationMode {
    DontSpeculateInt32,
    SpeculateInt32AndTruncateConstants,
    SpeculateInt32
};

//
// === Graph ===
//
// The order may be significant for nodes with side-effects (property accesses, value conversions).
// Nodes that are 'dead' remain in the vector with refCount 0.
class Graph : public virtual Scannable {
public:
    Graph(VM&, Plan&, LongLivedState&);
    ~Graph();

    void changeChild(Edge& edge, Node* newNode)
    {
        edge.setNode(newNode);
    }

    void changeEdge(Edge& edge, Edge newEdge)
    {
        edge = newEdge;
    }

    void compareAndSwap(Edge& edge, Node* oldNode, Node* newNode)
    {
        if (edge.node() != oldNode)
            return;
        changeChild(edge, newNode);
    }

    void compareAndSwap(Edge& edge, Edge oldEdge, Edge newEdge)
    {
        if (edge != oldEdge)
            return;
        changeEdge(edge, newEdge);
    }

    void performSubstitution(Node* node)
    {
        if (node->flags() & NodeHasVarArgs) {
            for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++)
                performSubstitutionForEdge(m_varArgChildren[childIdx]);
        } else {
            performSubstitutionForEdge(node->child1());
            performSubstitutionForEdge(node->child2());
            performSubstitutionForEdge(node->child3());
        }
    }

    void performSubstitutionForEdge(Edge& child)
    {
        // Check if this operand is actually unused.
        if (!child)
            return;

        // Check if there is any replacement.
        Node* replacement = child->replacement;
        if (!replacement)
            return;

        child.setNode(replacement);

        // There is definitely a replacement. Assert that the replacement does not
        // have a replacement.
        ASSERT(!child->replacement);
    }
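
    // Illustrative sketch of the replacement protocol that the two methods above
    // participate in (the concrete call sites are in the individual phases):
    //
    //     graph.clearReplacements();
    //     // ... a phase decides that every use of oldNode should see newNode ...
    //     oldNode->replacement = newNode;
    //     // ... then, while walking the users:
    //     graph.performSubstitution(user);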

    template<typename... Params>
    Node* addNode(SpeculatedType type, Params... params)
    {
        Node* node = new (m_allocator) Node(params...);
        node->predict(type);
        return node;
    }
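
    // Illustrative only (hypothetical call site): a phase that materializes a node
    // directly, rather than through an InsertionSet, might write something like
    //
    //     Node* newNode = graph.addNode(SpecNone, JSConstant, origin,
    //         OpInfo(graph.freeze(jsNumber(42))));
    //
    // The SpeculatedType argument seeds the new node's prediction.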

    void dethread();

    FrozenValue* freezeFragile(JSValue value);
    FrozenValue* freeze(JSValue value); // We use weak freezing by default. Shorthand for freezeFragile(value)->strengthenTo(WeakValue).
    FrozenValue* freezeStrong(JSValue value); // Shorthand for freezeFragile(value)->strengthenTo(StrongValue).

    void convertToConstant(Node* node, FrozenValue* value);
    void convertToConstant(Node* node, JSValue value);
    void convertToStrongConstant(Node* node, JSValue value);

    StructureRegistrationResult registerStructure(Structure* structure);
    void assertIsRegistered(Structure* structure);

    // CodeBlock is optional, but may allow additional information to be dumped (e.g. Identifier names).
    void dump(PrintStream& = WTF::dataFile(), DumpContext* = 0);
    enum PhiNodeDumpMode { DumpLivePhisOnly, DumpAllPhis };
    void dumpBlockHeader(PrintStream&, const char* prefix, BasicBlock*, PhiNodeDumpMode, DumpContext*);
    void dump(PrintStream&, Edge);
    void dump(PrintStream&, const char* prefix, Node*, DumpContext* = 0);
    static int amountOfNodeWhiteSpace(Node*);
    static void printNodeWhiteSpace(PrintStream&, Node*);

    // Dump the code origin of the given node as a diff from the code origin of the
    // preceding node. Returns true if anything was printed.
    bool dumpCodeOrigin(PrintStream&, const char* prefix, Node* previousNode, Node* currentNode, DumpContext*);

    AddSpeculationMode addSpeculationMode(Node* add, bool leftShouldSpeculateInt32, bool rightShouldSpeculateInt32, PredictionPass pass)
    {
        ASSERT(add->op() == ValueAdd || add->op() == ArithAdd || add->op() == ArithSub);

        RareCaseProfilingSource source = add->sourceFor(pass);

        Node* left = add->child1().node();
        Node* right = add->child2().node();

        if (left->hasConstant())
            return addImmediateShouldSpeculateInt32(add, rightShouldSpeculateInt32, left, source);
        if (right->hasConstant())
            return addImmediateShouldSpeculateInt32(add, leftShouldSpeculateInt32, right, source);

        return (leftShouldSpeculateInt32 && rightShouldSpeculateInt32 && add->canSpeculateInt32(source)) ? SpeculateInt32 : DontSpeculateInt32;
    }

    AddSpeculationMode valueAddSpeculationMode(Node* add, PredictionPass pass)
    {
        return addSpeculationMode(
            add,
            add->child1()->shouldSpeculateInt32OrBooleanExpectingDefined(),
            add->child2()->shouldSpeculateInt32OrBooleanExpectingDefined(),
            pass);
    }

    AddSpeculationMode arithAddSpeculationMode(Node* add, PredictionPass pass)
    {
        return addSpeculationMode(
            add,
            add->child1()->shouldSpeculateInt32OrBooleanForArithmetic(),
            add->child2()->shouldSpeculateInt32OrBooleanForArithmetic(),
            pass);
    }

    AddSpeculationMode addSpeculationMode(Node* add, PredictionPass pass)
    {
        if (add->op() == ValueAdd)
            return valueAddSpeculationMode(add, pass);

        return arithAddSpeculationMode(add, pass);
    }

    bool addShouldSpeculateInt32(Node* add, PredictionPass pass)
    {
        return addSpeculationMode(add, pass) != DontSpeculateInt32;
    }

    bool addShouldSpeculateMachineInt(Node* add)
    {
        if (!enableInt52())
            return false;

        Node* left = add->child1().node();
        Node* right = add->child2().node();

        bool speculation = Node::shouldSpeculateMachineInt(left, right);

        return speculation && !hasExitSite(add, Int52Overflow);
    }

    bool mulShouldSpeculateInt32(Node* mul, PredictionPass pass)
    {
        ASSERT(mul->op() == ArithMul);

        Node* left = mul->child1().node();
        Node* right = mul->child2().node();

        return Node::shouldSpeculateInt32OrBooleanForArithmetic(left, right)
            && mul->canSpeculateInt32(mul->sourceFor(pass));
    }

    bool mulShouldSpeculateMachineInt(Node* mul, PredictionPass pass)
    {
        ASSERT(mul->op() == ArithMul);

        if (!enableInt52())
            return false;

        Node* left = mul->child1().node();
        Node* right = mul->child2().node();

        return Node::shouldSpeculateMachineInt(left, right)
            && mul->canSpeculateInt52(pass)
            && !hasExitSite(mul, Int52Overflow);
    }

    bool negateShouldSpeculateInt32(Node* negate, PredictionPass pass)
    {
        ASSERT(negate->op() == ArithNegate);
        return negate->child1()->shouldSpeculateInt32OrBooleanForArithmetic()
            && negate->canSpeculateInt32(pass);
    }

    bool negateShouldSpeculateMachineInt(Node* negate, PredictionPass pass)
    {
        ASSERT(negate->op() == ArithNegate);
        if (!enableInt52())
            return false;
        return negate->child1()->shouldSpeculateMachineInt()
            && !hasExitSite(negate, Int52Overflow)
            && negate->canSpeculateInt52(pass);
    }

    VirtualRegister bytecodeRegisterForArgument(CodeOrigin codeOrigin, int argument)
    {
        return VirtualRegister(
            codeOrigin.inlineCallFrame->stackOffset +
            baselineCodeBlockFor(codeOrigin)->argumentIndexAfterCapture(argument));
    }

    static const char *opName(NodeType);

    StructureSet* addStructureSet(const StructureSet& structureSet)
    {
        ASSERT(structureSet.size());
        m_structureSet.append(structureSet);
        return &m_structureSet.last();
    }

    JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
    {
        return m_codeBlock->globalObjectFor(codeOrigin);
    }

    JSObject* globalThisObjectFor(CodeOrigin codeOrigin)
    {
        JSGlobalObject* object = globalObjectFor(codeOrigin);
        return jsCast<JSObject*>(object->methodTable()->toThis(object, object->globalExec(), NotStrictMode));
    }

    ScriptExecutable* executableFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_codeBlock->ownerExecutable();

        return inlineCallFrame->executable.get();
    }

    ScriptExecutable* executableFor(const CodeOrigin& codeOrigin)
    {
        return executableFor(codeOrigin.inlineCallFrame);
    }

    CodeBlock* baselineCodeBlockFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_profiledBlock;
        return baselineCodeBlockForInlineCallFrame(inlineCallFrame);
    }

    CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin)
    {
        return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, m_profiledBlock);
    }

    const BitVector& capturedVarsFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_outermostCapturedVars;
        return inlineCallFrame->capturedVars;
    }

    bool isStrictModeFor(CodeOrigin codeOrigin)
    {
        if (!codeOrigin.inlineCallFrame)
            return m_codeBlock->isStrictMode();
        return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->isStrictMode();
    }

    ECMAMode ecmaModeFor(CodeOrigin codeOrigin)
    {
        return isStrictModeFor(codeOrigin) ? StrictMode : NotStrictMode;
    }

    bool masqueradesAsUndefinedWatchpointIsStillValid(const CodeOrigin& codeOrigin)
    {
        return globalObjectFor(codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid();
    }

    bool hasGlobalExitSite(const CodeOrigin& codeOrigin, ExitKind exitKind)
    {
        return baselineCodeBlockFor(codeOrigin)->hasExitSite(FrequentExitSite(exitKind));
    }

    bool hasExitSite(const CodeOrigin& codeOrigin, ExitKind exitKind)
    {
        return baselineCodeBlockFor(codeOrigin)->hasExitSite(FrequentExitSite(codeOrigin.bytecodeIndex, exitKind));
    }

    bool hasExitSite(Node* node, ExitKind exitKind)
    {
        return hasExitSite(node->origin.semantic, exitKind);
    }

    bool usesArguments(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_profiledBlock->usesArguments();

        return baselineCodeBlockForInlineCallFrame(inlineCallFrame)->usesArguments();
    }

    VirtualRegister argumentsRegisterFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_profiledBlock->argumentsRegister();

        return VirtualRegister(baselineCodeBlockForInlineCallFrame(
            inlineCallFrame)->argumentsRegister().offset() +
            inlineCallFrame->stackOffset);
    }

    VirtualRegister argumentsRegisterFor(const CodeOrigin& codeOrigin)
    {
        return argumentsRegisterFor(codeOrigin.inlineCallFrame);
    }

    VirtualRegister machineArgumentsRegisterFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_codeBlock->argumentsRegister();

        return inlineCallFrame->argumentsRegister;
    }

    VirtualRegister machineArgumentsRegisterFor(const CodeOrigin& codeOrigin)
    {
        return machineArgumentsRegisterFor(codeOrigin.inlineCallFrame);
    }

    VirtualRegister uncheckedArgumentsRegisterFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_profiledBlock->uncheckedArgumentsRegister();

        CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
        if (!codeBlock->usesArguments())
            return VirtualRegister();

        return VirtualRegister(codeBlock->argumentsRegister().offset() +
            inlineCallFrame->stackOffset);
    }

    VirtualRegister uncheckedArgumentsRegisterFor(const CodeOrigin& codeOrigin)
    {
        return uncheckedArgumentsRegisterFor(codeOrigin.inlineCallFrame);
    }

    VirtualRegister activationRegister()
    {
        return m_profiledBlock->activationRegister();
    }

    VirtualRegister uncheckedActivationRegister()
    {
        return m_profiledBlock->uncheckedActivationRegister();
    }

    VirtualRegister machineActivationRegister()
    {
        return m_profiledBlock->activationRegister();
    }

    VirtualRegister uncheckedMachineActivationRegister()
    {
        return m_profiledBlock->uncheckedActivationRegister();
    }

    ValueProfile* valueProfileFor(Node*);
    MethodOfGettingAValueProfile methodOfGettingAValueProfileFor(Node*);

    bool usesArguments() const
    {
        return m_codeBlock->usesArguments();
    }

    BlockIndex numBlocks() const { return m_blocks.size(); }
    BasicBlock* block(BlockIndex blockIndex) const { return m_blocks[blockIndex].get(); }
    BasicBlock* lastBlock() const { return block(numBlocks() - 1); }

    void appendBlock(PassRefPtr<BasicBlock> basicBlock)
    {
        basicBlock->index = m_blocks.size();
        m_blocks.append(basicBlock);
    }

    void killBlock(BlockIndex blockIndex)
    {
        m_blocks[blockIndex].clear();
    }

    void killBlock(BasicBlock* basicBlock)
    {
        killBlock(basicBlock->index);
    }

    void killBlockAndItsContents(BasicBlock*);

    void killUnreachableBlocks();

    void determineReachability();
    void resetReachability();

    void mergeRelevantToOSR();

    void computeRefCounts();

    unsigned varArgNumChildren(Node* node)
    {
        ASSERT(node->flags() & NodeHasVarArgs);
        return node->numChildren();
    }

    unsigned numChildren(Node* node)
    {
        if (node->flags() & NodeHasVarArgs)
            return varArgNumChildren(node);
        return AdjacencyList::Size;
    }

    Edge& varArgChild(Node* node, unsigned index)
    {
        ASSERT(node->flags() & NodeHasVarArgs);
        return m_varArgChildren[node->firstChild() + index];
    }

    Edge& child(Node* node, unsigned index)
    {
        if (node->flags() & NodeHasVarArgs)
            return varArgChild(node, index);
        return node->children.child(index);
    }

    void voteNode(Node* node, unsigned ballot, float weight = 1)
    {
        switch (node->op()) {
        case ValueToInt32:
        case UInt32ToNumber:
            node = node->child1().node();
            break;
        default:
            break;
        }

        if (node->op() == GetLocal)
            node->variableAccessData()->vote(ballot, weight);
    }

    void voteNode(Edge edge, unsigned ballot, float weight = 1)
    {
        voteNode(edge.node(), ballot, weight);
    }

    void voteChildren(Node* node, unsigned ballot, float weight = 1)
    {
        if (node->flags() & NodeHasVarArgs) {
            for (unsigned childIdx = node->firstChild();
                childIdx < node->firstChild() + node->numChildren();
                childIdx++) {
                if (!!m_varArgChildren[childIdx])
                    voteNode(m_varArgChildren[childIdx], ballot, weight);
            }
            return;
        }

        if (!node->child1())
            return;
        voteNode(node->child1(), ballot, weight);
        if (!node->child2())
            return;
        voteNode(node->child2(), ballot, weight);
        if (!node->child3())
            return;
        voteNode(node->child3(), ballot, weight);
    }

    template<typename T> // T = Node* or Edge
    void substitute(BasicBlock& block, unsigned startIndexInBlock, T oldThing, T newThing)
    {
        for (unsigned indexInBlock = startIndexInBlock; indexInBlock < block.size(); ++indexInBlock) {
            Node* node = block[indexInBlock];
            if (node->flags() & NodeHasVarArgs) {
                for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); ++childIdx) {
                    if (!!m_varArgChildren[childIdx])
                        compareAndSwap(m_varArgChildren[childIdx], oldThing, newThing);
                }
                continue;
            }
            if (!node->child1())
                continue;
            compareAndSwap(node->children.child1(), oldThing, newThing);
            if (!node->child2())
                continue;
            compareAndSwap(node->children.child2(), oldThing, newThing);
            if (!node->child3())
                continue;
            compareAndSwap(node->children.child3(), oldThing, newThing);
        }
    }
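
    // Illustrative only: after creating newNode as a stand-in for oldNode, a phase
    // might redirect the remaining uses in the block with something like
    //
    //     substitute(*block, indexInBlock, oldNode, newNode);
    //
    // using the Edge instantiation instead when the edge itself must be swapped.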

    // Use this if you introduce a new GetLocal and you know that you introduced it *before*
    // any GetLocals in the basic block.
    // FIXME: it may be appropriate, in the future, to generalize this to handle GetLocals
    // introduced anywhere in the basic block.
    void substituteGetLocal(BasicBlock& block, unsigned startIndexInBlock, VariableAccessData* variableAccessData, Node* newGetLocal);

    void invalidateCFG();

    void clearFlagsOnAllNodes(NodeFlags);

    void clearReplacements();
    void initializeNodeOwners();

    BlockList blocksInPreOrder();
    BlockList blocksInPostOrder();

    class NaturalBlockIterable {
    public:
        NaturalBlockIterable()
            : m_graph(nullptr)
        {
        }

        NaturalBlockIterable(Graph& graph)
            : m_graph(&graph)
        {
        }

        class iterator {
        public:
            iterator()
                : m_graph(nullptr)
                , m_index(0)
            {
            }

            iterator(Graph& graph, BlockIndex index)
                : m_graph(&graph)
                , m_index(findNext(index))
            {
            }

            BasicBlock *operator*()
            {
                return m_graph->block(m_index);
            }

            iterator& operator++()
            {
                m_index = findNext(m_index + 1);
                return *this;
            }

            bool operator==(const iterator& other) const
            {
                return m_index == other.m_index;
            }

            bool operator!=(const iterator& other) const
            {
                return !(*this == other);
            }

        private:
            BlockIndex findNext(BlockIndex index)
            {
                while (index < m_graph->numBlocks() && !m_graph->block(index))
                    index++;
                return index;
            }

            Graph* m_graph;
            BlockIndex m_index;
        };

        iterator begin()
        {
            return iterator(*m_graph, 0);
        }

        iterator end()
        {
            return iterator(*m_graph, m_graph->numBlocks());
        }

    private:
        Graph* m_graph;
    };

    NaturalBlockIterable blocksInNaturalOrder()
    {
        return NaturalBlockIterable(*this);
    }
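
    // Illustrative only: killed blocks leave null entries in m_blocks, so iteration
    // by index has to skip holes. NaturalBlockIterable does that, which lets a phase
    // simply write:
    //
    //     for (BasicBlock* block : graph.blocksInNaturalOrder()) { ... }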

    template<typename ChildFunctor>
    void doToChildrenWithNode(Node* node, const ChildFunctor& functor)
    {
        DFG_NODE_DO_TO_CHILDREN(*this, node, functor);
    }

    template<typename ChildFunctor>
    void doToChildren(Node* node, const ChildFunctor& functor)
    {
        doToChildrenWithNode(
            node,
            [&functor] (Node*, Edge& edge) {
                functor(edge);
            });
    }
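
    // Illustrative only: the usual way for a phase to visit a node's live edges is
    //
    //     graph.doToChildren(node, [&] (Edge edge) {
    //         // edge is non-null here; empty children are skipped by the macro.
    //     });
    //
    // doToChildrenWithNode also passes the node itself, which helps when one functor
    // is shared across many nodes.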

    Profiler::Compilation* compilation() { return m_plan.compilation.get(); }

    DesiredIdentifiers& identifiers() { return m_plan.identifiers; }
    DesiredWatchpoints& watchpoints() { return m_plan.watchpoints; }

    FullBytecodeLiveness& livenessFor(CodeBlock*);
    FullBytecodeLiveness& livenessFor(InlineCallFrame*);
    bool isLiveInBytecode(VirtualRegister, CodeOrigin);

    unsigned frameRegisterCount();
    unsigned stackPointerOffset();
    unsigned requiredRegisterCountForExit();
    unsigned requiredRegisterCountForExecutionAndExit();

    JSValue tryGetConstantProperty(JSValue base, const StructureSet&, PropertyOffset);
    JSValue tryGetConstantProperty(JSValue base, Structure*, PropertyOffset);
    JSValue tryGetConstantProperty(JSValue base, const StructureAbstractValue&, PropertyOffset);
    JSValue tryGetConstantProperty(const AbstractValue&, PropertyOffset);

    JSLexicalEnvironment* tryGetActivation(Node*);
    WriteBarrierBase<Unknown>* tryGetRegisters(Node*);

    JSArrayBufferView* tryGetFoldableView(Node*);
    JSArrayBufferView* tryGetFoldableView(Node*, ArrayMode);
    JSArrayBufferView* tryGetFoldableViewForChild1(Node*);

    void registerFrozenValues();

    virtual void visitChildren(SlotVisitor&) override;

    NO_RETURN_DUE_TO_CRASH void handleAssertionFailure(
        std::nullptr_t, const char* file, int line, const char* function,
        const char* assertion);
    NO_RETURN_DUE_TO_CRASH void handleAssertionFailure(
        Node*, const char* file, int line, const char* function,
        const char* assertion);
    NO_RETURN_DUE_TO_CRASH void handleAssertionFailure(
        BasicBlock*, const char* file, int line, const char* function,
        const char* assertion);

    VM& m_vm;
    Plan& m_plan;
    CodeBlock* m_codeBlock;
    CodeBlock* m_profiledBlock;

    NodeAllocator& m_allocator;

    Operands<FrozenValue*> m_mustHandleValues;

    Vector<RefPtr<BasicBlock>, 8> m_blocks;
    Vector<Edge, 16> m_varArgChildren;

    HashMap<EncodedJSValue, FrozenValue*, EncodedJSValueHash, EncodedJSValueHashTraits> m_frozenValueMap;
    Bag<FrozenValue> m_frozenValues;

    Bag<StorageAccessData> m_storageAccessData;

    // In CPS, this is all of the SetArgument nodes for the arguments in the machine code block
    // that survived DCE. All of them except maybe "this" will survive DCE, because of the Flush
    // nodes.
    //
    // In SSA, this is all of the GetStack nodes for the arguments in the machine code block that
    // may have some speculation in the prologue and survived DCE. Note that to get the speculation
    // for an argument in SSA, you must use m_argumentFormats, since we still have to speculate
    // even if the argument got killed. For example:
    //
    //     function foo(x) {
    //        var tmp = x + 1;
    //     }
    //
    // Assume that x is always int during profiling. The ArithAdd for "x + 1" will be dead and will
    // have a proven check for the edge to "x". So, we will not insert a Check node and we will
    // kill the GetStack for "x". But, we must do the int check in the prologue, because that's the
    // thing we used to allow DCE of ArithAdd. Otherwise the add could be impure:
    //
    //     var o = {
    //         valueOf: function() { do side effects; }
    //     };
    //     foo(o);
    //
    // If we DCE the ArithAdd and we remove the int check on x, then this won't do the side
    // effects.
    Vector<Node*, 8> m_arguments;

    // In CPS, this is meaningless. In SSA, this is the argument speculation that we've locked in.
    Vector<FlushFormat> m_argumentFormats;

    SegmentedVector<VariableAccessData, 16> m_variableAccessData;
    SegmentedVector<ArgumentPosition, 8> m_argumentPositions;
    SegmentedVector<StructureSet, 16> m_structureSet;
    Bag<Transition> m_transitions;
    SegmentedVector<NewArrayBufferData, 4> m_newArrayBufferData;
    Bag<BranchData> m_branchData;
    Bag<SwitchData> m_switchData;
    Bag<MultiGetByOffsetData> m_multiGetByOffsetData;
    Bag<MultiPutByOffsetData> m_multiPutByOffsetData;
    Bag<ObjectMaterializationData> m_objectMaterializationData;
    Bag<CallVarargsData> m_callVarargsData;
    Bag<LoadVarargsData> m_loadVarargsData;
    Bag<StackAccessData> m_stackAccessData;
    Vector<InlineVariableData, 4> m_inlineVariableData;
    HashMap<CodeBlock*, std::unique_ptr<FullBytecodeLiveness>> m_bytecodeLiveness;
    bool m_hasArguments;
    HashSet<ExecutableBase*> m_executablesWhoseArgumentsEscaped;
    BitVector m_lazyVars;
    Dominators m_dominators;
    PrePostNumbering m_prePostNumbering;
    NaturalLoops m_naturalLoops;
    unsigned m_localVars;
    unsigned m_nextMachineLocal;
    unsigned m_parameterSlots;
    int m_machineCaptureStart;
    std::unique_ptr<SlowArgument[]> m_slowArguments;
    BitVector m_outermostCapturedVars;

#if USE(JSVALUE32_64)
    std::unordered_map<int64_t, double*> m_doubleConstantsMap;
    std::unique_ptr<Bag<double>> m_doubleConstants;
#endif

    OptimizationFixpointState m_fixpointState;
    StructureRegistrationState m_structureRegistrationState;
    GraphForm m_form;
    UnificationState m_unificationState;
    RefCountState m_refCountState;
private:

    void handleSuccessor(Vector<BasicBlock*, 16>& worklist, BasicBlock*, BasicBlock* successor);

    AddSpeculationMode addImmediateShouldSpeculateInt32(Node* add, bool variableShouldSpeculateInt32, Node* immediate, RareCaseProfilingSource source)
    {
        ASSERT(immediate->hasConstant());

        JSValue immediateValue = immediate->asJSValue();
        if (!immediateValue.isNumber() && !immediateValue.isBoolean())
            return DontSpeculateInt32;

        if (!variableShouldSpeculateInt32)
            return DontSpeculateInt32;

        if (immediateValue.isInt32() || immediateValue.isBoolean())
            return add->canSpeculateInt32(source) ? SpeculateInt32 : DontSpeculateInt32;

        double doubleImmediate = immediateValue.asDouble();
        const double twoToThe48 = 281474976710656.0;
        if (doubleImmediate < -twoToThe48 || doubleImmediate > twoToThe48)
            return DontSpeculateInt32;

        return bytecodeCanTruncateInteger(add->arithNodeFlags()) ? SpeculateInt32AndTruncateConstants : DontSpeculateInt32;
    }
};

} } // namespace JSC::DFG

#endif
#endif