Object allocation sinking phase shouldn't re-decorate previously sunken allocations...
[WebKit-https.git] Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp
/*
 * Copyright (C) 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGObjectAllocationSinkingPhase.h"

#if ENABLE(DFG_JIT)

#include "DFGAbstractHeap.h"
#include "DFGBlockMapInlines.h"
#include "DFGClobberize.h"
#include "DFGGraph.h"
#include "DFGInsertOSRHintsForUpdate.h"
#include "DFGInsertionSet.h"
#include "DFGLivenessAnalysisPhase.h"
#include "DFGOSRAvailabilityAnalysisPhase.h"
#include "DFGPhase.h"
#include "DFGPromoteHeapAccess.h"
#include "DFGSSACalculator.h"
#include "DFGValidate.h"
#include "JSCInlines.h"

namespace JSC { namespace DFG {

static bool verbose = false;

class ObjectAllocationSinkingPhase : public Phase {
public:
    ObjectAllocationSinkingPhase(Graph& graph)
        : Phase(graph, "object allocation sinking")
        , m_ssaCalculator(graph)
        , m_insertionSet(graph)
    {
    }
    
    bool run()
    {
        ASSERT(m_graph.m_fixpointState == FixpointNotConverged);
        
        m_graph.m_dominators.computeIfNecessary(m_graph);
        
        // Logically we wish to consider every NewObject and sink it. However it's probably not
        // profitable to sink a NewObject that will always escape. So, first we do a very simple
        // forward flow analysis that determines the set of NewObject nodes that have any chance
        // of benefiting from object allocation sinking. Then we fixpoint the following rules:
        //
        // - For each NewObject, we turn the original NewObject into a PhantomNewObject and then
        //   we insert MaterializeNewObject just before those escaping sites that come before any
        //   other escaping sites - that is, there is no path between the allocation and those sites
        //   that would see any other escape. Note that Upsilons constitute escaping sites. Then we
        //   insert additional MaterializeNewObject nodes on Upsilons that feed into Phis that mix
        //   materializations and the original PhantomNewObject. We then turn each PutByOffset over a
        //   PhantomNewObject into a PutByOffsetHint.
        //
        // - We perform the same optimization for MaterializeNewObject. This allows us to cover
        //   cases where we had MaterializeNewObject flowing into a PutByOffsetHint.
        //
        // We could also add this rule:
        //
        // - If all of the Upsilons of a Phi have a MaterializeNewObject that isn't used by anyone
        //   else, then replace the Phi with the MaterializeNewObject.
        //
        //   FIXME: Implement this. Note that this is totally doable but it requires some gnarly
        //   code, and to be effective the pruner needs to be aware of it. Currently any Upsilon
        //   is considered to be an escape even by the pruner, so it's unlikely that we'll see
        //   many cases of Phi over Materializations.
        //   https://bugs.webkit.org/show_bug.cgi?id=136927
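        //
        // As a rough illustration of the first rule (schematic IR, not an actual dump from this
        // phase; node names and operand lists are illustrative only), an allocation whose only
        // escape is on one branch ends up looking roughly like this:
        //
        //     Before:                              After:
        //         a: NewObject                         a: PhantomNewObject
        //         b: PutByOffset(a, a, v)              b: PutByOffsetHint(a, v)
        //            Branch(p, #then, #else)              Branch(p, #then, #else)
        //       #then:                               #then:
        //         c: Call(f, a)                          m: MaterializeNewObject(...)
        //                                                c: Call(f, m)
        //       #else:                               #else:
        //            ... (a never escapes here)          ... (no allocation on this path)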
        
        if (!performSinking())
            return false;
        
        while (performSinking()) { }
        
        if (verbose) {
            dataLog("Graph after sinking:\n");
            m_graph.dump();
        }
        
        return true;
    }

private:
    bool performSinking()
    {
        m_graph.computeRefCounts();
        performLivenessAnalysis(m_graph);
        performOSRAvailabilityAnalysis(m_graph);
        
        CString graphBeforeSinking;
        if (Options::verboseValidationFailure() && Options::validateGraphAtEachPhase()) {
            StringPrintStream out;
            m_graph.dump(out);
            graphBeforeSinking = out.toCString();
        }
        
        if (verbose) {
            dataLog("Graph before sinking:\n");
            m_graph.dump();
        }
        
        determineMaterializationPoints();
        if (m_sinkCandidates.isEmpty())
            return false;
        
        // At this point we are committed to sinking the sink candidates.
        placeMaterializationPoints();
        lowerNonReadingOperationsOnPhantomAllocations();
        promoteSunkenFields();
        
        if (Options::validateGraphAtEachPhase())
            validate(m_graph, DumpGraph, graphBeforeSinking);
        
        if (verbose)
            dataLog("Sinking iteration changed the graph.\n");
        return true;
    }
    
    void determineMaterializationPoints()
    {
        // The premise of this pass is that if there exists a point in the program where some
        // path from a phantom allocation site to that point causes materialization, then *all*
        // paths cause materialization. This should mean that there are never any redundant
        // materializations.
        
        m_sinkCandidates.clear();
        m_materializationToEscapee.clear();
        m_materializationSiteToMaterializations.clear();
        
        BlockMap<HashMap<Node*, bool>> materializedAtHead(m_graph);
        BlockMap<HashMap<Node*, bool>> materializedAtTail(m_graph);
        
        bool changed;
        do {
            if (verbose)
                dataLog("Doing iteration of materialization point placement.\n");
            changed = false;
            for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
                HashMap<Node*, bool> materialized = materializedAtHead[block];
                if (verbose)
                    dataLog("    Looking at block ", pointerDump(block), "\n");
                for (Node* node : *block) {
                    handleNode(
                        node,
                        [&] () {
                            materialized.add(node, false);
                        },
                        [&] (Node* escapee) {
                            auto iter = materialized.find(escapee);
                            if (iter != materialized.end()) {
                                if (verbose)
                                    dataLog("    ", escapee, " escapes at ", node, "\n");
                                iter->value = true;
                            }
                        });
                }
                
                if (verbose)
                    dataLog("    Materialized at tail of ", pointerDump(block), ": ", mapDump(materialized), "\n");
                
                if (materialized == materializedAtTail[block])
                    continue;
                
                materializedAtTail[block] = materialized;
                changed = true;
                
                // Only propagate things to our successors if they are alive in all successors.
                // So, we prune materialized-at-tail to only include things that are live.
                Vector<Node*> toRemove;
                for (auto pair : materialized) {
                    if (!block->ssa->liveAtTail.contains(pair.key))
                        toRemove.append(pair.key);
                }
                for (Node* key : toRemove)
                    materialized.remove(key);
                
                for (BasicBlock* successorBlock : block->successors()) {
                    for (auto pair : materialized) {
                        materializedAtHead[successorBlock].add(
                            pair.key, false).iterator->value |= pair.value;
                    }
                }
            }
        } while (changed);
        
        // Determine the sink candidates. Broadly, a sink candidate is a node that handleNode()
        // believes is sinkable, and one of the following is true:
        //
        // 1) There exists a basic block with only backward outgoing edges (or no outgoing edges)
        //    in which the node wasn't materialized. This is meant to catch effectively-infinite
        //    loops in which we don't need to have allocated the object.
        //
        // 2) There exists a basic block at the tail of which the node is not materialized and the
        //    node is dead.
        //
        // 3) The sum of execution counts of the materializations is less than the sum of
        //    execution counts of the original node.
        //
        // We currently implement only rule #2.
        // FIXME: Implement the two other rules.
        // https://bugs.webkit.org/show_bug.cgi?id=137073 (rule #1)
        // https://bugs.webkit.org/show_bug.cgi?id=137074 (rule #3)
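        //
        // As a rough illustration of rule #2 (hypothetical source, not taken from any test):
        //
        //     function foo(p) {
        //         var o = new Object();
        //         o.f = 42;
        //         if (p)
        //             return o;   // o escapes, so this path materializes it.
        //         return null;    // o is dead and unmaterialized at the tail of this block,
        //                         // which makes its NewObject a sink candidate.
        //     }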

        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (auto pair : materializedAtTail[block]) {
                if (pair.value)
                    continue; // It was materialized.
                
                if (block->ssa->liveAtTail.contains(pair.key))
                    continue; // It might still get materialized in all of the successors.
                
                // We know that it died in this block and it wasn't materialized. That means that
                // if we sink this allocation, then *this* will be a path along which we never
                // have to allocate. Profit!
                m_sinkCandidates.add(pair.key);
            }
        }
        
        if (m_sinkCandidates.isEmpty())
            return;
        
        // A materialization edge exists at any point where a node escapes but hasn't been
        // materialized yet. We do this in two parts. First we find all of the nodes that cause
        // escaping to happen, where the escapee had not yet been materialized. This catches
        // everything but loops. We then catch loops - as well as weirder control flow constructs -
        // in a subsequent pass that looks at places in the CFG where an edge exists from a block
        // that hasn't materialized to a block that has. We insert the materialization along such an
        // edge, and we rely on the fact that critical edges were already broken so that we actually
        // either insert the materialization at the head of the successor or the tail of the
        // predecessor.
        //
        // FIXME: This can create duplicate allocations when we really only needed to perform one.
        // For example:
        //
        //     var o = new Object();
        //     if (rare) {
        //         if (stuff)
        //             call(o); // o escapes here.
        //         return;
        //     }
        //     // o doesn't escape down here.
        //
        // In this example, we would place a materialization point at call(o) and then we would find
        // ourselves having to insert another one at the implicit else case of that if statement
        // ('cause we've broken critical edges). We would instead really like to just have one
        // materialization point right at the top of the then case of "if (rare)". To do this, we can
        // find the LCA of the various materializations in the dom tree.
        // https://bugs.webkit.org/show_bug.cgi?id=137124
        
        // First pass: find intra-block materialization points.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            HashSet<Node*> materialized;
            for (auto pair : materializedAtHead[block]) {
                if (pair.value && m_sinkCandidates.contains(pair.key))
                    materialized.add(pair.key);
            }
            
            if (verbose)
                dataLog("Placing materialization points in ", pointerDump(block), " with materialization set ", listDump(materialized), "\n");
            
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                
                handleNode(
                    node,
                    [&] () { },
                    [&] (Node* escapee) {
                        if (verbose)
                            dataLog("Seeing ", escapee, " escape at ", node, "\n");
                        
                        if (!m_sinkCandidates.contains(escapee)) {
                            if (verbose)
                                dataLog("    It's not a sink candidate.\n");
                            return;
                        }
                        
                        if (!materialized.add(escapee).isNewEntry) {
                            if (verbose)
                                dataLog("   It's already materialized.\n");
                            return;
                        }
                        
                        createMaterialize(escapee, node);
                    });
            }
        }
        
        // Second pass: find CFG edge materialization points.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (BasicBlock* successorBlock : block->successors()) {
                for (auto pair : materializedAtHead[successorBlock]) {
                    Node* allocation = pair.key;
                    
                    // We only care if it is materialized in the successor.
                    if (!pair.value)
                        continue;
                    
                    // We only care about sinking candidates.
                    if (!m_sinkCandidates.contains(allocation))
                        continue;
                    
                    // We only care if it isn't materialized in the predecessor.
                    if (materializedAtTail[block].get(allocation))
                        continue;
                    
                    // We need to decide if we put the materialization at the head of the successor,
                    // or the tail of the predecessor. It needs to be placed so that the allocation
                    // is never materialized before, and always materialized after.
                    
                    // Is it never materialized in any of the successor's predecessors? I like to think
                    // of "successors' predecessors" and "predecessor's successors" as the "shadow",
                    // because of what it looks like when you draw it.
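                    //
                    // Schematically (a sketch of the shape, not a real CFG from this phase):
                    //
                    //       block     other predecessors of successorBlock
                    //           \      /
                    //        successorBlock
                    //
                    // The "shadow" blocks consulted below are successorBlock's predecessors.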
                    bool neverMaterializedInShadow = true;
                    for (BasicBlock* shadowBlock : successorBlock->predecessors) {
                        if (materializedAtTail[shadowBlock].get(allocation)) {
                            neverMaterializedInShadow = false;
                            break;
                        }
                    }
                    
                    if (neverMaterializedInShadow) {
                        createMaterialize(allocation, successorBlock->firstOriginNode());
                        continue;
                    }
                    
                    // Because we had broken critical edges, it must be the case that the
                    // predecessor's successors all materialize the object. This is because the
                    // previous case is guaranteed to catch the case where the successor only has
                    // one predecessor. When critical edges are broken, this is also the only case
                    // where the predecessor could have had multiple successors. Therefore we have
                    // already handled the case where the predecessor has multiple successors.
                    DFG_ASSERT(m_graph, block, block->numSuccessors() == 1);
                    
                    createMaterialize(allocation, block->last());
                }
            }
        }
    }
    
    void placeMaterializationPoints()
    {
        m_ssaCalculator.reset();
        
        // The "variables" are the object allocations that we are sinking. So, nodeToVariable maps
        // sink candidates (aka escapees) to the SSACalculator's notion of Variable, and indexToNode
        // maps in the opposite direction using the SSACalculator::Variable::index() as the key.
        HashMap<Node*, SSACalculator::Variable*> nodeToVariable;
        Vector<Node*> indexToNode;
        
        for (Node* node : m_sinkCandidates) {
            SSACalculator::Variable* variable = m_ssaCalculator.newVariable();
            nodeToVariable.add(node, variable);
            ASSERT(indexToNode.size() == variable->index());
            indexToNode.append(node);
        }
        
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block) {
                if (SSACalculator::Variable* variable = nodeToVariable.get(node))
                    m_ssaCalculator.newDef(variable, block, node);
                
                for (Node* materialize : m_materializationSiteToMaterializations.get(node)) {
                    m_ssaCalculator.newDef(
                        nodeToVariable.get(m_materializationToEscapee.get(materialize)),
                        block, materialize);
                }
            }
        }
        
        m_ssaCalculator.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                Node* allocation = indexToNode[variable->index()];
                if (!block->ssa->liveAtHead.contains(allocation))
                    return nullptr;
                
                Node* phiNode = m_graph.addNode(allocation->prediction(), Phi, NodeOrigin());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });
        
        // Place Phis in the right places. Replace all uses of any allocation with the appropriate
        // materialization. Create the appropriate Upsilon nodes.
        LocalOSRAvailabilityCalculator availabilityCalculator;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            HashMap<Node*, Node*> mapping;
            
            for (Node* candidate : block->ssa->liveAtHead) {
                SSACalculator::Variable* variable = nodeToVariable.get(candidate);
                if (!variable)
                    continue;
                
                SSACalculator::Def* def = m_ssaCalculator.reachingDefAtHead(block, variable);
                if (!def)
                    continue;
                
                mapping.set(indexToNode[variable->index()], def->value());
            }
            
            availabilityCalculator.beginBlock(block);
            for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(block)) {
                m_insertionSet.insert(0, phiDef->value());
                
                Node* originalNode = indexToNode[phiDef->variable()->index()];
                insertOSRHintsForUpdate(
                    m_insertionSet, 0, NodeOrigin(), availabilityCalculator.m_availability,
                    originalNode, phiDef->value());

                mapping.set(originalNode, phiDef->value());
            }
            
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                for (Node* materialize : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialize);
                    m_insertionSet.insert(nodeIndex, materialize);
                    insertOSRHintsForUpdate(
                        m_insertionSet, nodeIndex, node->origin,
                        availabilityCalculator.m_availability, escapee, materialize);
                    mapping.set(escapee, materialize);
                }
                
                availabilityCalculator.executeNode(node);
                
                m_graph.doToChildren(
                    node,
                    [&] (Edge& edge) {
                        if (Node* materialize = mapping.get(edge.node()))
                            edge.setNode(materialize);
                    });
                
                // If you cause an escape, you shouldn't see the original escapee.
                if (validationEnabled()) {
                    handleNode(
                        node,
                        [&] () { },
                        [&] (Node* escapee) {
                            DFG_ASSERT(m_graph, node, !m_sinkCandidates.contains(escapee));
                        });
                }
            }
            
            size_t upsilonInsertionPoint = block->size() - 1;
            Node* upsilonWhere = block->last();
            NodeOrigin upsilonOrigin = upsilonWhere->origin;
            for (BasicBlock* successorBlock : block->successors()) {
                for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    Node* allocation = indexToNode[variable->index()];
                    
                    Node* originalIncoming = mapping.get(allocation);
                    Node* incoming;
                    if (originalIncoming == allocation) {
                        // If we have a Phi that combines materializations with the original
                        // phantom object, then the path with the phantom object must materialize.
                        
                        incoming = createMaterialize(allocation, upsilonWhere);
                        m_insertionSet.insert(upsilonInsertionPoint, incoming);
                        insertOSRHintsForUpdate(
                            m_insertionSet, upsilonInsertionPoint, upsilonOrigin,
                            availabilityCalculator.m_availability, originalIncoming, incoming);
                    } else
                        incoming = originalIncoming;
                    
                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }
            }
            
            m_insertionSet.execute(block);
        }
        
        // At this point we have dummy materialization nodes along with edges to them. This means
        // that the part of the control flow graph that prefers to see actual object allocations
        // is completely fixed up, except for the materializations themselves.
    }
    
    void lowerNonReadingOperationsOnPhantomAllocations()
    {
        // Lower everything but reading operations on phantom allocations. We absolutely have to
        // lower all writes so as to reveal them to the SSA calculator. We cannot lower reads
        // because the whole point is that those go away completely.
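        //
        // For example (an informal sketch, not actual IR produced by this code; operand lists are
        // schematic), a write over a sink candidate becomes a hint while a read is left alone for
        // promoteSunkenFields() to deal with:
        //
        //     PutByOffset(@storage, @candidate, @value)  =>  PutByOffsetHint(@candidate, @value)
        //     PutStructure(@candidate, T)                =>  PutStructureHint(@candidate, constant
        //                                                    for T's new structure)
        //     GetByOffset(@storage, @candidate)          =>  left as-is; promoteSunkenFields() later
        //                                                    replaces it with the promoted value.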

        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                switch (node->op()) {
                case PutByOffset: {
                    if (m_sinkCandidates.contains(node->child2().node()))
                        node->convertToPutByOffsetHint();
                    break;
                }
                    
                case PutStructure: {
                    if (m_sinkCandidates.contains(node->child1().node())) {
                        Node* structure = m_insertionSet.insertConstant(
                            nodeIndex, node->origin, JSValue(node->transition()->next));
                        node->convertToPutStructureHint(structure);
                    }
                    break;
                }
                    
                case NewObject: {
                    if (m_sinkCandidates.contains(node)) {
                        Node* structure = m_insertionSet.insertConstant(
                            nodeIndex + 1, node->origin, JSValue(node->structure()));
                        m_insertionSet.insertNode(
                            nodeIndex + 1, SpecNone, PutStructureHint, node->origin,
                            Edge(node, KnownCellUse), Edge(structure, KnownCellUse));
                        node->convertToPhantomNewObject();
                    }
                    break;
                }
                    
                case MaterializeNewObject: {
                    if (m_sinkCandidates.contains(node)) {
                        m_insertionSet.insertNode(
                            nodeIndex + 1, SpecNone, PutStructureHint, node->origin,
                            Edge(node, KnownCellUse), m_graph.varArgChild(node, 0));
                        for (unsigned i = 0; i < node->objectMaterializationData().m_properties.size(); ++i) {
                            m_insertionSet.insertNode(
                                nodeIndex + 1, SpecNone, PutByOffsetHint, node->origin,
                                Edge(node, KnownCellUse), m_graph.varArgChild(node, i + 1));
                        }
                        node->convertToPhantomNewObject();
                    }
                    break;
                }
                    
                case StoreBarrier:
                case StoreBarrierWithNullCheck: {
                    if (m_sinkCandidates.contains(node->child1().node()))
                        node->convertToPhantom();
                    break;
                }
                    
                default:
                    break;
                }
                
                m_graph.doToChildren(
                    node,
                    [&] (Edge& edge) {
                        if (m_sinkCandidates.contains(edge.node()))
                            edge.setUseKind(KnownCellUse);
                    });
            }
            m_insertionSet.execute(block);
        }
    }
    
    void promoteSunkenFields()
    {
        // Collect the set of heap locations that we will be operating over.
        HashSet<PromotedHeapLocation> locations;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block) {
                promoteHeapAccess(
                    node,
                    [&] (PromotedHeapLocation location, Edge) {
                        if (m_sinkCandidates.contains(location.base()))
                            locations.add(location);
                    },
                    [&] (PromotedHeapLocation location) {
                        if (m_sinkCandidates.contains(location.base()))
                            locations.add(location);
                    });
            }
        }
        
        // Figure out which locations belong to which allocations.
        m_locationsForAllocation.clear();
        for (PromotedHeapLocation location : locations) {
            auto result = m_locationsForAllocation.add(location.base(), Vector<PromotedHeapLocation>());
            ASSERT(!result.iterator->value.contains(location));
            result.iterator->value.append(location);
        }
        
        // For each sunken thingy, make sure we create Bottom values for all of its fields.
        // Note that this has the hilarious slight inefficiency of creating redundant hints for
        // things that were previously materializations. This should only impact compile times and
        // not code quality, and it's necessary for soundness without some data structure hackage.
        // For example, a MaterializeNewObject that we choose to sink may have new fields added to
        // it conditionally. That would necessitate Bottoms.
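        //
        // A hypothetical example of such a conditionally-added field (illustrative JS, not taken
        // from any particular test):
        //
        //     var o = {};      // a sunken allocation
        //     if (p)
        //         o.f = 42;    // the NamedPropertyPLoc for "f" is written on only one path, so the
        //                      // Phi for it needs a BottomValue on the other path.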
        Node* bottom = nullptr;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            if (block == m_graph.block(0))
                bottom = m_insertionSet.insertNode(0, SpecNone, BottomValue, NodeOrigin());
            
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
                    m_insertionSet.insert(
                        nodeIndex + 1, location.createHint(m_graph, node->origin, bottom));
                }
            }
            m_insertionSet.execute(block);
        }

        m_ssaCalculator.reset();

        // Collect the set of "variables" that we will be sinking.
        m_locationToVariable.clear();
        m_indexToLocation.clear();
        for (PromotedHeapLocation location : locations) {
            SSACalculator::Variable* variable = m_ssaCalculator.newVariable();
            m_locationToVariable.add(location, variable);
            ASSERT(m_indexToLocation.size() == variable->index());
            m_indexToLocation.append(location);
        }
        
        // Create Defs from the existing hints.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block) {
                promoteHeapAccess(
                    node,
                    [&] (PromotedHeapLocation location, Edge value) {
                        if (!m_sinkCandidates.contains(location.base()))
                            return;
                        SSACalculator::Variable* variable = m_locationToVariable.get(location);
                        m_ssaCalculator.newDef(variable, block, value.node());
                    },
                    [&] (PromotedHeapLocation) { });
            }
        }
        
        // OMG run the SSA calculator to create Phis!
        m_ssaCalculator.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                PromotedHeapLocation location = m_indexToLocation[variable->index()];
                if (!block->ssa->liveAtHead.contains(location.base()))
                    return nullptr;
                
                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, NodeOrigin());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });
        
        // Place Phis in the right places, replace all uses of any load with the appropriate
        // value, and create the appropriate Upsilon nodes.
        m_graph.clearReplacements();
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            // This mapping table is intended to be lazy. If something is omitted from the table,
            // it means that there haven't been any local stores to that promoted heap location.
            m_localMapping.clear();
            
            // Insert the Phi functions that we had previously created.
            for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(block)) {
                PromotedHeapLocation location = m_indexToLocation[phiDef->variable()->index()];
                
                m_insertionSet.insert(
                    0, phiDef->value());
                m_insertionSet.insert(
                    0, location.createHint(m_graph, NodeOrigin(), phiDef->value()));
                m_localMapping.add(location, phiDef->value());
            }
            
            if (verbose)
                dataLog("Local mapping at ", pointerDump(block), ": ", mapDump(m_localMapping), "\n");
            
            // Process the block and replace all uses of loads with the promoted value.
            for (Node* node : *block) {
                m_graph.performSubstitution(node);
                
                if (Node* escapee = m_materializationToEscapee.get(node))
                    populateMaterialize(block, node, escapee);
                
                promoteHeapAccess(
                    node,
                    [&] (PromotedHeapLocation location, Edge value) {
                        if (m_sinkCandidates.contains(location.base()))
                            m_localMapping.set(location, value.node());
                    },
                    [&] (PromotedHeapLocation location) {
                        if (m_sinkCandidates.contains(location.base()))
                            node->replaceWith(resolve(block, location));
                    });
            }
            
            // Gotta drop some Upsilons.
            size_t upsilonInsertionPoint = block->size() - 1;
            NodeOrigin upsilonOrigin = block->last()->origin;
            for (BasicBlock* successorBlock : block->successors()) {
                for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    PromotedHeapLocation location = m_indexToLocation[variable->index()];
                    Node* incoming = resolve(block, location);
                    
                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }
            }
            
            m_insertionSet.execute(block);
        }
    }
    
    Node* resolve(BasicBlock* block, PromotedHeapLocation location)
    {
        if (Node* result = m_localMapping.get(location))
            return result;
        
        // This implies that there is no local mapping. Find a non-local mapping.
        SSACalculator::Def* def = m_ssaCalculator.nonLocalReachingDef(
            block, m_locationToVariable.get(location));
        ASSERT(def);
        ASSERT(def->value());
        m_localMapping.add(location, def->value());
        return def->value();
    }

    template<typename SinkCandidateFunctor, typename EscapeFunctor>
    void handleNode(
        Node* node,
        const SinkCandidateFunctor& sinkCandidate,
        const EscapeFunctor& escape)
    {
        switch (node->op()) {
        case NewObject:
        case MaterializeNewObject:
            sinkCandidate();
            m_graph.doToChildren(
                node,
                [&] (Edge edge) {
                    escape(edge.node());
                });
            break;
            
        case CheckStructure:
        case GetByOffset:
        case MultiGetByOffset:
        case PutStructure:
        case GetGetterSetterByOffset:
        case MovHint:
        case Phantom:
        case Check:
        case HardPhantom:
        case StoreBarrier:
        case StoreBarrierWithNullCheck:
        case PutByOffsetHint:
            break;
            
        case PutByOffset:
            escape(node->child3().node());
            break;
            
        case MultiPutByOffset:
            // FIXME: In the future we should be able to handle this. It's just a matter of
            // building the appropriate *Hint variant of this instruction, along with a
            // PhantomStructureSelect node - since this transforms the Structure in a conditional
            // way.
            // https://bugs.webkit.org/show_bug.cgi?id=136924
            escape(node->child1().node());
            escape(node->child2().node());
            break;

        default:
            m_graph.doToChildren(
                node,
                [&] (Edge edge) {
                    escape(edge.node());
                });
            break;
        }
    }
    
    Node* createMaterialize(Node* escapee, Node* where)
    {
        Node* result = nullptr;
        
        switch (escapee->op()) {
        case NewObject:
        case MaterializeNewObject: {
            ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
            
            result = m_graph.addNode(
                escapee->prediction(), Node::VarArg, MaterializeNewObject,
                NodeOrigin(
                    escapee->origin.semantic,
                    where->origin.forExit),
                OpInfo(data), OpInfo(), 0, 0);
            break;
        }
            
        default:
            DFG_CRASH(m_graph, escapee, "Bad escapee op");
            break;
        }
        
        if (verbose)
            dataLog("Creating materialization point at ", where, " for ", escapee, ": ", result, "\n");
        
        m_materializationToEscapee.add(result, escapee);
        m_materializationSiteToMaterializations.add(
            where, Vector<Node*>()).iterator->value.append(result);
        
        return result;
    }
    
    void populateMaterialize(BasicBlock* block, Node* node, Node* escapee)
    {
        switch (node->op()) {
        case MaterializeNewObject: {
            ObjectMaterializationData& data = node->objectMaterializationData();
            unsigned firstChild = m_graph.m_varArgChildren.size();
            
            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
            
            PromotedHeapLocation structure(StructurePLoc, escapee);
            ASSERT(locations.contains(structure));
            
            m_graph.m_varArgChildren.append(Edge(resolve(block, structure), KnownCellUse));
            
            for (unsigned i = 0; i < locations.size(); ++i) {
                switch (locations[i].kind()) {
                case StructurePLoc: {
                    ASSERT(locations[i] == structure);
                    break;
                }
                    
                case NamedPropertyPLoc: {
                    Node* value = resolve(block, locations[i]);
                    if (value->op() == BottomValue) {
                        // We can skip Bottoms entirely.
                        break;
                    }
                    
                    data.m_properties.append(PhantomPropertyValue(locations[i].info()));
                    m_graph.m_varArgChildren.append(value);
                    break;
                }
                    
                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }
            
            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }
            
        default:
            DFG_CRASH(m_graph, node, "Bad materialize op");
            break;
        }
    }
    
    SSACalculator m_ssaCalculator;
    HashSet<Node*> m_sinkCandidates;
    HashMap<Node*, Node*> m_materializationToEscapee;
    HashMap<Node*, Vector<Node*>> m_materializationSiteToMaterializations;
    HashMap<Node*, Vector<PromotedHeapLocation>> m_locationsForAllocation;
    HashMap<PromotedHeapLocation, SSACalculator::Variable*> m_locationToVariable;
    Vector<PromotedHeapLocation> m_indexToLocation;
    HashMap<PromotedHeapLocation, Node*> m_localMapping;
    InsertionSet m_insertionSet;
};
    
bool performObjectAllocationSinking(Graph& graph)
{
    SamplingRegion samplingRegion("DFG Object Allocation Sinking Phase");
    return runPhase<ObjectAllocationSinkingPhase>(graph);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)