Unreviewed, rolling out r174275.
[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGObjectAllocationSinkingPhase.cpp
1 /*
2  * Copyright (C) 2014 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGObjectAllocationSinkingPhase.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "DFGAbstractHeap.h"
32 #include "DFGBlockMapInlines.h"
33 #include "DFGClobberize.h"
34 #include "DFGGraph.h"
35 #include "DFGInsertOSRHintsForUpdate.h"
36 #include "DFGInsertionSet.h"
37 #include "DFGLivenessAnalysisPhase.h"
38 #include "DFGOSRAvailabilityAnalysisPhase.h"
39 #include "DFGPhase.h"
40 #include "DFGPromoteHeapAccess.h"
41 #include "DFGSSACalculator.h"
42 #include "DFGValidate.h"
43 #include "JSCInlines.h"
44
45 namespace JSC { namespace DFG {
46
// Debug-logging switch for this phase; flip to true locally when diagnosing
// sinking decisions. Const because nothing in the phase ever mutates it.
static const bool verbose = false;
48
49 class ObjectAllocationSinkingPhase : public Phase {
50 public:
    // Constructs the phase over the given graph. The SSA calculator and the
    // insertion set are owned by the phase and reused (reset) across the
    // repeated sinking iterations that run() performs.
    ObjectAllocationSinkingPhase(Graph& graph)
        : Phase(graph, "object allocation sinking")
        , m_ssaCalculator(graph)
        , m_insertionSet(graph)
    {
    }
57     
58     bool run()
59     {
60         ASSERT(m_graph.m_fixpointState == FixpointNotConverged);
61         
62         m_graph.m_dominators.computeIfNecessary(m_graph);
63         
64         // Logically we wish to consider every NewObject and sink it. However it's probably not
65         // profitable to sink a NewObject that will always escape. So, first we do a very simple
66         // forward flow analysis that determines the set of NewObject nodes that have any chance
67         // of benefiting from object allocation sinking. Then we fixpoint the following rules:
68         //
69         // - For each NewObject, we turn the original NewObject into a PhantomNewObject and then
70         //   we insert MaterializeNewObject just before those escaping sites that come before any
71         //   other escaping sites - that is, there is no path between the allocation and those sites
72         //   that would see any other escape. Note that Upsilons constitute escaping sites. Then we
73         //   insert additional MaterializeNewObject nodes on Upsilons that feed into Phis that mix
74         //   materializations and the original PhantomNewObject. We then turn each PutByOffset over a
75         //   PhantomNewObject into a PutByOffsetHint.
76         //
77         // - We perform the same optimization for MaterializeNewObject. This allows us to cover
78         //   cases where we had MaterializeNewObject flowing into a PutByOffsetHint.
79         //
80         // We could also add this rule:
81         //
82         // - If all of the Upsilons of a Phi have a MaterializeNewObject that isn't used by anyone
83         //   else, then replace the Phi with the MaterializeNewObject.
84         //
85         //   FIXME: Implement this. Note that this totally doable but it requires some gnarly
86         //   code, and to be effective the pruner needs to be aware of it. Currently any Upsilon
87         //   is considered to be an escape even by the pruner, so it's unlikely that we'll see
88         //   many cases of Phi over Materializations.
89         //   https://bugs.webkit.org/show_bug.cgi?id=136927
90         
91         if (!performSinking())
92             return false;
93         
94         while (performSinking()) { }
95         
96         if (verbose) {
97             dataLog("Graph after sinking:\n");
98             m_graph.dump();
99         }
100         
101         return true;
102     }
103
104 private:
105     bool performSinking()
106     {
107         m_graph.computeRefCounts();
108         performLivenessAnalysis(m_graph);
109         performOSRAvailabilityAnalysis(m_graph);
110         
111         CString graphBeforeSinking;
112         if (Options::verboseValidationFailure() && Options::validateGraphAtEachPhase()) {
113             StringPrintStream out;
114             m_graph.dump(out);
115             graphBeforeSinking = out.toCString();
116         }
117         
118         if (verbose) {
119             dataLog("Graph before sinking:\n");
120             m_graph.dump();
121         }
122         
123         determineMaterializationPoints();
124         if (m_sinkCandidates.isEmpty())
125             return false;
126         
127         // At this point we are committed to sinking the sinking candidates.
128         placeMaterializationPoints();
129         lowerNonReadingOperationsOnPhantomAllocations();
130         promoteSunkenFields();
131         
132         if (Options::validateGraphAtEachPhase())
133             validate(m_graph, DumpGraph, graphBeforeSinking);
134         
135         if (verbose)
136             dataLog("Sinking iteration changed the graph.\n");
137         return true;
138     }
139     
    // Forward dataflow fixpoint that decides which allocations are worth
    // sinking (m_sinkCandidates) and records where each one must be
    // materialized (via createMaterialize).
    void determineMaterializationPoints()
    {
        // The premise of this pass is that if there exists a point in the program where some
        // path from a phantom allocation site to that point causes materialization, then *all*
        // paths cause materialization. This should mean that there are never any redundant
        // materializations.
        
        m_sinkCandidates.clear();
        m_materializationToEscapee.clear();
        m_materializationSiteToMaterializations.clear();
        
        // Per-block state: maps each tracked allocation to whether it has been
        // materialized by the head/tail of the block. Absence from the map
        // means the allocation doesn't reach that point at all.
        BlockMap<HashMap<Node*, bool>> materializedAtHead(m_graph);
        BlockMap<HashMap<Node*, bool>> materializedAtTail(m_graph);
        
        bool changed;
        do {
            if (verbose)
                dataLog("Doing iteration of materialization point placement.\n");
            changed = false;
            for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
                HashMap<Node*, bool> materialized = materializedAtHead[block];
                for (Node* node : *block) {
                    handleNode(
                        node,
                        // First callback: node is itself a sinkable allocation;
                        // start tracking it as not-yet-materialized.
                        [&] () {
                            materialized.add(node, false);
                        },
                        // Second callback: node escapes the given allocation;
                        // if we're tracking it, mark it materialized.
                        [&] (Node* escapee) {
                            auto iter = materialized.find(escapee);
                            if (iter != materialized.end())
                                iter->value = true;
                        });
                }
                
                if (verbose)
                    dataLog("    Materialized at tail of ", pointerDump(block), ": ", mapDump(materialized), "\n");
                
                if (materialized == materializedAtTail[block])
                    continue;
                
                materializedAtTail[block] = materialized;
                changed = true;
                
                // Only propagate things to our successors if they are alive in all successors.
                // So, we prune materialized-at-tail to only include things that are live.
                Vector<Node*> toRemove;
                for (auto pair : materialized) {
                    if (!block->ssa->liveAtTail.contains(pair.key))
                        toRemove.append(pair.key);
                }
                for (Node* key : toRemove)
                    materialized.remove(key);
                
                // Merge into each successor's head state; "|=" means an
                // allocation is materialized-at-head if any predecessor
                // materialized it.
                for (BasicBlock* successorBlock : block->successors()) {
                    for (auto pair : materialized) {
                        materializedAtHead[successorBlock].add(
                            pair.key, false).iterator->value |= pair.value;
                    }
                }
            }
        } while (changed);
        
        // Determine the sink candidates. Broadly, a sink candidate is a node that handleNode()
        // believes is sinkable, and one of the following is true:
        //
        // 1) There exists a basic block with only backward outgoing edges (or no outgoing edges)
        //    in which the node wasn't materialized. This is meant to catch effectively-infinite
        //    loops in which we don't need to have allocated the object.
        //
        // 2) There exists a basic block at the tail of which the node is not materialized and the
        //    node is dead.
        //
        // 3) The sum of execution counts of the materializations is less than the sum of
        //    execution counts of the original node.
        //
        // We currently implement only rule #2.
        // FIXME: Implement the two other rules.
        // https://bugs.webkit.org/show_bug.cgi?id=137073 (rule #1)
        // https://bugs.webkit.org/show_bug.cgi?id=137074 (rule #3)
        
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (auto pair : materializedAtTail[block]) {
                if (pair.value)
                    continue; // It was materialized.
                
                if (block->ssa->liveAtTail.contains(pair.key))
                    continue; // It might still get materialized in all of the successors.
                
                // We know that it died in this block and it wasn't materialized. That means that
                // if we sink this allocation, then *this* will be a path along which we never
                // have to allocate. Profit!
                m_sinkCandidates.add(pair.key);
            }
        }
        
        if (m_sinkCandidates.isEmpty())
            return;
        
        // A materialization edge exists at any point where a node escapes but hasn't been
        // materialized yet. We do this in two parts. First we find all of the nodes that cause
        // escaping to happen, where the escapee had not yet been materialized. This catches
        // everything but loops. We then catch loops - as well as weirder control flow constructs -
        // in a subsequent pass that looks at places in the CFG where an edge exists from a block
        // that hasn't materialized to a block that has. We insert the materialization along such an
        // edge, and we rely on the fact that critical edges were already broken so that we actually
        // either insert the materialization at the head of the successor or the tail of the
        // predecessor.
        //
        // FIXME: This can create duplicate allocations when we really only needed to perform one.
        // For example:
        //
        //     var o = new Object();
        //     if (rare) {
        //         if (stuff)
        //             call(o); // o escapes here.
        //         return;
        //     }
        //     // o doesn't escape down here.
        //
        // In this example, we would place a materialization point at call(o) and then we would find
        // ourselves having to insert another one at the implicit else case of that if statement
        // ('cause we've broken critical edges). We would instead really like to just have one
        // materialization point right at the top of the then case of "if (rare)". To do this, we can
        // find the LCA of the various materializations in the dom tree.
        // https://bugs.webkit.org/show_bug.cgi?id=137124
        
        // First pass: find intra-block materialization points.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            // Tracks which candidates are already materialized at the current
            // point in this block; seeded from the head state.
            HashSet<Node*> materialized;
            for (auto pair : materializedAtHead[block]) {
                if (pair.value && m_sinkCandidates.contains(pair.key))
                    materialized.add(pair.key);
            }
            
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                
                handleNode(
                    node,
                    [&] () { },
                    [&] (Node* escapee) {
                        if (!m_sinkCandidates.contains(escapee))
                            return;
                        
                        // Only the first escape in this block needs a
                        // materialization; later escapes see it already done.
                        if (!materialized.add(escapee).isNewEntry)
                            return;
                        
                        createMaterialize(escapee, node);
                    });
            }
        }
        
        // Second pass: find CFG edge materialization points.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (BasicBlock* successorBlock : block->successors()) {
                for (auto pair : materializedAtHead[successorBlock]) {
                    Node* allocation = pair.key;
                    
                    // We only care if it is materialized in the successor.
                    if (!pair.value)
                        continue;
                    
                    // We only care about sinking candidates.
                    if (!m_sinkCandidates.contains(allocation))
                        continue;
                    
                    // We only care if it isn't materialized in the predecessor.
                    if (materializedAtTail[block].get(allocation))
                        continue;
                    
                    // We need to decide if we put the materialization at the head of the successor,
                    // or the tail of the predecessor. It needs to be placed so that the allocation
                    // is never materialized before, and always materialized after.
                    
                    // Is it never materialized in any of successor's predecessors? I like to think
                    // of "successors' predecessors" and "predecessor's successors" as the "shadow",
                    // because of what it looks like when you draw it.
                    bool neverMaterializedInShadow = true;
                    for (BasicBlock* shadowBlock : successorBlock->predecessors) {
                        if (materializedAtTail[shadowBlock].get(allocation)) {
                            neverMaterializedInShadow = false;
                            break;
                        }
                    }
                    
                    if (neverMaterializedInShadow) {
                        createMaterialize(allocation, successorBlock->firstOriginNode());
                        continue;
                    }
                    
                    // Because we had broken critical edges, it must be the case that the
                    // predecessor's successors all materialize the object. This is because the
                    // previous case is guaranteed to catch the case where the successor only has
                    // one predecessor. When critical edges are broken, this is also the only case
                    // where the predecessor could have had multiple successors. Therefore we have
                    // already handled the case where the predecessor has multiple successors.
                    DFG_ASSERT(m_graph, block, block->numSuccessors() == 1);
                    
                    createMaterialize(allocation, block->last());
                }
            }
        }
    }
343     
    // Treats each sink candidate as an SSA "variable" whose defs are the
    // original allocation plus its materializations, then uses the SSA
    // calculator to insert Phis/Upsilons and rewires all uses of each
    // allocation to whichever node represents it at that program point.
    void placeMaterializationPoints()
    {
        m_ssaCalculator.reset();
        
        // The "variables" are the object allocations that we are sinking. So, nodeToVariable maps
        // sink candidates (aka escapees) to the SSACalculator's notion of Variable, and indexToNode
        // maps in the opposite direction using the SSACalculator::Variable::index() as the key.
        HashMap<Node*, SSACalculator::Variable*> nodeToVariable;
        Vector<Node*> indexToNode;
        
        for (Node* node : m_sinkCandidates) {
            SSACalculator::Variable* variable = m_ssaCalculator.newVariable();
            nodeToVariable.add(node, variable);
            ASSERT(indexToNode.size() == variable->index());
            indexToNode.append(node);
        }
        
        // Both the original allocation and every materialization of it count
        // as defs of the candidate's variable.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block) {
                if (SSACalculator::Variable* variable = nodeToVariable.get(node))
                    m_ssaCalculator.newDef(variable, block, node);
                
                for (Node* materialize : m_materializationSiteToMaterializations.get(node)) {
                    m_ssaCalculator.newDef(
                        nodeToVariable.get(m_materializationToEscapee.get(materialize)),
                        block, materialize);
                }
            }
        }
        
        // Compute Phi placement, pruning Phis for allocations that are not
        // live at the head of the block in question.
        m_ssaCalculator.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                Node* allocation = indexToNode[variable->index()];
                if (!block->ssa->liveAtHead.contains(allocation))
                    return nullptr;
                
                Node* phiNode = m_graph.addNode(allocation->prediction(), Phi, NodeOrigin());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });
        
        // Place Phis in the right places. Replace all uses of any allocation with the appropriate
        // materialization. Create the appropriate Upsilon nodes.
        LocalOSRAvailabilityCalculator availabilityCalculator;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            // For each candidate allocation, the node (original allocation,
            // Phi, or materialization) that currently represents it.
            HashMap<Node*, Node*> mapping;
            
            // Seed the mapping from the reaching defs at the head of the block.
            for (Node* candidate : block->ssa->liveAtHead) {
                SSACalculator::Variable* variable = nodeToVariable.get(candidate);
                if (!variable)
                    continue;
                
                SSACalculator::Def* def = m_ssaCalculator.reachingDefAtHead(block, variable);
                if (!def)
                    continue;
                
                mapping.set(indexToNode[variable->index()], def->value());
            }
            
            availabilityCalculator.beginBlock(block);
            // Insert this block's Phis at the top; each Phi becomes the
            // representative of its allocation.
            for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(block)) {
                m_insertionSet.insert(0, phiDef->value());
                
                Node* originalNode = indexToNode[phiDef->variable()->index()];
                insertOSRHintsForUpdate(
                    m_insertionSet, 0, NodeOrigin(), availabilityCalculator.m_availability,
                    originalNode, phiDef->value());

                mapping.set(originalNode, phiDef->value());
            }
            
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                // Materializations scheduled at this node go right before it
                // and become the representative of their escapee from here on.
                for (Node* materialize : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialize);
                    m_insertionSet.insert(nodeIndex, materialize);
                    insertOSRHintsForUpdate(
                        m_insertionSet, nodeIndex, node->origin,
                        availabilityCalculator.m_availability, escapee, materialize);
                    mapping.set(escapee, materialize);
                }
                    
                availabilityCalculator.executeNode(node);
                
                // Rewire this node's uses of any candidate to its current
                // representative.
                m_graph.doToChildren(
                    node,
                    [&] (Edge& edge) {
                        if (Node* materialize = mapping.get(edge.node()))
                            edge.setNode(materialize);
                    });
            }
            
            // Feed Upsilons (inserted just before the terminal) into the Phis
            // of each successor block.
            size_t upsilonInsertionPoint = block->size() - 1;
            Node* upsilonWhere = block->last();
            NodeOrigin upsilonOrigin = upsilonWhere->origin;
            for (BasicBlock* successorBlock : block->successors()) {
                for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    Node* allocation = indexToNode[variable->index()];
                    
                    Node* originalIncoming = mapping.get(allocation);
                    Node* incoming;
                    if (originalIncoming == allocation) {
                        // If we have a Phi that combines materializations with the original
                        // phantom object, then the path with the phantom object must materialize.
                        
                        incoming = createMaterialize(allocation, upsilonWhere);
                        m_insertionSet.insert(upsilonInsertionPoint, incoming);
                        insertOSRHintsForUpdate(
                            m_insertionSet, upsilonInsertionPoint, upsilonOrigin,
                            availabilityCalculator.m_availability, originalIncoming, incoming);
                    } else
                        incoming = originalIncoming;
                    
                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }
            }
            
            m_insertionSet.execute(block);
        }
        
        // At this point we have dummy materialization nodes along with edges to them. This means
        // that the part of the control flow graph that prefers to see actual object allocations
        // is completely fixed up, except for the materializations themselves.
    }
473     
474     void lowerNonReadingOperationsOnPhantomAllocations()
475     {
476         // Lower everything but reading operations on phantom allocations. We absolutely have to
477         // lower all writes so as to reveal them to the SSA calculator. We cannot lower reads
478         // because the whole point is that those go away completely.
479         
480         for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
481             for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
482                 Node* node = block->at(nodeIndex);
483                 switch (node->op()) {
484                 case PutByOffset: {
485                     if (m_sinkCandidates.contains(node->child2().node()))
486                         node->convertToPutByOffsetHint();
487                     break;
488                 }
489                     
490                 case PutStructure: {
491                     if (m_sinkCandidates.contains(node->child1().node())) {
492                         Node* structure = m_insertionSet.insertConstant(
493                             nodeIndex, node->origin, JSValue(node->transition()->next));
494                         node->convertToPutStructureHint(structure);
495                     }
496                     break;
497                 }
498                     
499                 case NewObject: {
500                     if (m_sinkCandidates.contains(node)) {
501                         Node* structure = m_insertionSet.insertConstant(
502                             nodeIndex + 1, node->origin, JSValue(node->structure()));
503                         m_insertionSet.insertNode(
504                             nodeIndex + 1, SpecNone, PutStructureHint, node->origin,
505                             Edge(node, KnownCellUse), Edge(structure, KnownCellUse));
506                         node->convertToPhantomNewObject();
507                     }
508                     break;
509                 }
510                     
511                 case MaterializeNewObject: {
512                     if (m_sinkCandidates.contains(node)) {
513                         m_insertionSet.insertNode(
514                             nodeIndex + 1, SpecNone, PutStructureHint, node->origin,
515                             Edge(node, KnownCellUse), m_graph.varArgChild(node, 0));
516                         for (unsigned i = 0; i < node->objectMaterializationData().m_properties.size(); ++i) {
517                             m_insertionSet.insertNode(
518                                 nodeIndex + 1, SpecNone, PutByOffsetHint, node->origin,
519                                 Edge(node, KnownCellUse), m_graph.varArgChild(node, i + 1));
520                         }
521                         node->convertToPhantomNewObject();
522                     }
523                     break;
524                 }
525                     
526                 case StoreBarrier:
527                 case StoreBarrierWithNullCheck: {
528                     if (m_sinkCandidates.contains(node->child1().node()))
529                         node->convertToPhantom();
530                     break;
531                 }
532                     
533                 default:
534                     break;
535                 }
536                 
537                 m_graph.doToChildren(
538                     node,
539                     [&] (Edge& edge) {
540                         if (m_sinkCandidates.contains(edge.node()))
541                             edge.setUseKind(KnownCellUse);
542                     });
543             }
544             m_insertionSet.execute(block);
545         }
546     }
547     
548     void promoteSunkenFields()
549     {
550         // Collect the set of heap locations that we will be operating over.
551         HashSet<PromotedHeapLocation> locations;
552         for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
553             for (Node* node : *block) {
554                 promoteHeapAccess(
555                     node,
556                     [&] (PromotedHeapLocation location, Edge) {
557                         locations.add(location);
558                     },
559                     [&] (PromotedHeapLocation location) {
560                         locations.add(location);
561                     });
562             }
563         }
564         
565         // Figure out which locations belong to which allocations.
566         m_locationsForAllocation.clear();
567         for (PromotedHeapLocation location : locations) {
568             auto result = m_locationsForAllocation.add(location.base(), Vector<PromotedHeapLocation>());
569             ASSERT(!result.iterator->value.contains(location));
570             result.iterator->value.append(location);
571         }
572         
573         // For each sunken thingy, make sure we create Bottom values for all of its fields.
574         // Note that this has the hilarious slight inefficiency of creating redundant hints for
575         // things that were previously materializations. This should only impact compile times and
576         // not code quality, and it's necessary for soundness without some data structure hackage.
577         // For example, a MaterializeNewObject that we choose to sink may have new fields added to
578         // it conditionally. That would necessitate Bottoms.
579         Node* bottom = nullptr;
580         for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
581             if (block == m_graph.block(0))
582                 bottom = m_insertionSet.insertNode(0, SpecNone, BottomValue, NodeOrigin());
583             
584             for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
585                 Node* node = block->at(nodeIndex);
586                 for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
587                     m_insertionSet.insert(
588                         nodeIndex + 1, location.createHint(m_graph, node->origin, bottom));
589                 }
590             }
591             m_insertionSet.execute(block);
592         }
593
594         m_ssaCalculator.reset();
595
596         // Collect the set of "variables" that we will be sinking.
597         m_locationToVariable.clear();
598         m_indexToLocation.clear();
599         for (PromotedHeapLocation location : locations) {
600             SSACalculator::Variable* variable = m_ssaCalculator.newVariable();
601             m_locationToVariable.add(location, variable);
602             ASSERT(m_indexToLocation.size() == variable->index());
603             m_indexToLocation.append(location);
604         }
605         
606         // Create Defs from the existing hints.
607         for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
608             for (Node* node : *block) {
609                 promoteHeapAccess(
610                     node,
611                     [&] (PromotedHeapLocation location, Edge value) {
612                         SSACalculator::Variable* variable = m_locationToVariable.get(location);
613                         m_ssaCalculator.newDef(variable, block, value.node());
614                     },
615                     [&] (PromotedHeapLocation) { });
616             }
617         }
618         
619         // OMG run the SSA calculator to create Phis!
620         m_ssaCalculator.computePhis(
621             [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
622                 PromotedHeapLocation location = m_indexToLocation[variable->index()];
623                 if (!block->ssa->liveAtHead.contains(location.base()))
624                     return nullptr;
625                 
626                 Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, NodeOrigin());
627                 phiNode->mergeFlags(NodeResultJS);
628                 return phiNode;
629             });
630         
631         // Place Phis in the right places, replace all uses of any load with the appropriate
632         // value, and create the appropriate Upsilon nodes.
633         m_graph.clearReplacements();
634         for (BasicBlock* block : m_graph.blocksInPreOrder()) {
635             // This mapping table is intended to be lazy. If something is omitted from the table,
636             // it means that there haven't been any local stores to that promoted heap location.
637             m_localMapping.clear();
638             
639             // Insert the Phi functions that we had previously created.
640             for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(block)) {
641                 PromotedHeapLocation location = m_indexToLocation[phiDef->variable()->index()];
642                 
643                 m_insertionSet.insert(
644                     0, phiDef->value());
645                 m_insertionSet.insert(
646                     0, location.createHint(m_graph, NodeOrigin(), phiDef->value()));
647                 m_localMapping.add(location, phiDef->value());
648             }
649             
650             if (verbose)
651                 dataLog("Local mapping at ", pointerDump(block), ": ", mapDump(m_localMapping), "\n");
652             
653             // Process the block and replace all uses of loads with the promoted value.
654             for (Node* node : *block) {
655                 m_graph.performSubstitution(node);
656                 
657                 if (Node* escapee = m_materializationToEscapee.get(node))
658                     populateMaterialize(block, node, escapee);
659                 
660                 promoteHeapAccess(
661                     node,
662                     [&] (PromotedHeapLocation location, Edge value) {
663                         m_localMapping.set(location, value.node());
664                     },
665                     [&] (PromotedHeapLocation location) {
666                         node->replaceWith(resolve(block, location));
667                     });
668             }
669             
670             // Gotta drop some Upsilons.
671             size_t upsilonInsertionPoint = block->size() - 1;
672             NodeOrigin upsilonOrigin = block->last()->origin;
673             for (BasicBlock* successorBlock : block->successors()) {
674                 for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(successorBlock)) {
675                     Node* phiNode = phiDef->value();
676                     SSACalculator::Variable* variable = phiDef->variable();
677                     PromotedHeapLocation location = m_indexToLocation[variable->index()];
678                     Node* incoming = resolve(block, location);
679                     
680                     m_insertionSet.insertNode(
681                         upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
682                         OpInfo(phiNode), incoming->defaultEdge());
683                 }
684             }
685             
686             m_insertionSet.execute(block);
687         }
688     }
689     
690     Node* resolve(BasicBlock* block, PromotedHeapLocation location)
691     {
692         if (Node* result = m_localMapping.get(location))
693             return result;
694         
695         // This implies that there is no local mapping. Find a non-local mapping.
696         SSACalculator::Def* def = m_ssaCalculator.nonLocalReachingDef(
697             block, m_locationToVariable.get(location));
698         ASSERT(def);
699         ASSERT(def->value());
700         m_localMapping.add(location, def->value());
701         return def->value();
702     }
703
    // Classifies how 'node' interacts with object allocations for the purpose
    // of sinking. Calls sinkCandidate() when the node is itself an allocation
    // we could sink, and escape(child) for every child node through which a
    // candidate allocation would become observable (and hence unsinkable).
    template<typename SinkCandidateFunctor, typename EscapeFunctor>
    void handleNode(
        Node* node,
        const SinkCandidateFunctor& sinkCandidate,
        const EscapeFunctor& escape)
    {
        switch (node->op()) {
        case NewObject:
        case MaterializeNewObject:
            // The allocation itself is a candidate, but anything it references
            // through its children escapes into the new object.
            sinkCandidate();
            m_graph.doToChildren(
                node,
                [&] (Edge edge) {
                    escape(edge.node());
                });
            break;
            
        // These operations never cause their children to escape: they are
        // checks, loads, hints, or barriers that the promotion machinery
        // knows how to rewrite or drop for a sunken allocation.
        case CheckStructure:
        case GetByOffset:
        case MultiGetByOffset:
        case PutStructure:
        case GetGetterSetterByOffset:
        case MovHint:
        case Phantom:
        case Check:
        case HardPhantom:
        case StoreBarrier:
        case StoreBarrierWithNullCheck:
        case PutByOffsetHint:
            break;
            
        case PutByOffset:
            // Only the stored value (child3) escapes — it is being published
            // into some object's property storage. The base and storage
            // children do not escape through this store.
            escape(node->child3().node());
            break;
            
        case MultiPutByOffset:
            // FIXME: In the future we should be able to handle this. It's just a matter of
            // building the appropriate *Hint variant of this instruction, along with a
            // PhantomStructureSelect node - since this transforms the Structure in a conditional
            // way.
            // https://bugs.webkit.org/show_bug.cgi?id=136924
            escape(node->child1().node());
            escape(node->child2().node());
            break;

        default:
            // Conservatively treat every other operation as letting all of
            // its children escape.
            m_graph.doToChildren(
                node,
                [&] (Edge edge) {
                    escape(edge.node());
                });
            break;
        }
    }
758     
759     Node* createMaterialize(Node* escapee, Node* where)
760     {
761         Node* result = nullptr;
762         
763         switch (escapee->op()) {
764         case NewObject:
765         case MaterializeNewObject: {
766             ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
767             
768             result = m_graph.addNode(
769                 escapee->prediction(), Node::VarArg, MaterializeNewObject,
770                 NodeOrigin(
771                     escapee->origin.semantic,
772                     where->origin.forExit),
773                 OpInfo(data), OpInfo(), 0, 0);
774             break;
775         }
776             
777         default:
778             DFG_CRASH(m_graph, escapee, "Bad escapee op");
779             break;
780         }
781         
782         if (verbose)
783             dataLog("Creating materialization point at ", where, " for ", escapee, ": ", result, "\n");
784         
785         m_materializationToEscapee.add(result, escapee);
786         m_materializationSiteToMaterializations.add(
787             where, Vector<Node*>()).iterator->value.append(result);
788         
789         return result;
790     }
791     
    // Fills in the var-arg children and ObjectMaterializationData of a
    // materialization node ('node') so that it can recreate 'escapee' from the
    // values currently live in 'block'. Child layout: the structure first,
    // then one child per promoted named property (Bottom values are skipped).
    void populateMaterialize(BasicBlock* block, Node* node, Node* escapee)
    {
        switch (node->op()) {
        case MaterializeNewObject: {
            ObjectMaterializationData& data = node->objectMaterializationData();
            // Children are appended to the graph-wide var-arg pool; remember
            // where ours begin.
            unsigned firstChild = m_graph.m_varArgChildren.size();
            
            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
            
            // Every sunken allocation must at minimum have a promoted
            // structure location.
            PromotedHeapLocation structure(StructurePLoc, escapee);
            ASSERT(locations.contains(structure));
            
            // The structure is always the first child.
            m_graph.m_varArgChildren.append(Edge(resolve(block, structure), KnownCellUse));
            
            for (unsigned i = 0; i < locations.size(); ++i) {
                switch (locations[i].kind()) {
                case StructurePLoc: {
                    // Already emitted above as the first child.
                    ASSERT(locations[i] == structure);
                    break;
                }
                    
                case NamedPropertyPLoc: {
                    Node* value = resolve(block, locations[i]);
                    if (value->op() == BottomValue) {
                        // We can skip Bottoms entirely.
                        break;
                    }
                    
                    // Property metadata and child edges are kept in lockstep:
                    // entry i of m_properties describes child i+1.
                    data.m_properties.append(PhantomPropertyValue(locations[i].info()));
                    m_graph.m_varArgChildren.append(value);
                    break;
                }
                    
                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }
            
            // Point the node at the slice of the var-arg pool we just built.
            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }
            
        default:
            DFG_CRASH(m_graph, node, "Bad materialize op");
            break;
        }
    }
841     
    SSACalculator m_ssaCalculator; // Places Phis for promoted heap locations treated as SSA variables.
    HashSet<Node*> m_sinkCandidates; // Allocations selected as sinking candidates.
    HashMap<Node*, Node*> m_materializationToEscapee; // Materialization node -> the allocation it recreates.
    HashMap<Node*, Vector<Node*>> m_materializationSiteToMaterializations; // Escape-site node -> materializations to insert there.
    HashMap<Node*, Vector<PromotedHeapLocation>> m_locationsForAllocation; // Allocation -> promoted heap locations belonging to it.
    HashMap<PromotedHeapLocation, SSACalculator::Variable*> m_locationToVariable; // Location -> its SSA variable.
    Vector<PromotedHeapLocation> m_indexToLocation; // Inverse of m_locationToVariable, indexed by Variable::index().
    HashMap<PromotedHeapLocation, Node*> m_localMapping; // Per-block cache: location -> node currently holding its value; cleared per block.
    InsertionSet m_insertionSet; // Deferred node insertions, executed once per block.
};
852     
853 bool performObjectAllocationSinking(Graph& graph)
854 {
855     SamplingRegion samplingRegion("DFG Object Allocation Sinking Phase");
856     return runPhase<ObjectAllocationSinkingPhase>(graph);
857 }
858
859 } } // namespace JSC::DFG
860
861 #endif // ENABLE(DFG_JIT)
862