/*
 * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGObjectAllocationSinkingPhase.h"

#if ENABLE(DFG_JIT)

#include "DFGAbstractHeap.h"
#include "DFGBlockMapInlines.h"
#include "DFGClobberize.h"
#include "DFGGraph.h"
#include "DFGInsertOSRHintsForUpdate.h"
#include "DFGInsertionSet.h"
#include "DFGLivenessAnalysisPhase.h"
#include "DFGOSRAvailabilityAnalysisPhase.h"
#include "DFGPhase.h"
#include "DFGPromoteHeapAccess.h"
#include "DFGSSACalculator.h"
#include "DFGValidate.h"
#include "JSCInlines.h"

namespace JSC { namespace DFG {

static bool verbose = false;

class ObjectAllocationSinkingPhase : public Phase {
public:
    ObjectAllocationSinkingPhase(Graph& graph)
        : Phase(graph, "object allocation sinking")
        , m_ssaCalculator(graph)
        , m_insertionSet(graph)
    {
    }

    bool run()
    {
        ASSERT(m_graph.m_fixpointState == FixpointNotConverged);

        m_graph.m_dominators.computeIfNecessary(m_graph);

        // Logically we wish to consider every NewObject and sink it. However it's probably not
        // profitable to sink a NewObject that will always escape. So, first we do a very simple
        // forward flow analysis that determines the set of NewObject nodes that have any chance
        // of benefiting from object allocation sinking. Then we fixpoint the following rules:
        //
        // - For each NewObject, we turn the original NewObject into a PhantomNewObject and then
        //   we insert MaterializeNewObject just before those escaping sites that come before any
        //   other escaping sites - that is, there is no path between the allocation and those sites
        //   that would see any other escape. Note that Upsilons constitute escaping sites. Then we
        //   insert additional MaterializeNewObject nodes on Upsilons that feed into Phis that mix
        //   materializations and the original PhantomNewObject. We then turn each PutByOffset over a
        //   PhantomNewObject into a PutHint.
        //
        // - We perform the same optimization for MaterializeNewObject. This allows us to cover
        //   cases where we had MaterializeNewObject flowing into a PutHint.
        //
        // We could also add this rule:
        //
        // - If all of the Upsilons of a Phi have a MaterializeNewObject that isn't used by anyone
        //   else, then replace the Phi with the MaterializeNewObject.
        //
        //   FIXME: Implement this. Note that this is totally doable but it requires some gnarly
        //   code, and to be effective the pruner needs to be aware of it. Currently any Upsilon
        //   is considered to be an escape even by the pruner, so it's unlikely that we'll see
        //   many cases of Phi over Materializations.
        //   https://bugs.webkit.org/show_bug.cgi?id=136927
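        //
        // As a rough, illustrative sketch of the transformation described above (not the exact
        // IR this phase emits), a fragment like
        //
        //     a: NewObject()
        //        PutByOffset(..., @a, @value)
        //        Branch(#rare, #done)
        //   #rare:
        //        Call(..., @a)      // first escape on this path
        //   #done:
        //        Return(...)        // @a never escapes here
        //
        // ends up shaped roughly like
        //
        //     a: PhantomNewObject()
        //        PutHint(@a, ..., @value)
        //        Branch(#rare, #done)
        //   #rare:
        //     b: MaterializeNewObject()
        //        Call(..., @b)
        //   #done:
        //        Return(...)        // no allocation ever happens on this path
        //
        // with OSR hints inserted so that @a can still be reconstructed on OSR exit along the
        // path that never allocates.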

        if (!performSinking())
            return false;

        while (performSinking()) { }

        if (verbose) {
            dataLog("Graph after sinking:\n");
            m_graph.dump();
        }

        return true;
    }

private:
    bool performSinking()
    {
        m_graph.computeRefCounts();
        performLivenessAnalysis(m_graph);
        performOSRAvailabilityAnalysis(m_graph);

        CString graphBeforeSinking;
        if (Options::verboseValidationFailure() && Options::validateGraphAtEachPhase()) {
            StringPrintStream out;
            m_graph.dump(out);
            graphBeforeSinking = out.toCString();
        }

        if (verbose) {
            dataLog("Graph before sinking:\n");
            m_graph.dump();
        }

        determineMaterializationPoints();
        if (m_sinkCandidates.isEmpty())
            return false;

        // At this point we are committed to sinking the sinking candidates.
        placeMaterializationPoints();
        lowerNonReadingOperationsOnPhantomAllocations();
        promoteSunkenFields();

        if (Options::validateGraphAtEachPhase())
            validate(m_graph, DumpGraph, graphBeforeSinking);

        if (verbose)
            dataLog("Sinking iteration changed the graph.\n");
        return true;
    }

    void determineMaterializationPoints()
    {
        // The premise of this pass is that if there exists a point in the program where some
        // path from a phantom allocation site to that point causes materialization, then *all*
        // paths cause materialization. This should mean that there are never any redundant
        // materializations.

        m_sinkCandidates.clear();
        m_materializationToEscapee.clear();
        m_materializationSiteToMaterializations.clear();

        BlockMap<HashMap<Node*, bool>> materializedAtHead(m_graph);
        BlockMap<HashMap<Node*, bool>> materializedAtTail(m_graph);

        bool changed;
        do {
            if (verbose)
                dataLog("Doing iteration of materialization point placement.\n");
            changed = false;
            for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
                HashMap<Node*, bool> materialized = materializedAtHead[block];
                if (verbose)
                    dataLog("    Looking at block ", pointerDump(block), "\n");
                for (Node* node : *block) {
                    handleNode(
                        node,
                        [&] () {
                            materialized.add(node, false);
                        },
                        [&] (Node* escapee) {
                            auto iter = materialized.find(escapee);
                            if (iter != materialized.end()) {
                                if (verbose)
                                    dataLog("    ", escapee, " escapes at ", node, "\n");
                                iter->value = true;
                            }
                        });
                }

                if (verbose)
                    dataLog("    Materialized at tail of ", pointerDump(block), ": ", mapDump(materialized), "\n");

                if (materialized == materializedAtTail[block])
                    continue;

                materializedAtTail[block] = materialized;
                changed = true;

                // Only propagate things to our successors if they are alive in all successors.
                // So, we prune materialized-at-tail to only include things that are live.
                Vector<Node*> toRemove;
                for (auto pair : materialized) {
                    if (!block->ssa->liveAtTail.contains(pair.key))
                        toRemove.append(pair.key);
                }
                for (Node* key : toRemove)
                    materialized.remove(key);

                for (BasicBlock* successorBlock : block->successors()) {
                    for (auto pair : materialized) {
                        materializedAtHead[successorBlock].add(
                            pair.key, false).iterator->value |= pair.value;
                    }
                }
            }
        } while (changed);

        // Determine the sink candidates. Broadly, a sink candidate is a node that handleNode()
        // believes is sinkable, and one of the following is true:
        //
        // 1) There exists a basic block with only backward outgoing edges (or no outgoing edges)
        //    in which the node wasn't materialized. This is meant to catch effectively-infinite
        //    loops in which we don't need to have allocated the object.
        //
        // 2) There exists a basic block at the tail of which the node is not materialized and the
        //    node is dead.
        //
        // 3) The sum of execution counts of the materializations is less than the sum of
        //    execution counts of the original node.
        //
        // We currently implement only rule #2.
        // FIXME: Implement the two other rules.
        // https://bugs.webkit.org/show_bug.cgi?id=137073 (rule #1)
        // https://bugs.webkit.org/show_bug.cgi?id=137074 (rule #3)
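        //
        // As a rough JS-level example of rule #2:
        //
        //     var o = new Object();
        //     o.f = x;
        //     if (rare)
        //         escape(o);   // o must be materialized on this path...
        //     else
        //         effects();   // ...but on this path o dies without ever escaping.
        //
        // At the tail of the else block the allocation is dead and was never materialized, so
        // the NewObject becomes a sink candidate and the actual allocation only happens on the
        // rare path.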

        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (auto pair : materializedAtTail[block]) {
                if (pair.value)
                    continue; // It was materialized.

                if (block->ssa->liveAtTail.contains(pair.key))
                    continue; // It might still get materialized in all of the successors.

                // We know that it died in this block and it wasn't materialized. That means that
                // if we sink this allocation, then *this* will be a path along which we never
                // have to allocate. Profit!
                m_sinkCandidates.add(pair.key);
            }
        }

        if (m_sinkCandidates.isEmpty())
            return;

        // A materialization edge exists at any point where a node escapes but hasn't been
        // materialized yet. We do this in two parts. First we find all of the nodes that cause
        // escaping to happen, where the escapee had not yet been materialized. This catches
        // everything but loops. We then catch loops - as well as weirder control flow constructs -
        // in a subsequent pass that looks at places in the CFG where an edge exists from a block
        // that hasn't materialized to a block that has. We insert the materialization along such an
        // edge, and we rely on the fact that critical edges were already broken so that we actually
        // either insert the materialization at the head of the successor or the tail of the
        // predecessor.
        //
        // FIXME: This can create duplicate allocations when we really only needed to perform one.
        // For example:
        //
        //     var o = new Object();
        //     if (rare) {
        //         if (stuff)
        //             call(o); // o escapes here.
        //         return;
        //     }
        //     // o doesn't escape down here.
        //
        // In this example, we would place a materialization point at call(o) and then we would find
        // ourselves having to insert another one at the implicit else case of that if statement
        // ('cause we've broken critical edges). We would instead really like to just have one
        // materialization point right at the top of the then case of "if (rare)". To do this, we can
        // find the LCA of the various materializations in the dom tree.
        // https://bugs.webkit.org/show_bug.cgi?id=137124

        // First pass: find intra-block materialization points.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            HashSet<Node*> materialized;
            for (auto pair : materializedAtHead[block]) {
                if (pair.value && m_sinkCandidates.contains(pair.key))
                    materialized.add(pair.key);
            }

            if (verbose)
                dataLog("Placing materialization points in ", pointerDump(block), " with materialization set ", listDump(materialized), "\n");

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                handleNode(
                    node,
                    [&] () { },
                    [&] (Node* escapee) {
                        if (verbose)
                            dataLog("Seeing ", escapee, " escape at ", node, "\n");

                        if (!m_sinkCandidates.contains(escapee)) {
                            if (verbose)
                                dataLog("    It's not a sink candidate.\n");
                            return;
                        }

                        if (!materialized.add(escapee).isNewEntry) {
                            if (verbose)
                                dataLog("    It's already materialized.\n");
                            return;
                        }

                        createMaterialize(escapee, node);
                    });
            }
        }

        // Second pass: find CFG edge materialization points.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (BasicBlock* successorBlock : block->successors()) {
                for (auto pair : materializedAtHead[successorBlock]) {
                    Node* allocation = pair.key;

                    // We only care if it is materialized in the successor.
                    if (!pair.value)
                        continue;

                    // We only care about sinking candidates.
                    if (!m_sinkCandidates.contains(allocation))
                        continue;

                    // We only care if it isn't materialized in the predecessor.
                    if (materializedAtTail[block].get(allocation))
                        continue;

                    // We need to decide if we put the materialization at the head of the successor,
                    // or the tail of the predecessor. It needs to be placed so that the allocation
                    // is never materialized before, and always materialized after.

                    // Is it never materialized in any of successor's predecessors? I like to think
                    // of "successors' predecessors" and "predecessor's successors" as the "shadow",
                    // because of what it looks like when you draw it.
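                    // Roughly, the loop below walks one half of that shadow - every predecessor
                    // of successorBlock - for example:
                    //
                    //     block    shadowBlock
                    //         \    /
                    //          \  /
                    //     successorBlock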
                    bool neverMaterializedInShadow = true;
                    for (BasicBlock* shadowBlock : successorBlock->predecessors) {
                        if (materializedAtTail[shadowBlock].get(allocation)) {
                            neverMaterializedInShadow = false;
                            break;
                        }
                    }

                    if (neverMaterializedInShadow) {
                        createMaterialize(allocation, successorBlock->firstOriginNode());
                        continue;
                    }

                    // Because we had broken critical edges, it must be the case that the
                    // predecessor's successors all materialize the object. This is because the
                    // previous case is guaranteed to catch the case where the successor only has
                    // one predecessor. When critical edges are broken, this is also the only case
                    // where the predecessor could have had multiple successors. Therefore we have
                    // already handled the case where the predecessor has multiple successors.
                    DFG_ASSERT(m_graph, block, block->numSuccessors() == 1);

                    createMaterialize(allocation, block->last());
                }
            }
        }
    }

    void placeMaterializationPoints()
    {
        m_ssaCalculator.reset();

        // The "variables" are the object allocations that we are sinking. So, nodeToVariable maps
        // sink candidates (aka escapees) to the SSACalculator's notion of Variable, and indexToNode
        // maps in the opposite direction using the SSACalculator::Variable::index() as the key.
        HashMap<Node*, SSACalculator::Variable*> nodeToVariable;
        Vector<Node*> indexToNode;

        for (Node* node : m_sinkCandidates) {
            SSACalculator::Variable* variable = m_ssaCalculator.newVariable();
            nodeToVariable.add(node, variable);
            ASSERT(indexToNode.size() == variable->index());
            indexToNode.append(node);
        }

        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block) {
                if (SSACalculator::Variable* variable = nodeToVariable.get(node))
                    m_ssaCalculator.newDef(variable, block, node);

                for (Node* materialize : m_materializationSiteToMaterializations.get(node)) {
                    m_ssaCalculator.newDef(
                        nodeToVariable.get(m_materializationToEscapee.get(materialize)),
                        block, materialize);
                }
            }
        }

        m_ssaCalculator.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                Node* allocation = indexToNode[variable->index()];
                if (!block->ssa->liveAtHead.contains(allocation))
                    return nullptr;

                Node* phiNode = m_graph.addNode(allocation->prediction(), Phi, NodeOrigin());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });

        // Place Phis in the right places. Replace all uses of any allocation with the appropriate
        // materialization. Create the appropriate Upsilon nodes.
        LocalOSRAvailabilityCalculator availabilityCalculator;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            HashMap<Node*, Node*> mapping;

            for (Node* candidate : block->ssa->liveAtHead) {
                SSACalculator::Variable* variable = nodeToVariable.get(candidate);
                if (!variable)
                    continue;

                SSACalculator::Def* def = m_ssaCalculator.reachingDefAtHead(block, variable);
                if (!def)
                    continue;

                mapping.set(indexToNode[variable->index()], def->value());
            }

            availabilityCalculator.beginBlock(block);
            for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(block)) {
                m_insertionSet.insert(0, phiDef->value());

                Node* originalNode = indexToNode[phiDef->variable()->index()];
                insertOSRHintsForUpdate(
                    m_insertionSet, 0, NodeOrigin(), availabilityCalculator.m_availability,
                    originalNode, phiDef->value());

                mapping.set(originalNode, phiDef->value());
            }

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                for (Node* materialize : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialize);
                    m_insertionSet.insert(nodeIndex, materialize);
                    insertOSRHintsForUpdate(
                        m_insertionSet, nodeIndex, node->origin,
                        availabilityCalculator.m_availability, escapee, materialize);
                    mapping.set(escapee, materialize);
                }

                availabilityCalculator.executeNode(node);

                m_graph.doToChildren(
                    node,
                    [&] (Edge& edge) {
                        if (Node* materialize = mapping.get(edge.node()))
                            edge.setNode(materialize);
                    });

                // If you cause an escape, you shouldn't see the original escapee.
                if (validationEnabled()) {
                    handleNode(
                        node,
                        [&] () { },
                        [&] (Node* escapee) {
                            DFG_ASSERT(m_graph, node, !m_sinkCandidates.contains(escapee));
                        });
                }
            }

            size_t upsilonInsertionPoint = block->size() - 1;
            Node* upsilonWhere = block->last();
            NodeOrigin upsilonOrigin = upsilonWhere->origin;
            for (BasicBlock* successorBlock : block->successors()) {
                for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    Node* allocation = indexToNode[variable->index()];

                    Node* originalIncoming = mapping.get(allocation);
                    Node* incoming;
                    if (originalIncoming == allocation) {
                        // If we have a Phi that combines materializations with the original
                        // phantom object, then the path with the phantom object must materialize.

                        incoming = createMaterialize(allocation, upsilonWhere);
                        m_insertionSet.insert(upsilonInsertionPoint, incoming);
                        insertOSRHintsForUpdate(
                            m_insertionSet, upsilonInsertionPoint, upsilonOrigin,
                            availabilityCalculator.m_availability, originalIncoming, incoming);
                    } else
                        incoming = originalIncoming;

                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }
            }

            m_insertionSet.execute(block);
        }

        // At this point we have dummy materialization nodes along with edges to them. This means
        // that the part of the control flow graph that prefers to see actual object allocations
        // is completely fixed up, except for the materializations themselves.
    }

    void lowerNonReadingOperationsOnPhantomAllocations()
    {
        // Lower everything but reading operations on phantom allocations. We absolutely have to
        // lower all writes so as to reveal them to the SSA calculator. We cannot lower reads
        // because the whole point is that those go away completely.
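        //
        // For example (informally), a PutByOffset whose base is a sink candidate becomes a hint
        // on the corresponding promoted heap location, and a sunk NewObject becomes a
        // PhantomNewObject preceded by a hint recording its structure. A GetByOffset on the same
        // base is deliberately left alone here; promoteSunkenFields() resolves it later.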

        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                switch (node->op()) {
                case PutByOffset: {
                    if (m_sinkCandidates.contains(node->child2().node()))
                        node->convertToPutByOffsetHint();
                    break;
                }

                case PutStructure: {
                    if (m_sinkCandidates.contains(node->child1().node())) {
                        Node* structure = m_insertionSet.insertConstant(
                            nodeIndex, node->origin, JSValue(node->transition()->next));
                        node->convertToPutStructureHint(structure);
                    }
                    break;
                }

                case NewObject: {
                    if (m_sinkCandidates.contains(node)) {
                        Node* structure = m_insertionSet.insertConstant(
                            nodeIndex + 1, node->origin, JSValue(node->structure()));
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            PromotedHeapLocation(StructurePLoc, node).createHint(
                                m_graph, node->origin, structure));
                        node->convertToPhantomNewObject();
                    }
                    break;
                }

                case MaterializeNewObject: {
                    if (m_sinkCandidates.contains(node)) {
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            PromotedHeapLocation(StructurePLoc, node).createHint(
                                m_graph, node->origin, m_graph.varArgChild(node, 0).node()));
                        for (unsigned i = 0; i < node->objectMaterializationData().m_properties.size(); ++i) {
                            unsigned identifierNumber =
                                node->objectMaterializationData().m_properties[i].m_identifierNumber;
                            m_insertionSet.insert(
                                nodeIndex + 1,
                                PromotedHeapLocation(
                                    NamedPropertyPLoc, node, identifierNumber).createHint(
                                    m_graph, node->origin,
                                    m_graph.varArgChild(node, i + 1).node()));
                        }
                        node->convertToPhantomNewObject();
                    }
                    break;
                }

                case StoreBarrier:
                case StoreBarrierWithNullCheck: {
                    if (m_sinkCandidates.contains(node->child1().node()))
                        node->convertToPhantom();
                    break;
                }

                default:
                    break;
                }

                m_graph.doToChildren(
                    node,
                    [&] (Edge& edge) {
                        if (m_sinkCandidates.contains(edge.node()))
                            edge.setUseKind(KnownCellUse);
                    });
            }
            m_insertionSet.execute(block);
        }
    }

    void promoteSunkenFields()
    {
        // Collect the set of heap locations that we will be operating over.
        HashSet<PromotedHeapLocation> locations;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block) {
                promoteHeapAccess(
                    node,
                    [&] (PromotedHeapLocation location, Edge) {
                        if (m_sinkCandidates.contains(location.base()))
                            locations.add(location);
                    },
                    [&] (PromotedHeapLocation location) {
                        if (m_sinkCandidates.contains(location.base()))
                            locations.add(location);
                    });
            }
        }

        // Figure out which locations belong to which allocations.
        m_locationsForAllocation.clear();
        for (PromotedHeapLocation location : locations) {
            auto result = m_locationsForAllocation.add(location.base(), Vector<PromotedHeapLocation>());
            ASSERT(!result.iterator->value.contains(location));
            result.iterator->value.append(location);
        }

        // For each sunken thingy, make sure we create Bottom values for all of its fields.
        // Note that this has the hilarious slight inefficiency of creating redundant hints for
        // things that were previously materializations. This should only impact compile times and
        // not code quality, and it's necessary for soundness without some data structure hackage.
        // For example, a MaterializeNewObject that we choose to sink may have new fields added to
        // it conditionally. That would necessitate Bottoms.
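        //
        // A rough JS-level picture of that situation:
        //
        //     var o = {};      // sunk allocation
        //     if (p)
        //         o.f = 1;     // the field f only gets a value on this path
        //     ... o.f ...      // here the promoted value of f is either 1 or Bottom
        //
        // The Bottom hints inserted below give the no-store path a well-defined value to feed
        // into the Phi for f.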
        Node* bottom = nullptr;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            if (block == m_graph.block(0))
                bottom = m_insertionSet.insertNode(0, SpecNone, BottomValue, NodeOrigin());

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
                    m_insertionSet.insert(
                        nodeIndex + 1, location.createHint(m_graph, node->origin, bottom));
                }
            }
            m_insertionSet.execute(block);
        }

        m_ssaCalculator.reset();

        // Collect the set of "variables" that we will be sinking.
        m_locationToVariable.clear();
        m_indexToLocation.clear();
        for (PromotedHeapLocation location : locations) {
            SSACalculator::Variable* variable = m_ssaCalculator.newVariable();
            m_locationToVariable.add(location, variable);
            ASSERT(m_indexToLocation.size() == variable->index());
            m_indexToLocation.append(location);
        }

        // Create Defs from the existing hints.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block) {
                promoteHeapAccess(
                    node,
                    [&] (PromotedHeapLocation location, Edge value) {
                        if (!m_sinkCandidates.contains(location.base()))
                            return;
                        SSACalculator::Variable* variable = m_locationToVariable.get(location);
                        m_ssaCalculator.newDef(variable, block, value.node());
                    },
                    [&] (PromotedHeapLocation) { });
            }
        }

        // OMG run the SSA calculator to create Phis!
        m_ssaCalculator.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                PromotedHeapLocation location = m_indexToLocation[variable->index()];
                if (!block->ssa->liveAtHead.contains(location.base()))
                    return nullptr;

                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, NodeOrigin());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });

        // Place Phis in the right places, replace all uses of any load with the appropriate
        // value, and create the appropriate Upsilon nodes.
        m_graph.clearReplacements();
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            // This mapping table is intended to be lazy. If something is omitted from the table,
            // it means that there haven't been any local stores to that promoted heap location.
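            // (resolve() below handles that case by falling back to the SSA calculator's
            // non-local reaching def.)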
            m_localMapping.clear();

            // Insert the Phi functions that we had previously created.
            for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(block)) {
                PromotedHeapLocation location = m_indexToLocation[phiDef->variable()->index()];

                m_insertionSet.insert(
                    0, phiDef->value());
                m_insertionSet.insert(
                    0, location.createHint(m_graph, NodeOrigin(), phiDef->value()));
                m_localMapping.add(location, phiDef->value());
            }

            if (verbose)
                dataLog("Local mapping at ", pointerDump(block), ": ", mapDump(m_localMapping), "\n");

            // Process the block and replace all uses of loads with the promoted value.
            for (Node* node : *block) {
                m_graph.performSubstitution(node);

                if (Node* escapee = m_materializationToEscapee.get(node))
                    populateMaterialize(block, node, escapee);

                promoteHeapAccess(
                    node,
                    [&] (PromotedHeapLocation location, Edge value) {
                        if (m_sinkCandidates.contains(location.base()))
                            m_localMapping.set(location, value.node());
                    },
                    [&] (PromotedHeapLocation location) {
                        if (m_sinkCandidates.contains(location.base()))
                            node->replaceWith(resolve(block, location));
                    });
            }

            // Gotta drop some Upsilons.
            size_t upsilonInsertionPoint = block->size() - 1;
            NodeOrigin upsilonOrigin = block->last()->origin;
            for (BasicBlock* successorBlock : block->successors()) {
                for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    PromotedHeapLocation location = m_indexToLocation[variable->index()];
                    Node* incoming = resolve(block, location);

                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }
            }

            m_insertionSet.execute(block);
        }
    }

    Node* resolve(BasicBlock* block, PromotedHeapLocation location)
    {
        if (Node* result = m_localMapping.get(location))
            return result;

        // This implies that there is no local mapping. Find a non-local mapping.
        SSACalculator::Def* def = m_ssaCalculator.nonLocalReachingDef(
            block, m_locationToVariable.get(location));
        ASSERT(def);
        ASSERT(def->value());
        m_localMapping.add(location, def->value());
        return def->value();
    }

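    // Classifies a node for this phase: sinkCandidate() is invoked if the node is an allocation
    // we know how to sink, and escape(child) is invoked for each child that the node causes to
    // escape. The middle group of cases covers operations over phantom allocations that we know
    // how to lower or ignore, so they escape nothing.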
    template<typename SinkCandidateFunctor, typename EscapeFunctor>
    void handleNode(
        Node* node,
        const SinkCandidateFunctor& sinkCandidate,
        const EscapeFunctor& escape)
    {
        switch (node->op()) {
        case NewObject:
        case MaterializeNewObject:
            sinkCandidate();
            m_graph.doToChildren(
                node,
                [&] (Edge edge) {
                    escape(edge.node());
                });
            break;

        case CheckStructure:
        case GetByOffset:
        case MultiGetByOffset:
        case PutStructure:
        case GetGetterSetterByOffset:
        case MovHint:
        case Phantom:
        case Check:
        case HardPhantom:
        case StoreBarrier:
        case StoreBarrierWithNullCheck:
        case PutHint:
            break;

        case PutByOffset:
            escape(node->child3().node());
            break;

        case MultiPutByOffset:
            // FIXME: In the future we should be able to handle this. It's just a matter of
            // building the appropriate *Hint variant of this instruction, along with a
            // PhantomStructureSelect node - since this transforms the Structure in a conditional
            // way.
            // https://bugs.webkit.org/show_bug.cgi?id=136924
            escape(node->child1().node());
            escape(node->child2().node());
            break;

        default:
            m_graph.doToChildren(
                node,
                [&] (Edge edge) {
                    escape(edge.node());
                });
            break;
        }
    }

    Node* createMaterialize(Node* escapee, Node* where)
    {
        Node* result = nullptr;

        switch (escapee->op()) {
        case NewObject:
        case MaterializeNewObject: {
            ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();

            result = m_graph.addNode(
                escapee->prediction(), Node::VarArg, MaterializeNewObject,
                NodeOrigin(
                    escapee->origin.semantic,
                    where->origin.forExit),
                OpInfo(data), OpInfo(), 0, 0);
            break;
        }

        default:
            DFG_CRASH(m_graph, escapee, "Bad escapee op");
            break;
        }

        if (verbose)
            dataLog("Creating materialization point at ", where, " for ", escapee, ": ", result, "\n");

        m_materializationToEscapee.add(result, escapee);
        m_materializationSiteToMaterializations.add(
            where, Vector<Node*>()).iterator->value.append(result);

        return result;
    }

    void populateMaterialize(BasicBlock* block, Node* node, Node* escapee)
    {
        switch (node->op()) {
        case MaterializeNewObject: {
            ObjectMaterializationData& data = node->objectMaterializationData();
            unsigned firstChild = m_graph.m_varArgChildren.size();

            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);

            PromotedHeapLocation structure(StructurePLoc, escapee);
            ASSERT(locations.contains(structure));

            m_graph.m_varArgChildren.append(Edge(resolve(block, structure), KnownCellUse));

            for (unsigned i = 0; i < locations.size(); ++i) {
                switch (locations[i].kind()) {
                case StructurePLoc: {
                    ASSERT(locations[i] == structure);
                    break;
                }

                case NamedPropertyPLoc: {
                    Node* value = resolve(block, locations[i]);
                    if (value->op() == BottomValue) {
                        // We can skip Bottoms entirely.
                        break;
                    }

                    data.m_properties.append(PhantomPropertyValue(locations[i].info()));
                    m_graph.m_varArgChildren.append(value);
                    break;
                }

                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }

            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }

        default:
            DFG_CRASH(m_graph, node, "Bad materialize op");
            break;
        }
    }

    SSACalculator m_ssaCalculator;
    HashSet<Node*> m_sinkCandidates;
    HashMap<Node*, Node*> m_materializationToEscapee;
    HashMap<Node*, Vector<Node*>> m_materializationSiteToMaterializations;
    HashMap<Node*, Vector<PromotedHeapLocation>> m_locationsForAllocation;
    HashMap<PromotedHeapLocation, SSACalculator::Variable*> m_locationToVariable;
    Vector<PromotedHeapLocation> m_indexToLocation;
    HashMap<PromotedHeapLocation, Node*> m_localMapping;
    InsertionSet m_insertionSet;
};

bool performObjectAllocationSinking(Graph& graph)
{
    SamplingRegion samplingRegion("DFG Object Allocation Sinking Phase");
    return runPhase<ObjectAllocationSinkingPhase>(graph);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)