Rename HardPhantom to MustGenerate.
Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp
/*
 * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGObjectAllocationSinkingPhase.h"

#if ENABLE(DFG_JIT)

#include "DFGAbstractHeap.h"
#include "DFGBlockMapInlines.h"
#include "DFGClobberize.h"
#include "DFGGraph.h"
#include "DFGInsertOSRHintsForUpdate.h"
#include "DFGInsertionSet.h"
#include "DFGLivenessAnalysisPhase.h"
#include "DFGOSRAvailabilityAnalysisPhase.h"
#include "DFGPhase.h"
#include "DFGPromoteHeapAccess.h"
#include "DFGSSACalculator.h"
#include "DFGValidate.h"
#include "JSCInlines.h"

namespace JSC { namespace DFG {

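// Set this to true to get dataLog() dumps of what this phase is doing.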
static bool verbose = false;

class ObjectAllocationSinkingPhase : public Phase {
public:
    ObjectAllocationSinkingPhase(Graph& graph)
        : Phase(graph, "object allocation sinking")
        , m_ssaCalculator(graph)
        , m_insertionSet(graph)
    {
    }

    bool run()
    {
        ASSERT(m_graph.m_fixpointState == FixpointNotConverged);

        m_graph.m_dominators.computeIfNecessary(m_graph);

        // Logically we wish to consider every NewObject and sink it. However, it's probably not
        // profitable to sink a NewObject that will always escape. So, first we do a very simple
        // forward flow analysis that determines the set of NewObject nodes that have any chance
        // of benefiting from object allocation sinking. Then we fixpoint the following rules:
        //
        // - For each NewObject, we turn the original NewObject into a PhantomNewObject and then
        //   we insert MaterializeNewObject just before those escaping sites that come before any
        //   other escaping sites - that is, there is no path between the allocation and those sites
        //   that would see any other escape. Note that Upsilons constitute escaping sites. Then we
        //   insert additional MaterializeNewObject nodes on Upsilons that feed into Phis that mix
        //   materializations and the original PhantomNewObject. We then turn each PutByOffset over a
        //   PhantomNewObject into a PutHint.
        //
        // - We perform the same optimization for MaterializeNewObject. This allows us to cover
        //   cases where we had MaterializeNewObject flowing into a PutHint.
        //
        // We could also add this rule:
        //
        // - If all of the Upsilons of a Phi have a MaterializeNewObject that isn't used by anyone
        //   else, then replace the Phi with the MaterializeNewObject.
        //
        //   FIXME: Implement this. Note that this is totally doable, but it requires some gnarly
        //   code, and to be effective the pruner needs to be aware of it. Currently any Upsilon
        //   is considered to be an escape even by the pruner, so it's unlikely that we'll see
        //   many cases of Phi over Materializations.
        //   https://bugs.webkit.org/show_bug.cgi?id=136927

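        // As a rough illustration of the first rule above (illustrative pseudo-IR, not actual
        // compiler output), a fragment like
        //
        //     a: NewObject
        //     b: PutByOffset(@a, @value)
        //     c: Call(@callee, @a)          <-- first escape of @a
        //
        // becomes
        //
        //     a: PhantomNewObject
        //     b: PutHint(@a, @value)
        //     m: MaterializeNewObject       <-- inserted just before the escape
        //     c: Call(@callee, @m)
        //
        // so the actual allocation only happens on paths that reach the escape.
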
        if (!performSinking())
            return false;

        while (performSinking()) { }

        if (verbose) {
            dataLog("Graph after sinking:\n");
            m_graph.dump();
        }

        return true;
    }

private:
    bool performSinking()
    {
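        // Note that the analyses below are what the rest of this iteration relies on: liveness
        // gives us the liveAtHead/liveAtTail sets we consult when choosing materialization
        // points, and OSR availability is needed when we insert OSR hints next to the new
        // materializations. They are recomputed on every iteration because sinking changes the
        // graph.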
        m_graph.computeRefCounts();
        performLivenessAnalysis(m_graph);
        performOSRAvailabilityAnalysis(m_graph);

        CString graphBeforeSinking;
        if (Options::verboseValidationFailure() && Options::validateGraphAtEachPhase()) {
            StringPrintStream out;
            m_graph.dump(out);
            graphBeforeSinking = out.toCString();
        }

        if (verbose) {
            dataLog("Graph before sinking:\n");
            m_graph.dump();
        }

        determineMaterializationPoints();
        if (m_sinkCandidates.isEmpty())
            return false;

        // At this point we are committed to sinking the sink candidates.
        placeMaterializationPoints();
        lowerNonReadingOperationsOnPhantomAllocations();
        promoteSunkenFields();

        if (Options::validateGraphAtEachPhase())
            validate(m_graph, DumpGraph, graphBeforeSinking);

        if (verbose)
            dataLog("Sinking iteration changed the graph.\n");
        return true;
    }

    void determineMaterializationPoints()
    {
        // The premise of this pass is that if there exists a point in the program where some
        // path from a phantom allocation site to that point causes materialization, then *all*
        // paths cause materialization. This should mean that there are never any redundant
        // materializations.

        m_sinkCandidates.clear();
        m_materializationToEscapee.clear();
        m_materializationSiteToMaterializations.clear();

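        // In the maps below, the presence of a key at some program point means that the
        // allocation exists there; the boolean value records whether it has escaped (and so
        // would have been materialized) along some path to that point: false until we see an
        // escape, true afterwards.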
        BlockMap<HashMap<Node*, bool>> materializedAtHead(m_graph);
        BlockMap<HashMap<Node*, bool>> materializedAtTail(m_graph);

        bool changed;
        do {
            if (verbose)
                dataLog("Doing iteration of materialization point placement.\n");
            changed = false;
            for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
                HashMap<Node*, bool> materialized = materializedAtHead[block];
                if (verbose)
                    dataLog("    Looking at block ", pointerDump(block), "\n");
                for (Node* node : *block) {
                    handleNode(
                        node,
                        [&] () {
                            materialized.add(node, false);
                        },
                        [&] (Node* escapee) {
                            auto iter = materialized.find(escapee);
                            if (iter != materialized.end()) {
                                if (verbose)
                                    dataLog("    ", escapee, " escapes at ", node, "\n");
                                iter->value = true;
                            }
                        });
                }

                if (verbose)
                    dataLog("    Materialized at tail of ", pointerDump(block), ": ", mapDump(materialized), "\n");

                if (materialized == materializedAtTail[block])
                    continue;

                materializedAtTail[block] = materialized;
                changed = true;

                // Only propagate things to our successors if they are alive in all successors.
                // So, we prune materialized-at-tail to only include things that are live.
                Vector<Node*> toRemove;
                for (auto pair : materialized) {
                    if (!block->ssa->liveAtTail.contains(pair.key))
                        toRemove.append(pair.key);
                }
                for (Node* key : toRemove)
                    materialized.remove(key);

                for (BasicBlock* successorBlock : block->successors()) {
                    for (auto pair : materialized) {
                        materializedAtHead[successorBlock].add(
                            pair.key, false).iterator->value |= pair.value;
                    }
                }
            }
        } while (changed);

        // Determine the sink candidates. Broadly, a sink candidate is a node that handleNode()
        // believes is sinkable, and one of the following is true:
        //
        // 1) There exists a basic block with only backward outgoing edges (or no outgoing edges)
        //    in which the node wasn't materialized. This is meant to catch effectively-infinite
        //    loops in which we don't need to have allocated the object.
        //
        // 2) There exists a basic block at the tail of which the node is not materialized and the
        //    node is dead.
        //
        // 3) The sum of execution counts of the materializations is less than the sum of
        //    execution counts of the original node.
        //
        // We currently implement only rule #2.
        // FIXME: Implement the two other rules.
        // https://bugs.webkit.org/show_bug.cgi?id=137073 (rule #1)
        // https://bugs.webkit.org/show_bug.cgi?id=137074 (rule #3)

        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (auto pair : materializedAtTail[block]) {
                if (pair.value)
                    continue; // It was materialized.

                if (block->ssa->liveAtTail.contains(pair.key))
                    continue; // It might still get materialized in all of the successors.

                // We know that it died in this block and it wasn't materialized. That means that
                // if we sink this allocation, then *this* will be a path along which we never
                // have to allocate. Profit!
                m_sinkCandidates.add(pair.key);
            }
        }

        if (m_sinkCandidates.isEmpty())
            return;

        // A materialization edge exists at any point where a node escapes but hasn't been
        // materialized yet. We do this in two parts. First we find all of the nodes that cause
        // escaping to happen, where the escapee had not yet been materialized. This catches
        // everything but loops. We then catch loops - as well as weirder control flow constructs -
        // in a subsequent pass that looks at places in the CFG where an edge exists from a block
        // that hasn't materialized to a block that has. We insert the materialization along such an
        // edge, and we rely on the fact that critical edges were already broken so that we actually
        // either insert the materialization at the head of the successor or the tail of the
        // predecessor.
        //
        // FIXME: This can create duplicate allocations when we really only needed to perform one.
        // For example:
        //
        //     var o = new Object();
        //     if (rare) {
        //         if (stuff)
        //             call(o); // o escapes here.
        //         return;
        //     }
        //     // o doesn't escape down here.
        //
        // In this example, we would place a materialization point at call(o) and then we would find
        // ourselves having to insert another one at the implicit else case of that if statement
        // ('cause we've broken critical edges). We would instead really like to just have one
        // materialization point right at the top of the then case of "if (rare)". To do this, we can
        // find the LCA of the various materializations in the dom tree.
        // https://bugs.webkit.org/show_bug.cgi?id=137124

        // First pass: find intra-block materialization points.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            HashSet<Node*> materialized;
            for (auto pair : materializedAtHead[block]) {
                if (pair.value && m_sinkCandidates.contains(pair.key))
                    materialized.add(pair.key);
            }

            if (verbose)
                dataLog("Placing materialization points in ", pointerDump(block), " with materialization set ", listDump(materialized), "\n");

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                handleNode(
                    node,
                    [&] () { },
                    [&] (Node* escapee) {
                        if (verbose)
                            dataLog("Seeing ", escapee, " escape at ", node, "\n");

                        if (!m_sinkCandidates.contains(escapee)) {
                            if (verbose)
                                dataLog("    It's not a sink candidate.\n");
                            return;
                        }

                        if (!materialized.add(escapee).isNewEntry) {
                            if (verbose)
                                dataLog("   It's already materialized.\n");
                            return;
                        }

                        createMaterialize(escapee, node);
                    });
            }
        }

        // Second pass: find CFG edge materialization points.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (BasicBlock* successorBlock : block->successors()) {
                for (auto pair : materializedAtHead[successorBlock]) {
                    Node* allocation = pair.key;

                    // We only care if it is materialized in the successor.
                    if (!pair.value)
                        continue;

                    // We only care about sinking candidates.
                    if (!m_sinkCandidates.contains(allocation))
                        continue;

                    // We only care if it isn't materialized in the predecessor.
                    if (materializedAtTail[block].get(allocation))
                        continue;

                    // We need to decide if we put the materialization at the head of the successor,
                    // or the tail of the predecessor. It needs to be placed so that the allocation
                    // is never materialized before, and always materialized after.

                    // Is it never materialized in any of successor's predecessors? I like to think
                    // of "successors' predecessors" and "predecessor's successors" as the "shadow",
                    // because of what it looks like when you draw it.
                    bool neverMaterializedInShadow = true;
                    for (BasicBlock* shadowBlock : successorBlock->predecessors) {
                        if (materializedAtTail[shadowBlock].get(allocation)) {
                            neverMaterializedInShadow = false;
                            break;
                        }
                    }

                    if (neverMaterializedInShadow) {
                        createMaterialize(allocation, successorBlock->firstOriginNode());
                        continue;
                    }

                    // Because we had broken critical edges, it must be the case that the
                    // predecessor's successors all materialize the object. This is because the
                    // previous case is guaranteed to catch the case where the successor only has
                    // one predecessor. When critical edges are broken, this is also the only case
                    // where the predecessor could have had multiple successors. Therefore we have
                    // already handled the case where the predecessor has multiple successors.
                    DFG_ASSERT(m_graph, block, block->numSuccessors() == 1);

                    createMaterialize(allocation, block->terminal());
                }
            }
        }
    }

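    // Turns the plan computed by determineMaterializationPoints() into IR: the dummy
    // MaterializeNewObject nodes are inserted at their chosen sites, and SSA form over the sink
    // candidates is restored by adding Phis and Upsilons (plus OSR availability hints) that tie
    // the materializations together.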
    void placeMaterializationPoints()
    {
        m_ssaCalculator.reset();

        // The "variables" are the object allocations that we are sinking. So, nodeToVariable maps
        // sink candidates (aka escapees) to the SSACalculator's notion of Variable, and indexToNode
        // maps in the opposite direction using the SSACalculator::Variable::index() as the key.
        HashMap<Node*, SSACalculator::Variable*> nodeToVariable;
        Vector<Node*> indexToNode;

        for (Node* node : m_sinkCandidates) {
            SSACalculator::Variable* variable = m_ssaCalculator.newVariable();
            nodeToVariable.add(node, variable);
            ASSERT(indexToNode.size() == variable->index());
            indexToNode.append(node);
        }

        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block) {
                if (SSACalculator::Variable* variable = nodeToVariable.get(node))
                    m_ssaCalculator.newDef(variable, block, node);

                for (Node* materialize : m_materializationSiteToMaterializations.get(node)) {
                    m_ssaCalculator.newDef(
                        nodeToVariable.get(m_materializationToEscapee.get(materialize)),
                        block, materialize);
                }
            }
        }

        m_ssaCalculator.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                Node* allocation = indexToNode[variable->index()];
                if (!block->ssa->liveAtHead.contains(allocation))
                    return nullptr;

                Node* phiNode = m_graph.addNode(allocation->prediction(), Phi, NodeOrigin());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });

        // Place Phis in the right places. Replace all uses of any allocation with the appropriate
        // materialization. Create the appropriate Upsilon nodes.
        LocalOSRAvailabilityCalculator availabilityCalculator;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
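            // The mapping below tracks, for each sink candidate relevant to this block, the node
            // that currently stands for it: the reaching def at the head of the block, and then
            // whatever Phi or materialization we most recently saw while walking the block.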
            HashMap<Node*, Node*> mapping;

            for (Node* candidate : block->ssa->liveAtHead) {
                SSACalculator::Variable* variable = nodeToVariable.get(candidate);
                if (!variable)
                    continue;

                SSACalculator::Def* def = m_ssaCalculator.reachingDefAtHead(block, variable);
                if (!def)
                    continue;

                mapping.set(indexToNode[variable->index()], def->value());
            }

            availabilityCalculator.beginBlock(block);
            for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(block)) {
                m_insertionSet.insert(0, phiDef->value());

                Node* originalNode = indexToNode[phiDef->variable()->index()];
                insertOSRHintsForUpdate(
                    m_insertionSet, 0, NodeOrigin(), availabilityCalculator.m_availability,
                    originalNode, phiDef->value());

                mapping.set(originalNode, phiDef->value());
            }

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                for (Node* materialize : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialize);
                    m_insertionSet.insert(nodeIndex, materialize);
                    insertOSRHintsForUpdate(
                        m_insertionSet, nodeIndex, node->origin,
                        availabilityCalculator.m_availability, escapee, materialize);
                    mapping.set(escapee, materialize);
                }

                availabilityCalculator.executeNode(node);

                m_graph.doToChildren(
                    node,
                    [&] (Edge& edge) {
                        if (Node* materialize = mapping.get(edge.node()))
                            edge.setNode(materialize);
                    });

                // If you cause an escape, you shouldn't see the original escapee.
                if (validationEnabled()) {
                    handleNode(
                        node,
                        [&] () { },
                        [&] (Node* escapee) {
                            DFG_ASSERT(m_graph, node, !m_sinkCandidates.contains(escapee));
                        });
                }
            }

            NodeAndIndex terminal = block->findTerminal();
            size_t upsilonInsertionPoint = terminal.index;
            Node* upsilonWhere = terminal.node;
            NodeOrigin upsilonOrigin = upsilonWhere->origin;
            for (BasicBlock* successorBlock : block->successors()) {
                for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    Node* allocation = indexToNode[variable->index()];

                    Node* originalIncoming = mapping.get(allocation);
                    Node* incoming;
                    if (originalIncoming == allocation) {
                        // If we have a Phi that combines materializations with the original
                        // phantom object, then the path with the phantom object must materialize.

                        incoming = createMaterialize(allocation, upsilonWhere);
                        m_insertionSet.insert(upsilonInsertionPoint, incoming);
                        insertOSRHintsForUpdate(
                            m_insertionSet, upsilonInsertionPoint, upsilonOrigin,
                            availabilityCalculator.m_availability, originalIncoming, incoming);
                    } else
                        incoming = originalIncoming;

                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }
            }

            m_insertionSet.execute(block);
        }

        // At this point we have dummy materialization nodes along with edges to them. This means
        // that the part of the control flow graph that prefers to see actual object allocations
        // is completely fixed up, except for the materializations themselves.
    }

    void lowerNonReadingOperationsOnPhantomAllocations()
    {
        // Lower everything but reading operations on phantom allocations. We absolutely have to
        // lower all writes so as to reveal them to the SSA calculator. We cannot lower reads
        // because the whole point is that those go away completely.

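        // For example, a PutByOffset whose base is a sink candidate is turned into a hint via
        // convertToPutByOffsetHint(), and a PutStructure into a hint on the post-transition
        // structure, while reads such as GetByOffset are left alone; those get resolved against
        // the promoted values later, in promoteSunkenFields().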
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                switch (node->op()) {
                case PutByOffset: {
                    if (m_sinkCandidates.contains(node->child2().node()))
                        node->convertToPutByOffsetHint();
                    break;
                }

                case PutStructure: {
                    if (m_sinkCandidates.contains(node->child1().node())) {
                        Node* structure = m_insertionSet.insertConstant(
                            nodeIndex, node->origin, JSValue(node->transition()->next));
                        node->convertToPutStructureHint(structure);
                    }
                    break;
                }

                case NewObject: {
                    if (m_sinkCandidates.contains(node)) {
                        Node* structure = m_insertionSet.insertConstant(
                            nodeIndex + 1, node->origin, JSValue(node->structure()));
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            PromotedHeapLocation(StructurePLoc, node).createHint(
                                m_graph, node->origin, structure));
                        node->convertToPhantomNewObject();
                    }
                    break;
                }

                case MaterializeNewObject: {
                    if (m_sinkCandidates.contains(node)) {
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            PromotedHeapLocation(StructurePLoc, node).createHint(
                                m_graph, node->origin, m_graph.varArgChild(node, 0).node()));
                        for (unsigned i = 0; i < node->objectMaterializationData().m_properties.size(); ++i) {
                            unsigned identifierNumber =
                                node->objectMaterializationData().m_properties[i].m_identifierNumber;
                            m_insertionSet.insert(
                                nodeIndex + 1,
                                PromotedHeapLocation(
                                    NamedPropertyPLoc, node, identifierNumber).createHint(
                                    m_graph, node->origin,
                                    m_graph.varArgChild(node, i + 1).node()));
                        }
                        node->convertToPhantomNewObject();
                    }
                    break;
                }

                case StoreBarrier:
                case StoreBarrierWithNullCheck: {
                    if (m_sinkCandidates.contains(node->child1().node()))
                        node->convertToPhantom();
                    break;
                }

                default:
                    break;
                }

                m_graph.doToChildren(
                    node,
                    [&] (Edge& edge) {
                        if (m_sinkCandidates.contains(edge.node()))
                            edge.setUseKind(KnownCellUse);
                    });
            }
            m_insertionSet.execute(block);
        }
    }

    void promoteSunkenFields()
    {
        // Collect the set of heap locations that we will be operating over.
        HashSet<PromotedHeapLocation> locations;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block) {
                promoteHeapAccess(
                    node,
                    [&] (PromotedHeapLocation location, Edge) {
                        if (m_sinkCandidates.contains(location.base()))
                            locations.add(location);
                    },
                    [&] (PromotedHeapLocation location) {
                        if (m_sinkCandidates.contains(location.base()))
                            locations.add(location);
                    });
            }
        }

        // Figure out which locations belong to which allocations.
        m_locationsForAllocation.clear();
        for (PromotedHeapLocation location : locations) {
            auto result = m_locationsForAllocation.add(location.base(), Vector<PromotedHeapLocation>());
            ASSERT(!result.iterator->value.contains(location));
            result.iterator->value.append(location);
        }

        // For each sunken thingy, make sure we create Bottom values for all of its fields.
        // Note that this has the hilarious slight inefficiency of creating redundant hints for
        // things that were previously materializations. This should only impact compile times and
        // not code quality, and it's necessary for soundness without some data structure hackage.
        // For example, a MaterializeNewObject that we choose to sink may have new fields added to
        // it conditionally. That would necessitate Bottoms.
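        // Concretely (illustrative JS, hypothetical names):
        //
        //     var o = {};
        //     if (p)
        //         o.f = 42;
        //
        // The location for "f" needs a BottomValue hint right after the allocation so that it
        // has a well-defined value on the path where the store never runs.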
        Node* bottom = nullptr;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            if (block == m_graph.block(0))
                bottom = m_insertionSet.insertNode(0, SpecNone, BottomValue, NodeOrigin());

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
                    m_insertionSet.insert(
                        nodeIndex + 1, location.createHint(m_graph, node->origin, bottom));
                }
            }
            m_insertionSet.execute(block);
        }

        m_ssaCalculator.reset();

        // Collect the set of "variables" that we will be sinking.
        m_locationToVariable.clear();
        m_indexToLocation.clear();
        for (PromotedHeapLocation location : locations) {
            SSACalculator::Variable* variable = m_ssaCalculator.newVariable();
            m_locationToVariable.add(location, variable);
            ASSERT(m_indexToLocation.size() == variable->index());
            m_indexToLocation.append(location);
        }

        // Create Defs from the existing hints.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block) {
                promoteHeapAccess(
                    node,
                    [&] (PromotedHeapLocation location, Edge value) {
                        if (!m_sinkCandidates.contains(location.base()))
                            return;
                        SSACalculator::Variable* variable = m_locationToVariable.get(location);
                        m_ssaCalculator.newDef(variable, block, value.node());
                    },
                    [&] (PromotedHeapLocation) { });
            }
        }

        // OMG run the SSA calculator to create Phis!
        m_ssaCalculator.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                PromotedHeapLocation location = m_indexToLocation[variable->index()];
                if (!block->ssa->liveAtHead.contains(location.base()))
                    return nullptr;

                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, NodeOrigin());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });

        // Place Phis in the right places, replace all uses of any load with the appropriate
        // value, and create the appropriate Upsilon nodes.
        m_graph.clearReplacements();
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            // This mapping table is intended to be lazy. If something is omitted from the table,
            // it means that there haven't been any local stores to that promoted heap location.
            m_localMapping.clear();

            // Insert the Phi functions that we had previously created.
            for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(block)) {
                PromotedHeapLocation location = m_indexToLocation[phiDef->variable()->index()];

                m_insertionSet.insert(
                    0, phiDef->value());
                m_insertionSet.insert(
                    0, location.createHint(m_graph, NodeOrigin(), phiDef->value()));
                m_localMapping.add(location, phiDef->value());
            }

            if (verbose)
                dataLog("Local mapping at ", pointerDump(block), ": ", mapDump(m_localMapping), "\n");

            // Process the block and replace all uses of loads with the promoted value.
            for (Node* node : *block) {
                m_graph.performSubstitution(node);

                if (Node* escapee = m_materializationToEscapee.get(node))
                    populateMaterialize(block, node, escapee);

                promoteHeapAccess(
                    node,
                    [&] (PromotedHeapLocation location, Edge value) {
                        if (m_sinkCandidates.contains(location.base()))
                            m_localMapping.set(location, value.node());
                    },
                    [&] (PromotedHeapLocation location) {
                        if (m_sinkCandidates.contains(location.base()))
                            node->replaceWith(resolve(block, location));
                    });
            }

            // Gotta drop some Upsilons.
            NodeAndIndex terminal = block->findTerminal();
            size_t upsilonInsertionPoint = terminal.index;
            NodeOrigin upsilonOrigin = terminal.node->origin;
            for (BasicBlock* successorBlock : block->successors()) {
                for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    PromotedHeapLocation location = m_indexToLocation[variable->index()];
                    Node* incoming = resolve(block, location);

                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }
            }

            m_insertionSet.execute(block);
        }
    }

    Node* resolve(BasicBlock* block, PromotedHeapLocation location)
    {
        if (Node* result = m_localMapping.get(location))
            return result;

        // This implies that there is no local mapping. Find a non-local mapping.
        SSACalculator::Def* def = m_ssaCalculator.nonLocalReachingDef(
            block, m_locationToVariable.get(location));
        ASSERT(def);
        ASSERT(def->value());
        m_localMapping.add(location, def->value());
        return def->value();
    }

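    // This is the one place that classifies nodes for this phase: sinkCandidate() is called when
    // the node is an allocation we know how to sink (NewObject or MaterializeNewObject), and
    // escape() is called for each child that the node causes to escape. Any node we don't
    // explicitly understand conservatively escapes all of its children.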
    template<typename SinkCandidateFunctor, typename EscapeFunctor>
    void handleNode(
        Node* node,
        const SinkCandidateFunctor& sinkCandidate,
        const EscapeFunctor& escape)
    {
        switch (node->op()) {
        case NewObject:
        case MaterializeNewObject:
            sinkCandidate();
            m_graph.doToChildren(
                node,
                [&] (Edge edge) {
                    escape(edge.node());
                });
            break;

        case CheckStructure:
        case GetByOffset:
        case MultiGetByOffset:
        case PutStructure:
        case GetGetterSetterByOffset:
        case MovHint:
        case Phantom:
        case Check:
        case MustGenerate:
        case StoreBarrier:
        case StoreBarrierWithNullCheck:
        case PutHint:
            break;

        case PutByOffset:
            escape(node->child3().node());
            break;

        case MultiPutByOffset:
            // FIXME: In the future we should be able to handle this. It's just a matter of
            // building the appropriate *Hint variant of this instruction, along with a
            // PhantomStructureSelect node - since this transforms the Structure in a conditional
            // way.
            // https://bugs.webkit.org/show_bug.cgi?id=136924
            escape(node->child1().node());
            escape(node->child2().node());
            break;

        default:
            m_graph.doToChildren(
                node,
                [&] (Edge edge) {
                    escape(edge.node());
                });
            break;
        }
    }

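    // Creates (but does not yet insert) a MaterializeNewObject standing in for 'escapee' at
    // 'where', and records it in the materialization maps. Its children are filled in later by
    // populateMaterialize(), once the promoted values are known.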
    Node* createMaterialize(Node* escapee, Node* where)
    {
        Node* result = nullptr;

        switch (escapee->op()) {
        case NewObject:
        case MaterializeNewObject: {
            ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();

            result = m_graph.addNode(
                escapee->prediction(), Node::VarArg, MaterializeNewObject,
                NodeOrigin(
                    escapee->origin.semantic,
                    where->origin.forExit),
                OpInfo(data), OpInfo(), 0, 0);
            break;
        }

        default:
            DFG_CRASH(m_graph, escapee, "Bad escapee op");
            break;
        }

        if (verbose)
            dataLog("Creating materialization point at ", where, " for ", escapee, ": ", result, "\n");

        m_materializationToEscapee.add(result, escapee);
        m_materializationSiteToMaterializations.add(
            where, Vector<Node*>()).iterator->value.append(result);

        return result;
    }

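    // Fills in the variable-argument children of a materialization: the resolved structure comes
    // first, followed by the current value of each named property whose value isn't Bottom, with
    // a matching PhantomPropertyValue recorded in the materialization data.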
    void populateMaterialize(BasicBlock* block, Node* node, Node* escapee)
    {
        switch (node->op()) {
        case MaterializeNewObject: {
            ObjectMaterializationData& data = node->objectMaterializationData();
            unsigned firstChild = m_graph.m_varArgChildren.size();

            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);

            PromotedHeapLocation structure(StructurePLoc, escapee);
            ASSERT(locations.contains(structure));

            m_graph.m_varArgChildren.append(Edge(resolve(block, structure), KnownCellUse));

            for (unsigned i = 0; i < locations.size(); ++i) {
                switch (locations[i].kind()) {
                case StructurePLoc: {
                    ASSERT(locations[i] == structure);
                    break;
                }

                case NamedPropertyPLoc: {
                    Node* value = resolve(block, locations[i]);
                    if (value->op() == BottomValue) {
                        // We can skip Bottoms entirely.
                        break;
                    }

                    data.m_properties.append(PhantomPropertyValue(locations[i].info()));
                    m_graph.m_varArgChildren.append(value);
                    break;
                }

                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }

            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }

        default:
            DFG_CRASH(m_graph, node, "Bad materialize op");
            break;
        }
    }

    SSACalculator m_ssaCalculator;
    HashSet<Node*> m_sinkCandidates;
    HashMap<Node*, Node*> m_materializationToEscapee;
    HashMap<Node*, Vector<Node*>> m_materializationSiteToMaterializations;
    HashMap<Node*, Vector<PromotedHeapLocation>> m_locationsForAllocation;
    HashMap<PromotedHeapLocation, SSACalculator::Variable*> m_locationToVariable;
    Vector<PromotedHeapLocation> m_indexToLocation;
    HashMap<PromotedHeapLocation, Node*> m_localMapping;
    InsertionSet m_insertionSet;
};

bool performObjectAllocationSinking(Graph& graph)
{
    SamplingRegion samplingRegion("DFG Object Allocation Sinking Phase");
    return runPhase<ObjectAllocationSinkingPhase>(graph);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)