13aabace682a065cf875086ff3bea24a28bb4389
[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGObjectAllocationSinkingPhase.cpp
1 /*
2  * Copyright (C) 2014 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGObjectAllocationSinkingPhase.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "DFGAbstractHeap.h"
32 #include "DFGBlockMapInlines.h"
33 #include "DFGClobberize.h"
34 #include "DFGGraph.h"
35 #include "DFGInsertOSRHintsForUpdate.h"
36 #include "DFGInsertionSet.h"
37 #include "DFGLivenessAnalysisPhase.h"
38 #include "DFGOSRAvailabilityAnalysisPhase.h"
39 #include "DFGPhase.h"
40 #include "DFGPromoteHeapAccess.h"
41 #include "DFGSSACalculator.h"
42 #include "DFGValidate.h"
43 #include "JSCInlines.h"
44
45 namespace JSC { namespace DFG {
46
// Debug switch for this phase: flip to true to get per-iteration dumps of the
// materialization fixpoint and before/after graph dumps. Compile-time constant
// so all the `if (verbose)` paths below fold away in release builds.
static constexpr bool verbose = false;
48
49 class ObjectAllocationSinkingPhase : public Phase {
50 public:
    // Constructs the phase over the given graph. The SSA calculator and the
    // insertion set are members (rather than locals) because they are reset and
    // reused across the multiple sinking iterations that run() performs.
    ObjectAllocationSinkingPhase(Graph& graph)
        : Phase(graph, "object allocation sinking")
        , m_ssaCalculator(graph)
        , m_insertionSet(graph)
    {
    }
57     
58     bool run()
59     {
60         ASSERT(m_graph.m_fixpointState == FixpointNotConverged);
61         
62         m_graph.m_dominators.computeIfNecessary(m_graph);
63         
64         // Logically we wish to consider every NewObject and sink it. However it's probably not
65         // profitable to sink a NewObject that will always escape. So, first we do a very simple
66         // forward flow analysis that determines the set of NewObject nodes that have any chance
67         // of benefiting from object allocation sinking. Then we fixpoint the following rules:
68         //
69         // - For each NewObject, we turn the original NewObject into a PhantomNewObject and then
70         //   we insert MaterializeNewObject just before those escaping sites that come before any
71         //   other escaping sites - that is, there is no path between the allocation and those sites
72         //   that would see any other escape. Note that Upsilons constitute escaping sites. Then we
73         //   insert additional MaterializeNewObject nodes on Upsilons that feed into Phis that mix
74         //   materializations and the original PhantomNewObject. We then turn each PutByOffset over a
75         //   PhantomNewObject into a PutByOffsetHint.
76         //
77         // - We perform the same optimization for MaterializeNewObject. This allows us to cover
78         //   cases where we had MaterializeNewObject flowing into a PutByOffsetHint.
79         //
80         // We could also add this rule:
81         //
82         // - If all of the Upsilons of a Phi have a MaterializeNewObject that isn't used by anyone
83         //   else, then replace the Phi with the MaterializeNewObject.
84         //
85         //   FIXME: Implement this. Note that this totally doable but it requires some gnarly
86         //   code, and to be effective the pruner needs to be aware of it. Currently any Upsilon
87         //   is considered to be an escape even by the pruner, so it's unlikely that we'll see
88         //   many cases of Phi over Materializations.
89         //   https://bugs.webkit.org/show_bug.cgi?id=136927
90         
91         if (!performSinking())
92             return false;
93         
94         while (performSinking()) { }
95         
96         if (verbose) {
97             dataLog("Graph after sinking:\n");
98             m_graph.dump();
99         }
100         
101         return true;
102     }
103
104 private:
    // Runs one full iteration of the sinking transformation. Returns false if
    // no sink candidates were found (graph unchanged), true otherwise. The
    // order of the steps matters: the analyses feed determineMaterializationPoints(),
    // and the three transformation steps each assume the previous one ran.
    bool performSinking()
    {
        // Refresh the analyses this iteration depends on; the previous
        // iteration's transformations invalidated them.
        m_graph.computeRefCounts();
        performLivenessAnalysis(m_graph);
        performOSRAvailabilityAnalysis(m_graph);
        
        // Capture a pre-transformation dump so that a validation failure at the
        // end of this iteration can report what the graph looked like before.
        CString graphBeforeSinking;
        if (Options::verboseValidationFailure() && Options::validateGraphAtEachPhase()) {
            StringPrintStream out;
            m_graph.dump(out);
            graphBeforeSinking = out.toCString();
        }
        
        if (verbose) {
            dataLog("Graph before sinking:\n");
            m_graph.dump();
        }
        
        determineMaterializationPoints();
        if (m_sinkCandidates.isEmpty())
            return false;
        
        // At this point we are committed to sinking the sinking candidates.
        placeMaterializationPoints();
        lowerNonReadingOperationsOnPhantomAllocations();
        promoteSunkenFields();
        
        if (Options::validateGraphAtEachPhase())
            validate(m_graph, DumpGraph, graphBeforeSinking);
        
        if (verbose)
            dataLog("Sinking iteration changed the graph.\n");
        return true;
    }
139     
    // Computes m_sinkCandidates (allocations worth sinking) and
    // m_edgeToMaterializationPoint (the (user, escapee) edges at which a
    // materialization must be inserted). Runs a forward dataflow fixpoint that
    // tracks, per block, which candidate allocations have already been forced
    // to materialize on some path.
    void determineMaterializationPoints()
    {
        // The premise of this pass is that if there exists a point in the program where some
        // path from a phantom allocation site to that point causes materialization, then *all*
        // paths cause materialization. This should mean that there are never any redundant
        // materializations.
        
        m_sinkCandidates.clear();
        m_edgeToMaterializationPoint.clear();
        
        // For each block: candidate allocation -> "has been materialized on
        // some path reaching this point" (false at the allocation itself).
        BlockMap<HashMap<Node*, bool>> materializedAtHead(m_graph);
        BlockMap<HashMap<Node*, bool>> materializedAtTail(m_graph);
        
        bool changed;
        do {
            if (verbose)
                dataLog("Doing iteration of materialization point placement.\n");
            changed = false;
            for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
                HashMap<Node*, bool> materialized = materializedAtHead[block];
                for (Node* node : *block) {
                    handleNode(
                        node,
                        // Sink-candidate callback: node is a sinkable allocation,
                        // initially not materialized.
                        [&] () {
                            materialized.add(node, false);
                        },
                        // Escape callback: an escape of a tracked allocation
                        // forces it to be materialized from here on.
                        [&] (Node* escapee) {
                            auto iter = materialized.find(escapee);
                            if (iter != materialized.end())
                                iter->value = true;
                        });
                }
                
                if (verbose)
                    dataLog("    Materialized at tail of ", pointerDump(block), ": ", mapDump(materialized), "\n");
                
                if (materialized == materializedAtTail[block])
                    continue;
                
                materializedAtTail[block] = materialized;
                changed = true;
                
                // Only propagate things to our successors if they are alive in all successors.
                // So, we prune materialized-at-tail to only include things that are live.
                Vector<Node*> toRemove;
                for (auto pair : materialized) {
                    if (!block->ssa->liveAtTail.contains(pair.key))
                        toRemove.append(pair.key);
                }
                for (Node* key : toRemove)
                    materialized.remove(key);
                
                // Merge into each successor's head state; an allocation counts
                // as materialized there if it was materialized on any incoming
                // path (hence the |=).
                for (BasicBlock* successorBlock : block->successors()) {
                    for (auto pair : materialized) {
                        materializedAtHead[successorBlock].add(
                            pair.key, false).iterator->value |= pair.value;
                    }
                }
            }
        } while (changed);
        
        // Determine the sink candidates. Broadly, a sink candidate is a node that handleNode()
        // believes is sinkable, and one of the following is true:
        //
        // 1) There exists a basic block with only backward outgoing edges (or no outgoing edges)
        //    in which the node wasn't materialized. This is meant to catch effectively-infinite
        //    loops in which we don't need to have allocated the object.
        //
        // 2) There exists a basic block at the tail of which the node is not materialized and the
        //    node is dead.
        //
        // 3) The sum of execution counts of the materializations is less than the sum of
        //    execution counts of the original node.
        //
        // We currently implement only rule #2.
        // FIXME: Implement the two other rules.
        // https://bugs.webkit.org/show_bug.cgi?id=137073 (rule #1)
        // https://bugs.webkit.org/show_bug.cgi?id=137074 (rule #3)
        
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (auto pair : materializedAtTail[block]) {
                if (pair.value)
                    continue; // It was materialized.
                
                if (block->ssa->liveAtTail.contains(pair.key))
                    continue; // It might still get materialized in all of the successors.
                
                // We know that it died in this block and it wasn't materialized. That means that
                // if we sink this allocation, then *this* will be a path along which we never
                // have to allocate. Profit!
                m_sinkCandidates.add(pair.key);
            }
        }
        
        if (m_sinkCandidates.isEmpty())
            return;
        
        // A materialization edge exists at any point where a node escapes but hasn't been
        // materialized yet.
        //
        // FIXME: This can create duplicate allocations when we really only needed to perform one.
        // For example:
        //
        //     var o = new Object();
        //     if (rare) {
        //         if (stuff)
        //             call(o); // o escapes here.
        //         return;
        //     }
        //     // o doesn't escape down here.
        //
        // In this example, we would place a materialization point at call(o) and then we would find
        // ourselves having to insert another one at the implicit else case of that if statement
        // ('cause we've broken critical edges). We would instead really like to just have one
        // materialization point right at the top of the then case of "if (rare)". To do this, we can
        // find the LCA of the various materializations in the dom tree.
        // https://bugs.webkit.org/show_bug.cgi?id=137124
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            // Tracks which candidates are already materialized at this point in
            // the block, seeded from the fixpoint's at-head state.
            HashSet<Node*> materialized;
            for (auto pair : materializedAtHead[block]) {
                if (pair.value && m_sinkCandidates.contains(pair.key))
                    materialized.add(pair.key);
            }
            
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                
                handleNode(
                    node,
                    [&] () { },
                    [&] (Node* escapee) {
                        if (!m_sinkCandidates.contains(escapee))
                            return;
                        
                        // Only the first escape along this path needs a
                        // materialization; later escapes see the same object.
                        if (!materialized.add(escapee).isNewEntry)
                            return;
                        
                        Node* materialize = createMaterialize(escapee, node->origin);
                        if (verbose)
                            dataLog("Adding materialization point: ", node, "->", escapee, " = ", materialize, "\n");
                        m_edgeToMaterializationPoint.add(
                            std::make_pair(node, escapee), materialize);
                    });
            }
        }
    }
286     
    // Physically inserts the materialization nodes recorded in
    // m_edgeToMaterializationPoint, builds Phi/Upsilon structure (via the SSA
    // calculator) so that every use of a sunk allocation sees either the
    // allocation or the correct materialization, and rewrites use edges
    // accordingly. The materializations inserted here are still "dummy" nodes;
    // their data is filled in later by populateMaterialize().
    void placeMaterializationPoints()
    {
        m_ssaCalculator.reset();
        
        // One SSA "variable" per sink candidate; indexToNode is the inverse of
        // nodeToVariable, keyed by variable index.
        HashMap<Node*, SSACalculator::Variable*> nodeToVariable;
        Vector<Node*> indexToNode;
        
        for (Node* node : m_sinkCandidates) {
            SSACalculator::Variable* variable = m_ssaCalculator.newVariable();
            nodeToVariable.add(node, variable);
            ASSERT(indexToNode.size() == variable->index());
            indexToNode.append(node);
        }
        
        // A candidate's variable is defined both at the allocation itself and
        // at every materialization point for it.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block) {
                if (SSACalculator::Variable* variable = nodeToVariable.get(node))
                    m_ssaCalculator.newDef(variable, block, node);
                
                m_graph.doToChildren(
                    node,
                    [&] (Edge edge) {
                        Node* materialize =
                            m_edgeToMaterializationPoint.get(std::make_pair(node, edge.node()));
                        if (!materialize)
                            return;
                        
                        m_ssaCalculator.newDef(
                            nodeToVariable.get(edge.node()), block, materialize);
                    });
            }
        }
        
        // Create a Phi wherever the candidate is live at a join of differing defs.
        m_ssaCalculator.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                Node* allocation = indexToNode[variable->index()];
                if (!block->ssa->liveAtHead.contains(allocation))
                    return nullptr;
                
                Node* phiNode = m_graph.addNode(allocation->prediction(), Phi, NodeOrigin());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });
        
        // Place Phis in the right places. Replace all uses of any allocation with the appropriate
        // materialization. Create the appropriate Upsilon nodes.
        LocalOSRAvailabilityCalculator availabilityCalculator;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            // mapping: allocation -> node that currently represents its value
            // at this point in the block.
            HashMap<Node*, Node*> mapping;
            
            for (Node* candidate : block->ssa->liveAtHead) {
                SSACalculator::Variable* variable = nodeToVariable.get(candidate);
                if (!variable)
                    continue;
                
                SSACalculator::Def* def = m_ssaCalculator.reachingDefAtHead(block, variable);
                if (!def)
                    continue;
                
                mapping.set(indexToNode[variable->index()], def->value());
            }
            
            availabilityCalculator.beginBlock(block);
            for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(block)) {
                m_insertionSet.insert(0, phiDef->value());
                
                Node* originalNode = indexToNode[phiDef->variable()->index()];
                insertOSRHintsForUpdate(
                    m_insertionSet, 0, NodeOrigin(), availabilityCalculator.m_availability,
                    originalNode, phiDef->value());

                mapping.set(originalNode, phiDef->value());
            }
            
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                // First pass over children: insert any materialization that
                // this node's escapes require, just before the node.
                m_graph.doToChildren(
                    node,
                    [&] (Edge edge) {
                        Node* materialize = m_edgeToMaterializationPoint.get(
                            std::make_pair(node, edge.node()));
                        if (materialize) {
                            m_insertionSet.insert(nodeIndex, materialize);
                            insertOSRHintsForUpdate(
                                m_insertionSet, nodeIndex, node->origin,
                                availabilityCalculator.m_availability, edge.node(), materialize);
                            mapping.set(edge.node(), materialize);
                        }
                    });

                availabilityCalculator.executeNode(node);
                
                // Second pass: redirect this node's uses to the current
                // representative (materialization or Phi) of each allocation.
                m_graph.doToChildren(
                    node,
                    [&] (Edge& edge) {
                        if (Node* materialize = mapping.get(edge.node()))
                            edge.setNode(materialize);
                    });
            }
            
            // Feed each successor Phi via an Upsilon inserted just before the
            // block's terminal.
            size_t upsilonInsertionPoint = block->size() - 1;
            NodeOrigin upsilonOrigin = block->last()->origin;
            for (BasicBlock* successorBlock : block->successors()) {
                for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    Node* allocation = indexToNode[variable->index()];
                    
                    Node* originalIncoming = mapping.get(allocation);
                    Node* incoming;
                    if (originalIncoming == allocation) {
                        // If we have a Phi that combines materializations with the original
                        // phantom object, then the path with the phantom object must materialize.
                        
                        incoming = createMaterialize(allocation, upsilonOrigin);
                        m_insertionSet.insert(upsilonInsertionPoint, incoming);
                        insertOSRHintsForUpdate(
                            m_insertionSet, upsilonInsertionPoint, upsilonOrigin,
                            availabilityCalculator.m_availability, originalIncoming, incoming);
                    } else
                        incoming = originalIncoming;
                    
                    Node* upsilon = m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                    
                    // Record the newly created materialization so later phases
                    // (e.g. promoteSunkenFields) can find and populate it.
                    if (originalIncoming == allocation) {
                        m_edgeToMaterializationPoint.add(
                            std::make_pair(upsilon, allocation), incoming);
                    }
                }
            }
            
            m_insertionSet.execute(block);
        }
        
        // At this point we have dummy materialization nodes along with edges to them. This means
        // that the part of the control flow graph that prefers to see actual object allocations
        // is completely fixed up, except for the materializations themselves.
    }
428     
    // Converts all write-like operations over sunk allocations into their
    // *Hint variants and turns the allocations themselves into
    // PhantomNewObject. Reads are deliberately left alone: they are eliminated
    // entirely by promoteSunkenFields().
    void lowerNonReadingOperationsOnPhantomAllocations()
    {
        // Lower everything but reading operations on phantom allocations. We absolutely have to
        // lower all writes so as to reveal them to the SSA calculator. We cannot lower reads
        // because the whole point is that those go away completely.
        
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                switch (node->op()) {
                case PutByOffset: {
                    // child2 is the base object being stored into.
                    if (m_sinkCandidates.contains(node->child2().node()))
                        node->convertToPutByOffsetHint();
                    break;
                }
                    
                case PutStructure: {
                    if (m_sinkCandidates.contains(node->child1().node())) {
                        // Materialize the post-transition structure as a
                        // constant so the hint carries an explicit value.
                        Node* structure = m_insertionSet.insertConstant(
                            nodeIndex, node->origin, JSValue(node->transition()->next));
                        node->convertToPutStructureHint(structure);
                    }
                    break;
                }
                    
                case NewObject: {
                    if (m_sinkCandidates.contains(node)) {
                        // Record the allocation's initial structure as a hint
                        // (inserted after the node), then phantomize it.
                        Node* structure = m_insertionSet.insertConstant(
                            nodeIndex + 1, node->origin, JSValue(node->structure()));
                        m_insertionSet.insertNode(
                            nodeIndex + 1, SpecNone, PutStructureHint, node->origin,
                            Edge(node, KnownCellUse), Edge(structure, KnownCellUse));
                        node->convertToPhantomNewObject();
                    }
                    break;
                }
                    
                case MaterializeNewObject: {
                    if (m_sinkCandidates.contains(node)) {
                        // Re-express the materialization's structure (vararg
                        // child 0) and each of its properties as hints, then
                        // phantomize it just like a NewObject.
                        m_insertionSet.insertNode(
                            nodeIndex + 1, SpecNone, PutStructureHint, node->origin,
                            Edge(node, KnownCellUse), m_graph.varArgChild(node, 0));
                        for (unsigned i = 0; i < node->objectMaterializationData().m_properties.size(); ++i) {
                            m_insertionSet.insertNode(
                                nodeIndex + 1, SpecNone, PutByOffsetHint, node->origin,
                                Edge(node, KnownCellUse), m_graph.varArgChild(node, i + 1));
                        }
                        node->convertToPhantomNewObject();
                    }
                    break;
                }
                    
                case StoreBarrier:
                case StoreBarrierWithNullCheck: {
                    // A barrier on a phantom allocation is meaningless; keep
                    // the node only as a liveness placeholder.
                    if (m_sinkCandidates.contains(node->child1().node()))
                        node->convertToPhantom();
                    break;
                }
                    
                default:
                    break;
                }
                
                // Any remaining edge to a sunk allocation can use KnownCellUse:
                // the phantom allocation is definitely a cell.
                m_graph.doToChildren(
                    node,
                    [&] (Edge& edge) {
                        if (m_sinkCandidates.contains(edge.node()))
                            edge.setUseKind(KnownCellUse);
                    });
            }
            m_insertionSet.execute(block);
        }
    }
502     
    // Promotes the fields of sunk allocations to SSA values: collects the
    // promoted heap locations, seeds them with Bottom values, runs the SSA
    // calculator over the hint-defined stores, inserts Phis/Upsilons, replaces
    // all reads with the reaching value, and populates the materialization
    // nodes with the field values they must reconstruct.
    void promoteSunkenFields()
    {
        // Henceforth when we encounter a materialization point, we will want to ask *who* it is
        // a materialization for. Invert the map to be able to answer such questions.
        m_materializationPointToEscapee.clear();
        for (auto pair : m_edgeToMaterializationPoint)
            m_materializationPointToEscapee.add(pair.value, pair.key.second);
        
        // Collect the set of heap locations that we will be operating over.
        HashSet<PromotedHeapLocation> locations;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block) {
                promoteHeapAccess(
                    node,
                    // Write callback: a hint defines this location.
                    [&] (PromotedHeapLocation location, Edge) {
                        locations.add(location);
                    },
                    // Read callback: this location is consumed.
                    [&] (PromotedHeapLocation location) {
                        locations.add(location);
                    });
            }
        }
        
        // Figure out which locations belong to which allocations.
        m_locationsForAllocation.clear();
        for (PromotedHeapLocation location : locations) {
            auto result = m_locationsForAllocation.add(location.base(), Vector<PromotedHeapLocation>());
            ASSERT(!result.iterator->value.contains(location));
            result.iterator->value.append(location);
        }
        
        // For each sunken thingy, make sure we create Bottom values for all of its fields.
        // Note that this has the hilarious slight inefficiency of creating redundant hints for
        // things that were previously materializations. This should only impact compile times and
        // not code quality, and it's necessary for soundness without some data structure hackage.
        // For example, a MaterializeNewObject that we choose to sink may have new fields added to
        // it conditionally. That would necessitate Bottoms.
        Node* bottom = nullptr;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            // A single BottomValue at the top of the root block serves every
            // hint we insert below.
            if (block == m_graph.block(0))
                bottom = m_insertionSet.insertNode(0, SpecNone, BottomValue, NodeOrigin());
            
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
                    m_insertionSet.insert(
                        nodeIndex + 1, location.createHint(m_graph, node->origin, bottom));
                }
            }
            m_insertionSet.execute(block);
        }

        m_ssaCalculator.reset();

        // Collect the set of "variables" that we will be sinking.
        m_locationToVariable.clear();
        m_indexToLocation.clear();
        for (PromotedHeapLocation location : locations) {
            SSACalculator::Variable* variable = m_ssaCalculator.newVariable();
            m_locationToVariable.add(location, variable);
            ASSERT(m_indexToLocation.size() == variable->index());
            m_indexToLocation.append(location);
        }
        
        // Create Defs from the existing hints.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block) {
                promoteHeapAccess(
                    node,
                    [&] (PromotedHeapLocation location, Edge value) {
                        SSACalculator::Variable* variable = m_locationToVariable.get(location);
                        m_ssaCalculator.newDef(variable, block, value.node());
                    },
                    [&] (PromotedHeapLocation) { });
            }
        }
        
        // OMG run the SSA calculator to create Phis!
        m_ssaCalculator.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                PromotedHeapLocation location = m_indexToLocation[variable->index()];
                // Only bother with a Phi where the base allocation is live.
                if (!block->ssa->liveAtHead.contains(location.base()))
                    return nullptr;
                
                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, NodeOrigin());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });
        
        // Place Phis in the right places, replace all uses of any load with the appropriate
        // value, and create the appropriate Upsilon nodes.
        m_graph.clearReplacements();
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            // This mapping table is intended to be lazy. If something is omitted from the table,
            // it means that there haven't been any local stores to that promoted heap location.
            m_localMapping.clear();
            
            // Insert the Phi functions that we had previously created.
            for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(block)) {
                PromotedHeapLocation location = m_indexToLocation[phiDef->variable()->index()];
                
                m_insertionSet.insert(
                    0, phiDef->value());
                m_insertionSet.insert(
                    0, location.createHint(m_graph, NodeOrigin(), phiDef->value()));
                m_localMapping.add(location, phiDef->value());
            }
            
            if (verbose)
                dataLog("Local mapping at ", pointerDump(block), ": ", mapDump(m_localMapping), "\n");
            
            // Process the block and replace all uses of loads with the promoted value.
            for (Node* node : *block) {
                m_graph.performSubstitution(node);
                
                // If this node is a materialization point, fill in the data it
                // needs to rebuild the object.
                if (Node* escapee = m_materializationPointToEscapee.get(node))
                    populateMaterialize(block, node, escapee);
                
                promoteHeapAccess(
                    node,
                    [&] (PromotedHeapLocation location, Edge value) {
                        m_localMapping.set(location, value.node());
                    },
                    [&] (PromotedHeapLocation location) {
                        // Reads go away completely: replace the load with the
                        // reaching value for this location.
                        node->replaceWith(resolve(block, location));
                    });
            }
            
            // Gotta drop some Upsilons.
            size_t upsilonInsertionPoint = block->size() - 1;
            NodeOrigin upsilonOrigin = block->last()->origin;
            for (BasicBlock* successorBlock : block->successors()) {
                for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    PromotedHeapLocation location = m_indexToLocation[variable->index()];
                    Node* incoming = resolve(block, location);
                    
                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }
            }
            
            m_insertionSet.execute(block);
        }
    }
650     
651     Node* resolve(BasicBlock* block, PromotedHeapLocation location)
652     {
653         if (Node* result = m_localMapping.get(location))
654             return result;
655         
656         // This implies that there is no local mapping. Find a non-local mapping.
657         SSACalculator::Def* def = m_ssaCalculator.nonLocalReachingDef(
658             block, m_locationToVariable.get(location));
659         ASSERT(def);
660         ASSERT(def->value());
661         m_localMapping.add(location, def->value());
662         return def->value();
663     }
664
665     template<typename SinkCandidateFunctor, typename EscapeFunctor>
666     void handleNode(
667         Node* node,
668         const SinkCandidateFunctor& sinkCandidate,
669         const EscapeFunctor& escape)
670     {
671         switch (node->op()) {
672         case NewObject:
673         case MaterializeNewObject:
674             sinkCandidate();
675             m_graph.doToChildren(
676                 node,
677                 [&] (Edge edge) {
678                     escape(edge.node());
679                 });
680             break;
681             
682         case CheckStructure:
683         case GetByOffset:
684         case MultiGetByOffset:
685         case PutStructure:
686         case GetGetterSetterByOffset:
687         case MovHint:
688         case Phantom:
689         case Check:
690         case HardPhantom:
691         case StoreBarrier:
692         case StoreBarrierWithNullCheck:
693         case PutByOffsetHint:
694             break;
695             
696         case PutByOffset:
697             escape(node->child3().node());
698             break;
699             
700         case MultiPutByOffset:
701             // FIXME: In the future we should be able to handle this. It's just a matter of
702             // building the appropriate *Hint variant of this instruction, along with a
703             // PhantomStructureSelect node - since this transforms the Structure in a conditional
704             // way.
705             // https://bugs.webkit.org/show_bug.cgi?id=136924
706             escape(node->child1().node());
707             escape(node->child2().node());
708             break;
709
710         default:
711             m_graph.doToChildren(
712                 node,
713                 [&] (Edge edge) {
714                     escape(edge.node());
715                 });
716             break;
717         }
718     }
719     
720     Node* createMaterialize(Node* escapee, const NodeOrigin whereOrigin)
721     {
722         switch (escapee->op()) {
723         case NewObject:
724         case MaterializeNewObject: {
725             ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
726             
727             Node* result = m_graph.addNode(
728                 escapee->prediction(), Node::VarArg, MaterializeNewObject,
729                 NodeOrigin(
730                     escapee->origin.semantic,
731                     whereOrigin.forExit),
732                 OpInfo(data), OpInfo(), 0, 0);
733             return result;
734         }
735             
736         default:
737             DFG_CRASH(m_graph, escapee, "Bad escapee op");
738             return nullptr;
739         }
740     }
741     
742     void populateMaterialize(BasicBlock* block, Node* node, Node* escapee)
743     {
744         switch (node->op()) {
745         case MaterializeNewObject: {
746             ObjectMaterializationData& data = node->objectMaterializationData();
747             unsigned firstChild = m_graph.m_varArgChildren.size();
748             
749             Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
750             
751             PromotedHeapLocation structure(StructurePLoc, escapee);
752             ASSERT(locations.contains(structure));
753             
754             m_graph.m_varArgChildren.append(Edge(resolve(block, structure), KnownCellUse));
755             
756             for (unsigned i = 0; i < locations.size(); ++i) {
757                 switch (locations[i].kind()) {
758                 case StructurePLoc: {
759                     ASSERT(locations[i] == structure);
760                     break;
761                 }
762                     
763                 case NamedPropertyPLoc: {
764                     Node* value = resolve(block, locations[i]);
765                     if (value->op() == BottomValue) {
766                         // We can skip Bottoms entirely.
767                         break;
768                     }
769                     
770                     data.m_properties.append(PhantomPropertyValue(locations[i].info()));
771                     m_graph.m_varArgChildren.append(value);
772                     break;
773                 }
774                     
775                 default:
776                     DFG_CRASH(m_graph, node, "Bad location kind");
777                 }
778             }
779             
780             node->children = AdjacencyList(
781                 AdjacencyList::Variable,
782                 firstChild, m_graph.m_varArgChildren.size() - firstChild);
783             break;
784         }
785             
786         default:
787             DFG_CRASH(m_graph, node, "Bad materialize op");
788             break;
789         }
790     }
791     
    // SSA machinery used to place Phis/Upsilons for promoted heap locations.
    SSACalculator m_ssaCalculator;
    // Allocations selected for sinking; populated earlier in the phase — not
    // referenced in this part of the file, so semantics inferred from the name.
    HashSet<Node*> m_sinkCandidates;
    // Presumably maps an (escapee, use-site) edge to the materialization node
    // created for it; not used in the visible code — TODO confirm against the
    // earlier part of this phase.
    HashMap<std::pair<Node*, Node*>, Node*> m_edgeToMaterializationPoint;
    // Maps a node at which a materialization must happen to the escaping
    // allocation being materialized (consumed when walking each block).
    HashMap<Node*, Node*> m_materializationPointToEscapee;
    // For each sunk allocation, the promoted heap locations (structure, named
    // properties) that describe its state.
    HashMap<Node*, Vector<PromotedHeapLocation>> m_locationsForAllocation;
    // Bidirectional mapping between promoted locations and SSA variables.
    HashMap<PromotedHeapLocation, SSACalculator::Variable*> m_locationToVariable;
    Vector<PromotedHeapLocation> m_indexToLocation;
    // Block-local cache: the node currently holding each location's value.
    HashMap<PromotedHeapLocation, Node*> m_localMapping;
    // Deferred node insertions (hints, Upsilons, materializations) per block.
    InsertionSet m_insertionSet;
801 };
802     
803 bool performObjectAllocationSinking(Graph& graph)
804 {
805     SamplingRegion samplingRegion("DFG Object Allocation Sinking Phase");
806     return runPhase<ObjectAllocationSinkingPhase>(graph);
807 }
808
809 } } // namespace JSC::DFG
810
811 #endif // ENABLE(DFG_JIT)
812