/*
 * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGObjectAllocationSinkingPhase.h"

#if ENABLE(DFG_JIT)

#include "DFGAbstractHeap.h"
#include "DFGBlockMapInlines.h"
#include "DFGClobberize.h"
#include "DFGCombinedLiveness.h"
#include "DFGGraph.h"
#include "DFGInsertOSRHintsForUpdate.h"
#include "DFGInsertionSet.h"
#include "DFGLivenessAnalysisPhase.h"
#include "DFGOSRAvailabilityAnalysisPhase.h"
#include "DFGPhase.h"
#include "DFGPromoteHeapAccess.h"
#include "DFGSSACalculator.h"
#include "DFGValidate.h"
#include "JSCInlines.h"

namespace JSC { namespace DFG {

static bool verbose = false;

class ObjectAllocationSinkingPhase : public Phase {
public:
    ObjectAllocationSinkingPhase(Graph& graph)
        : Phase(graph, "object allocation sinking")
        , m_ssaCalculator(graph)
        , m_insertionSet(graph)
    {
    }

    bool run()
    {
        ASSERT(m_graph.m_fixpointState == FixpointNotConverged);

        m_graph.m_dominators.computeIfNecessary(m_graph);

        // Logically we wish to consider every NewObject and sink it. However, it's probably not
        // profitable to sink a NewObject that will always escape. So, first we do a very simple
        // forward flow analysis that determines the set of NewObject nodes that have any chance
        // of benefiting from object allocation sinking. Then we fixpoint the following rules:
        //
        // - For each NewObject, we turn the original NewObject into a PhantomNewObject and then
        //   we insert MaterializeNewObject just before those escaping sites that come before any
        //   other escaping sites - that is, there is no path between the allocation and those sites
        //   that would see any other escape. Note that Upsilons constitute escaping sites. Then we
        //   insert additional MaterializeNewObject nodes on Upsilons that feed into Phis that mix
        //   materializations and the original PhantomNewObject. We then turn each PutByOffset over a
        //   PhantomNewObject into a PutHint. (A rough sketch of this rule appears below.)
        //
        // - We perform the same optimization for MaterializeNewObject. This allows us to cover
        //   cases where we had MaterializeNewObject flowing into a PutHint.
        //
        // We could also add this rule:
        //
        // - If all of the Upsilons of a Phi have a MaterializeNewObject that isn't used by anyone
        //   else, then replace the Phi with the MaterializeNewObject.
        //
        //   FIXME: Implement this. Note that this is totally doable but it requires some gnarly
        //   code, and to be effective the pruner needs to be aware of it. Currently any Upsilon
        //   is considered to be an escape even by the pruner, so it's unlikely that we'll see
        //   many cases of Phi over Materializations.
        //   https://bugs.webkit.org/show_bug.cgi?id=136927
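        //
        // To make the first rule concrete, here is a rough sketch (hypothetical IR, with
        // operands and origins elided) of sinking a NewObject whose only escape is a call:
        //
        //     Before:                          After:
        //         a: NewObject                     a: PhantomNewObject
        //         b: PutByOffset(..., a, v)        b: PutHint(a, v)
        //         c: Call(..., a)                  m: MaterializeNewObject
        //                                          c: Call(..., m)
        //
        // Reads from the phantom allocation are not lowered here; they are resolved later by
        // promoteSunkenFields().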

        if (!performSinking())
            return false;

        while (performSinking()) { }

        if (verbose) {
            dataLog("Graph after sinking:\n");
            m_graph.dump();
        }

        return true;
    }

private:
    bool performSinking()
    {
        m_graph.computeRefCounts();
        performLivenessAnalysis(m_graph);
        performOSRAvailabilityAnalysis(m_graph);
        m_combinedLiveness = CombinedLiveness(m_graph);

        CString graphBeforeSinking;
        if (Options::verboseValidationFailure() && Options::validateGraphAtEachPhase()) {
            StringPrintStream out;
            m_graph.dump(out);
            graphBeforeSinking = out.toCString();
        }

        if (verbose) {
            dataLog("Graph before sinking:\n");
            m_graph.dump();
        }

        determineMaterializationPoints();
        if (m_sinkCandidates.isEmpty())
            return false;

        // At this point we are committed to sinking the sinking candidates.
        placeMaterializationPoints();
        lowerNonReadingOperationsOnPhantomAllocations();
        promoteSunkenFields();

        if (Options::validateGraphAtEachPhase())
            validate(m_graph, DumpGraph, graphBeforeSinking);

        if (verbose)
            dataLog("Sinking iteration changed the graph.\n");
        return true;
    }

    void determineMaterializationPoints()
    {
        // The premise of this pass is that if there exists a point in the program where some
        // path from a phantom allocation site to that point causes materialization, then *all*
        // paths cause materialization. This should mean that there are never any redundant
        // materializations.

        m_sinkCandidates.clear();
        m_materializationToEscapee.clear();
        m_materializationSiteToMaterializations.clear();

        BlockMap<HashMap<Node*, bool>> materializedAtHead(m_graph);
        BlockMap<HashMap<Node*, bool>> materializedAtTail(m_graph);

        bool changed;
        do {
            if (verbose)
                dataLog("Doing iteration of materialization point placement.\n");
            changed = false;
            for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
                HashMap<Node*, bool> materialized = materializedAtHead[block];
                if (verbose)
                    dataLog("    Looking at block ", pointerDump(block), "\n");
                for (Node* node : *block) {
                    handleNode(
                        node,
                        [&] () {
                            materialized.add(node, false);
                        },
                        [&] (Node* escapee) {
                            auto iter = materialized.find(escapee);
                            if (iter != materialized.end()) {
                                if (verbose)
                                    dataLog("    ", escapee, " escapes at ", node, "\n");
                                iter->value = true;
                            }
                        });
                }

                if (verbose)
                    dataLog("    Materialized at tail of ", pointerDump(block), ": ", mapDump(materialized), "\n");

                if (materialized == materializedAtTail[block])
                    continue;

                materializedAtTail[block] = materialized;
                changed = true;

                // Only propagate things to our successors if they are alive in all successors.
                // So, we prune materialized-at-tail to only include things that are live.
                Vector<Node*> toRemove;
                for (auto pair : materialized) {
                    if (!m_combinedLiveness.liveAtTail[block].contains(pair.key))
                        toRemove.append(pair.key);
                }
                for (Node* key : toRemove)
                    materialized.remove(key);

                for (BasicBlock* successorBlock : block->successors()) {
                    for (auto pair : materialized) {
                        materializedAtHead[successorBlock].add(
                            pair.key, false).iterator->value |= pair.value;
                    }
                }
            }
        } while (changed);

        // Determine the sink candidates. Broadly, a sink candidate is a node that handleNode()
        // believes is sinkable, and one of the following is true:
        //
        // 1) There exists a basic block with only backward outgoing edges (or no outgoing edges)
        //    in which the node wasn't materialized. This is meant to catch effectively-infinite
        //    loops in which we don't need to have allocated the object.
        //
        // 2) There exists a basic block at the tail of which the node is not materialized and the
        //    node is dead.
        //
        // 3) The sum of execution counts of the materializations is less than the sum of
        //    execution counts of the original node.
        //
        // We currently implement only rule #2 (illustrated below).
        // FIXME: Implement the two other rules.
        // https://bugs.webkit.org/show_bug.cgi?id=137073 (rule #1)
        // https://bugs.webkit.org/show_bug.cgi?id=137074 (rule #3)
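        //
        // To illustrate rule #2 with a hypothetical program:
        //
        //     var o = new Object();
        //     if (rare)
        //         call(o); // o escapes, so it gets materialized along this path.
        //     // o is not used again. Along the path where "rare" is false, o dies at the tail
        //     // of a block without ever being materialized, so o is a sink candidate.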

        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (auto pair : materializedAtTail[block]) {
                if (pair.value)
                    continue; // It was materialized.

                if (m_combinedLiveness.liveAtTail[block].contains(pair.key))
                    continue; // It might still get materialized in all of the successors.

                // We know that it died in this block and it wasn't materialized. That means that
                // if we sink this allocation, then *this* will be a path along which we never
                // have to allocate. Profit!
                m_sinkCandidates.add(pair.key);
            }
        }

        if (m_sinkCandidates.isEmpty())
            return;

        // A materialization edge exists at any point where a node escapes but hasn't been
        // materialized yet. We do this in two parts. First we find all of the nodes that cause
        // escaping to happen, where the escapee had not yet been materialized. This catches
        // everything but loops. We then catch loops - as well as weirder control flow constructs -
        // in a subsequent pass that looks at places in the CFG where an edge exists from a block
        // that hasn't materialized to a block that has. We insert the materialization along such an
        // edge, and we rely on the fact that critical edges were already broken so that we actually
        // either insert the materialization at the head of the successor or the tail of the
        // predecessor.
        //
        // FIXME: This can create duplicate allocations when we really only needed to perform one.
        // For example:
        //
        //     var o = new Object();
        //     if (rare) {
        //         if (stuff)
        //             call(o); // o escapes here.
        //         return;
        //     }
        //     // o doesn't escape down here.
        //
        // In this example, we would place a materialization point at call(o) and then we would find
        // ourselves having to insert another one at the implicit else case of that if statement
        // ('cause we've broken critical edges). We would instead really like to just have one
        // materialization point right at the top of the then case of "if (rare)". To do this, we can
        // find the LCA of the various materializations in the dom tree.
        // https://bugs.webkit.org/show_bug.cgi?id=137124

        // First pass: find intra-block materialization points.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            HashSet<Node*> materialized;
            for (auto pair : materializedAtHead[block]) {
                if (pair.value && m_sinkCandidates.contains(pair.key))
                    materialized.add(pair.key);
            }

            if (verbose)
                dataLog("Placing materialization points in ", pointerDump(block), " with materialization set ", listDump(materialized), "\n");

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                handleNode(
                    node,
                    [&] () { },
                    [&] (Node* escapee) {
                        if (verbose)
                            dataLog("Seeing ", escapee, " escape at ", node, "\n");

                        if (!m_sinkCandidates.contains(escapee)) {
                            if (verbose)
                                dataLog("    It's not a sink candidate.\n");
                            return;
                        }

                        if (!materialized.add(escapee).isNewEntry) {
                            if (verbose)
                                dataLog("   It's already materialized.\n");
                            return;
                        }

                        createMaterialize(escapee, node);
                    });
            }
        }

        // Second pass: find CFG edge materialization points.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (BasicBlock* successorBlock : block->successors()) {
                for (auto pair : materializedAtHead[successorBlock]) {
                    Node* allocation = pair.key;

                    // We only care if it is materialized in the successor.
                    if (!pair.value)
                        continue;

                    // We only care about sinking candidates.
                    if (!m_sinkCandidates.contains(allocation))
                        continue;

                    // We only care if it isn't materialized in the predecessor.
                    if (materializedAtTail[block].get(allocation))
                        continue;

                    // We need to decide if we put the materialization at the head of the successor,
                    // or the tail of the predecessor. It needs to be placed so that the allocation
                    // is never materialized before, and always materialized after.

                    // Is it never materialized in any of the successor's predecessors? I like to
                    // think of "successors' predecessors" and "predecessor's successors" as the
                    // "shadow", because of what it looks like when you draw it.
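                    //
                    // For instance, in a hypothetical CFG fragment like this one, where "block"
                    // is the predecessor we are currently looking at:
                    //
                    //     block ------> successorBlock <------ otherPredecessor
                    //
                    // the shadow of this edge is { block, otherPredecessor }. If the allocation
                    // is not materialized at the tail of any block in the shadow, it is safe to
                    // materialize it at the head of successorBlock.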
                    bool neverMaterializedInShadow = true;
                    for (BasicBlock* shadowBlock : successorBlock->predecessors) {
                        if (materializedAtTail[shadowBlock].get(allocation)) {
                            neverMaterializedInShadow = false;
                            break;
                        }
                    }

                    if (neverMaterializedInShadow) {
                        createMaterialize(allocation, successorBlock->firstOriginNode());
                        continue;
                    }

                    // Because we had broken critical edges, it must be the case that the
                    // predecessor's successors all materialize the object. This is because the
                    // previous case is guaranteed to catch the case where the successor only has
                    // one predecessor. When critical edges are broken, this is also the only case
                    // where the predecessor could have had multiple successors. Therefore we have
                    // already handled the case where the predecessor has multiple successors.
                    DFG_ASSERT(m_graph, block, block->numSuccessors() == 1);

                    createMaterialize(allocation, block->terminal());
                }
            }
        }
    }

    void placeMaterializationPoints()
    {
        m_ssaCalculator.reset();

        // The "variables" are the object allocations that we are sinking. So, nodeToVariable maps
        // sink candidates (aka escapees) to the SSACalculator's notion of Variable, and indexToNode
        // maps in the opposite direction using the SSACalculator::Variable::index() as the key.
        HashMap<Node*, SSACalculator::Variable*> nodeToVariable;
        Vector<Node*> indexToNode;

        for (Node* node : m_sinkCandidates) {
            SSACalculator::Variable* variable = m_ssaCalculator.newVariable();
            nodeToVariable.add(node, variable);
            ASSERT(indexToNode.size() == variable->index());
            indexToNode.append(node);
        }

        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block) {
                if (SSACalculator::Variable* variable = nodeToVariable.get(node))
                    m_ssaCalculator.newDef(variable, block, node);

                for (Node* materialize : m_materializationSiteToMaterializations.get(node)) {
                    m_ssaCalculator.newDef(
                        nodeToVariable.get(m_materializationToEscapee.get(materialize)),
                        block, materialize);
                }
            }
        }

        m_ssaCalculator.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                Node* allocation = indexToNode[variable->index()];
                if (!m_combinedLiveness.liveAtHead[block].contains(allocation))
                    return nullptr;

                Node* phiNode = m_graph.addNode(allocation->prediction(), Phi, NodeOrigin());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });

        // Place Phis in the right places. Replace all uses of any allocation with the appropriate
        // materialization. Create the appropriate Upsilon nodes.
        LocalOSRAvailabilityCalculator availabilityCalculator;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            HashMap<Node*, Node*> mapping;

            for (Node* candidate : m_combinedLiveness.liveAtHead[block]) {
                SSACalculator::Variable* variable = nodeToVariable.get(candidate);
                if (!variable)
                    continue;

                SSACalculator::Def* def = m_ssaCalculator.reachingDefAtHead(block, variable);
                if (!def)
                    continue;

                mapping.set(indexToNode[variable->index()], def->value());
            }

            availabilityCalculator.beginBlock(block);
            for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(block)) {
                m_insertionSet.insert(0, phiDef->value());

                Node* originalNode = indexToNode[phiDef->variable()->index()];
                insertOSRHintsForUpdate(
                    m_insertionSet, 0, NodeOrigin(), availabilityCalculator.m_availability,
                    originalNode, phiDef->value());

                mapping.set(originalNode, phiDef->value());
            }

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                for (Node* materialize : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialize);
                    m_insertionSet.insert(nodeIndex, materialize);
                    insertOSRHintsForUpdate(
                        m_insertionSet, nodeIndex, node->origin,
                        availabilityCalculator.m_availability, escapee, materialize);
                    mapping.set(escapee, materialize);
                }

                availabilityCalculator.executeNode(node);

                m_graph.doToChildren(
                    node,
                    [&] (Edge& edge) {
                        if (Node* materialize = mapping.get(edge.node()))
                            edge.setNode(materialize);
                    });

                // If you cause an escape, you shouldn't see the original escapee.
                if (validationEnabled()) {
                    handleNode(
                        node,
                        [&] () { },
                        [&] (Node* escapee) {
                            DFG_ASSERT(m_graph, node, !m_sinkCandidates.contains(escapee));
                        });
                }
            }

            NodeAndIndex terminal = block->findTerminal();
            size_t upsilonInsertionPoint = terminal.index;
            Node* upsilonWhere = terminal.node;
            NodeOrigin upsilonOrigin = upsilonWhere->origin;
            for (BasicBlock* successorBlock : block->successors()) {
                for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    Node* allocation = indexToNode[variable->index()];

                    Node* incoming = mapping.get(allocation);
                    DFG_ASSERT(m_graph, incoming, incoming != allocation);

                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }
            }

            m_insertionSet.execute(block);
        }

        // At this point we have dummy materialization nodes along with edges to them. This means
        // that the part of the control flow graph that prefers to see actual object allocations
        // is completely fixed up, except for the materializations themselves.
    }

    void lowerNonReadingOperationsOnPhantomAllocations()
    {
        // Lower everything but reading operations on phantom allocations. We absolutely have to
        // lower all writes so as to reveal them to the SSA calculator. We cannot lower reads
        // because the whole point is that those go away completely.
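        //
        // As a rough sketch of the kind of rewrite this does (hypothetical IR, operands
        // simplified), a store to a phantom allocation
        //
        //     PutByOffset(@butterfly, @phantomObject, @value)
        //
        // becomes a hint that records the promoted value without touching the heap:
        //
        //     PutHint(@phantomObject, @value)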

        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                switch (node->op()) {
                case PutByOffset: {
                    Node* target = node->child2().node();
                    if (m_sinkCandidates.contains(target)) {
                        ASSERT(target->isPhantomObjectAllocation());
                        node->convertToPutByOffsetHint();
                    }
                    break;
                }

                case PutClosureVar: {
                    Node* target = node->child1().node();
                    if (m_sinkCandidates.contains(target)) {
                        ASSERT(target->isPhantomActivationAllocation());
                        node->convertToPutClosureVarHint();
                    }
                    break;
                }

                case PutStructure: {
                    Node* target = node->child1().node();
                    if (m_sinkCandidates.contains(target)) {
                        ASSERT(target->isPhantomObjectAllocation());
                        Node* structure = m_insertionSet.insertConstant(
                            nodeIndex, node->origin, JSValue(node->transition()->next));
                        node->convertToPutStructureHint(structure);
                    }
                    break;
                }

                case NewObject: {
                    if (m_sinkCandidates.contains(node)) {
                        Node* structure = m_insertionSet.insertConstant(
                            nodeIndex + 1, node->origin, JSValue(node->structure()));
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            PromotedHeapLocation(StructurePLoc, node).createHint(
                                m_graph, node->origin, structure));
                        node->convertToPhantomNewObject();
                    }
                    break;
                }

                case MaterializeNewObject: {
                    if (m_sinkCandidates.contains(node)) {
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            PromotedHeapLocation(StructurePLoc, node).createHint(
                                m_graph, node->origin, m_graph.varArgChild(node, 0).node()));
                        for (unsigned i = 0; i < node->objectMaterializationData().m_properties.size(); ++i) {
                            unsigned identifierNumber =
                                node->objectMaterializationData().m_properties[i].m_identifierNumber;
                            m_insertionSet.insert(
                                nodeIndex + 1,
                                PromotedHeapLocation(
                                    NamedPropertyPLoc, node, identifierNumber).createHint(
                                    m_graph, node->origin,
                                    m_graph.varArgChild(node, i + 1).node()));
                        }
                        node->convertToPhantomNewObject();
                    }
                    break;
                }

                case NewFunction: {
                    if (m_sinkCandidates.contains(node)) {
                        Node* executable = m_insertionSet.insertConstant(
                            nodeIndex + 1, node->origin, node->cellOperand());
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            PromotedHeapLocation(FunctionExecutablePLoc, node).createHint(
                                m_graph, node->origin, executable));
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            PromotedHeapLocation(FunctionActivationPLoc, node).createHint(
                                m_graph, node->origin, node->child1().node()));
                        node->convertToPhantomNewFunction();
                    }
                    break;
                }

                case CreateActivation: {
                    if (m_sinkCandidates.contains(node)) {
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            PromotedHeapLocation(ActivationScopePLoc, node).createHint(
                                m_graph, node->origin, node->child1().node()));
                        node->convertToPhantomCreateActivation();
                    }
                    break;
                }

                case MaterializeCreateActivation: {
                    if (m_sinkCandidates.contains(node)) {
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            PromotedHeapLocation(ActivationScopePLoc, node).createHint(
                                m_graph, node->origin, m_graph.varArgChild(node, 0).node()));
                        ObjectMaterializationData& data = node->objectMaterializationData();
                        for (unsigned i = 0; i < data.m_properties.size(); ++i) {
                            unsigned identifierNumber = data.m_properties[i].m_identifierNumber;
                            m_insertionSet.insert(
                                nodeIndex + 1,
                                PromotedHeapLocation(
                                    ClosureVarPLoc, node, identifierNumber).createHint(
                                    m_graph, node->origin,
                                    m_graph.varArgChild(node, i + 1).node()));
                        }
                        node->convertToPhantomCreateActivation();
                    }
                    break;
                }

                case StoreBarrier:
                case StoreBarrierWithNullCheck: {
                    Node* target = node->child1().node();
                    if (m_sinkCandidates.contains(target)) {
                        ASSERT(target->isPhantomAllocation());
                        node->remove();
                    }
                    break;
                }

                default:
                    break;
                }

                m_graph.doToChildren(
                    node,
                    [&] (Edge& edge) {
                        if (m_sinkCandidates.contains(edge.node()))
                            edge.setUseKind(KnownCellUse);
                    });
            }
            m_insertionSet.execute(block);
        }
    }

    void promoteSunkenFields()
    {
        // Collect the set of heap locations that we will be operating over.
        HashSet<PromotedHeapLocation> locations;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block) {
                promoteHeapAccess(
                    node,
                    [&] (PromotedHeapLocation location, Edge) {
                        if (m_sinkCandidates.contains(location.base()))
                            locations.add(location);
                    },
                    [&] (PromotedHeapLocation location) {
                        if (m_sinkCandidates.contains(location.base()))
                            locations.add(location);
                    });
            }
        }

        // Figure out which locations belong to which allocations.
        m_locationsForAllocation.clear();
        for (PromotedHeapLocation location : locations) {
            auto result = m_locationsForAllocation.add(location.base(), Vector<PromotedHeapLocation>());
            ASSERT(!result.iterator->value.contains(location));
            result.iterator->value.append(location);
        }

        // For each sunken thingy, make sure we create Bottom values for all of its fields.
        // Note that this has the hilarious slight inefficiency of creating redundant hints for
        // things that were previously materializations. This should only impact compile times and
        // not code quality, and it's necessary for soundness without some data structure hackage.
        // For example, a MaterializeNewObject that we choose to sink may have new fields added to
        // it conditionally. That would necessitate Bottoms.
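        //
        // A hypothetical case where the Bottoms matter:
        //
        //     var o = {};
        //     if (p)
        //         o.f = 42;
        //
        // On the path where "p" is false, the NamedPropertyPLoc for "f" has no store, so it
        // needs a Bottom hint to keep the promoted state well-defined at the merge point.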
        Node* bottom = nullptr;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            if (block == m_graph.block(0))
                bottom = m_insertionSet.insertConstant(0, NodeOrigin(), jsUndefined());

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
                    m_insertionSet.insert(
                        nodeIndex + 1, location.createHint(m_graph, node->origin, bottom));
                }
            }
            m_insertionSet.execute(block);
        }

        m_ssaCalculator.reset();

        // Collect the set of "variables" that we will be sinking.
        m_locationToVariable.clear();
        m_indexToLocation.clear();
        for (PromotedHeapLocation location : locations) {
            SSACalculator::Variable* variable = m_ssaCalculator.newVariable();
            m_locationToVariable.add(location, variable);
            ASSERT(m_indexToLocation.size() == variable->index());
            m_indexToLocation.append(location);
        }

        // Create Defs from the existing hints.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block) {
                promoteHeapAccess(
                    node,
                    [&] (PromotedHeapLocation location, Edge value) {
                        if (!m_sinkCandidates.contains(location.base()))
                            return;
                        SSACalculator::Variable* variable = m_locationToVariable.get(location);
                        m_ssaCalculator.newDef(variable, block, value.node());
                    },
                    [&] (PromotedHeapLocation) { });
            }
        }

        // OMG run the SSA calculator to create Phis!
        m_ssaCalculator.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                PromotedHeapLocation location = m_indexToLocation[variable->index()];
                if (!m_combinedLiveness.liveAtHead[block].contains(location.base()))
                    return nullptr;

                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, NodeOrigin());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });

        // Place Phis in the right places, replace all uses of any load with the appropriate
        // value, and create the appropriate Upsilon nodes.
        m_graph.clearReplacements();
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            // This mapping table is intended to be lazy. If something is omitted from the table,
            // it means that there haven't been any local stores to that promoted heap location.
            m_localMapping.clear();

            // Insert the Phi functions that we had previously created.
            for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(block)) {
                PromotedHeapLocation location = m_indexToLocation[phiDef->variable()->index()];

                m_insertionSet.insert(
                    0, phiDef->value());
                m_insertionSet.insert(
                    0, location.createHint(m_graph, NodeOrigin(), phiDef->value()));
                m_localMapping.add(location, phiDef->value());
            }

            if (verbose)
                dataLog("Local mapping at ", pointerDump(block), ": ", mapDump(m_localMapping), "\n");

            // Process the block and replace all uses of loads with the promoted value.
            for (Node* node : *block) {
                m_graph.performSubstitution(node);

                if (Node* escapee = m_materializationToEscapee.get(node))
                    populateMaterialize(block, node, escapee);

                promoteHeapAccess(
                    node,
                    [&] (PromotedHeapLocation location, Edge value) {
                        if (m_sinkCandidates.contains(location.base()))
                            m_localMapping.set(location, value.node());
                    },
                    [&] (PromotedHeapLocation location) {
                        if (m_sinkCandidates.contains(location.base())) {
                            switch (node->op()) {
                            case CheckStructure:
                                node->convertToCheckStructureImmediate(resolve(block, location));
                                break;

                            default:
                                node->replaceWith(resolve(block, location));
                                break;
                            }
                        }
                    });
            }

            // Gotta drop some Upsilons.
            NodeAndIndex terminal = block->findTerminal();
            size_t upsilonInsertionPoint = terminal.index;
            NodeOrigin upsilonOrigin = terminal.node->origin;
            for (BasicBlock* successorBlock : block->successors()) {
                for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    PromotedHeapLocation location = m_indexToLocation[variable->index()];
                    Node* incoming = resolve(block, location);

                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }
            }

            m_insertionSet.execute(block);
        }
    }

    Node* resolve(BasicBlock* block, PromotedHeapLocation location)
    {
        if (Node* result = m_localMapping.get(location))
            return result;

        // This implies that there is no local mapping. Find a non-local mapping.
        SSACalculator::Def* def = m_ssaCalculator.nonLocalReachingDef(
            block, m_locationToVariable.get(location));
        ASSERT(def);
        ASSERT(def->value());
        m_localMapping.add(location, def->value());
        return def->value();
    }

    template<typename SinkCandidateFunctor, typename EscapeFunctor>
    void handleNode(
        Node* node,
        const SinkCandidateFunctor& sinkCandidate,
        const EscapeFunctor& escape)
    {
        switch (node->op()) {
        case NewObject:
        case MaterializeNewObject:
        case MaterializeCreateActivation:
            sinkCandidate();
            m_graph.doToChildren(
                node,
                [&] (Edge edge) {
                    escape(edge.node());
                });
            break;

        case NewFunction:
            if (!node->castOperand<FunctionExecutable*>()->singletonFunction()->isStillValid())
                sinkCandidate();
            escape(node->child1().node());
            break;

        case CreateActivation:
            if (!m_graph.symbolTableFor(node->origin.semantic)->singletonScope()->isStillValid())
                sinkCandidate();
            escape(node->child1().node());
            break;

        case Check:
            m_graph.doToChildren(
                node,
                [&] (Edge edge) {
                    if (edge.willNotHaveCheck())
                        return;

                    if (alreadyChecked(edge.useKind(), SpecObject))
                        return;

                    escape(edge.node());
                });
            break;

        case MovHint:
        case PutHint:
        case StoreBarrier:
        case StoreBarrierWithNullCheck:
            break;

        case PutStructure:
        case CheckStructure:
        case GetByOffset:
        case MultiGetByOffset:
        case GetGetterSetterByOffset: {
            Node* target = node->child1().node();
            if (!target->isObjectAllocation())
                escape(target);
            break;
        }

        case PutByOffset: {
            Node* target = node->child2().node();
            if (!target->isObjectAllocation()) {
                escape(target);
                escape(node->child1().node());
            }
            escape(node->child3().node());
            break;
        }

        case PutClosureVar: {
            Node* target = node->child1().node();
            if (!target->isActivationAllocation())
                escape(target);
            escape(node->child2().node());
            break;
        }

        case MultiPutByOffset:
            // FIXME: In the future we should be able to handle this. It's just a matter of
            // building the appropriate *Hint variant of this instruction, along with a
            // PhantomStructureSelect node - since this transforms the Structure in a conditional
            // way.
            // https://bugs.webkit.org/show_bug.cgi?id=136924
            escape(node->child1().node());
            escape(node->child2().node());
            break;

        default:
            m_graph.doToChildren(
                node,
                [&] (Edge edge) {
                    escape(edge.node());
                });
            break;
        }
    }

    Node* createMaterialize(Node* escapee, Node* where)
    {
        Node* result = nullptr;

        switch (escapee->op()) {
        case NewObject:
        case MaterializeNewObject: {
            ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();

            result = m_graph.addNode(
                escapee->prediction(), Node::VarArg, MaterializeNewObject,
                NodeOrigin(
                    escapee->origin.semantic,
                    where->origin.forExit),
                OpInfo(data), OpInfo(), 0, 0);
            break;
        }

        case NewFunction:
            result = m_graph.addNode(
                escapee->prediction(), NewFunction,
                NodeOrigin(
                    escapee->origin.semantic,
                    where->origin.forExit),
                OpInfo(escapee->cellOperand()),
                escapee->child1());
            break;

        case CreateActivation:
        case MaterializeCreateActivation: {
            ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();

            result = m_graph.addNode(
                escapee->prediction(), Node::VarArg, MaterializeCreateActivation,
                NodeOrigin(
                    escapee->origin.semantic,
                    where->origin.forExit),
                OpInfo(data), OpInfo(), 0, 0);
            break;
        }

        default:
            DFG_CRASH(m_graph, escapee, "Bad escapee op");
            break;
        }

        if (verbose)
            dataLog("Creating materialization point at ", where, " for ", escapee, ": ", result, "\n");

        m_materializationToEscapee.add(result, escapee);
        m_materializationSiteToMaterializations.add(
            where, Vector<Node*>()).iterator->value.append(result);

        return result;
    }

    void populateMaterialize(BasicBlock* block, Node* node, Node* escapee)
    {
        switch (node->op()) {
        case MaterializeNewObject: {
            ObjectMaterializationData& data = node->objectMaterializationData();
            unsigned firstChild = m_graph.m_varArgChildren.size();

            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);

            PromotedHeapLocation structure(StructurePLoc, escapee);
            ASSERT(locations.contains(structure));

            m_graph.m_varArgChildren.append(Edge(resolve(block, structure), KnownCellUse));

            for (unsigned i = 0; i < locations.size(); ++i) {
                switch (locations[i].kind()) {
                case StructurePLoc: {
                    ASSERT(locations[i] == structure);
                    break;
                }

                case NamedPropertyPLoc: {
                    Node* value = resolve(block, locations[i]);
                    if (value->op() == BottomValue) {
                        // We can skip Bottoms entirely.
                        break;
                    }

                    data.m_properties.append(PhantomPropertyValue(locations[i].info()));
                    m_graph.m_varArgChildren.append(value);
                    break;
                }

                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }

            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }

        case MaterializeCreateActivation: {
            ObjectMaterializationData& data = node->objectMaterializationData();

            unsigned firstChild = m_graph.m_varArgChildren.size();

            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);

            PromotedHeapLocation scope(ActivationScopePLoc, escapee);
            ASSERT(locations.contains(scope));

            m_graph.m_varArgChildren.append(Edge(resolve(block, scope), KnownCellUse));

            for (unsigned i = 0; i < locations.size(); ++i) {
                switch (locations[i].kind()) {
                case ActivationScopePLoc: {
                    ASSERT(locations[i] == scope);
                    break;
                }

                case ClosureVarPLoc: {
                    Node* value = resolve(block, locations[i]);
                    if (value->op() == BottomValue)
                        break;

                    data.m_properties.append(PhantomPropertyValue(locations[i].info()));
                    m_graph.m_varArgChildren.append(value);
                    break;
                }

                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }

            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }

        case NewFunction: {
            if (!ASSERT_DISABLED) {
                Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);

                ASSERT(locations.size() == 2);

                PromotedHeapLocation executable(FunctionExecutablePLoc, escapee);
                ASSERT(locations.contains(executable));

                PromotedHeapLocation activation(FunctionActivationPLoc, escapee);
                ASSERT(locations.contains(activation));

                for (unsigned i = 0; i < locations.size(); ++i) {
                    switch (locations[i].kind()) {
                    case FunctionExecutablePLoc: {
                        ASSERT(locations[i] == executable);
                        break;
                    }

                    case FunctionActivationPLoc: {
                        ASSERT(locations[i] == activation);
                        break;
                    }

                    default:
                        DFG_CRASH(m_graph, node, "Bad location kind");
                    }
                }
            }

            break;
        }

        default:
            DFG_CRASH(m_graph, node, "Bad materialize op");
            break;
        }
    }

    CombinedLiveness m_combinedLiveness;
    SSACalculator m_ssaCalculator;
    HashSet<Node*> m_sinkCandidates;
    HashMap<Node*, Node*> m_materializationToEscapee;
    HashMap<Node*, Vector<Node*>> m_materializationSiteToMaterializations;
    HashMap<Node*, Vector<PromotedHeapLocation>> m_locationsForAllocation;
    HashMap<PromotedHeapLocation, SSACalculator::Variable*> m_locationToVariable;
    Vector<PromotedHeapLocation> m_indexToLocation;
    HashMap<PromotedHeapLocation, Node*> m_localMapping;
    InsertionSet m_insertionSet;
};

bool performObjectAllocationSinking(Graph& graph)
{
    SamplingRegion samplingRegion("DFG Object Allocation Sinking Phase");
    return runPhase<ObjectAllocationSinkingPhase>(graph);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)