Unreviewed, rolling out r225362.
[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGObjectAllocationSinkingPhase.cpp
1 /*
2  * Copyright (C) 2015 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24  */
25
26 #include "config.h"
27 #include "DFGObjectAllocationSinkingPhase.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "DFGBlockMapInlines.h"
32 #include "DFGClobbersExitState.h"
33 #include "DFGCombinedLiveness.h"
34 #include "DFGGraph.h"
35 #include "DFGInsertionSet.h"
36 #include "DFGLazyNode.h"
37 #include "DFGLivenessAnalysisPhase.h"
38 #include "DFGOSRAvailabilityAnalysisPhase.h"
39 #include "DFGPhase.h"
40 #include "DFGPromotedHeapLocation.h"
41 #include "DFGSSACalculator.h"
42 #include "DFGValidate.h"
43 #include "JSCInlines.h"
44
45 #include <list>
46
47 namespace JSC { namespace DFG {
48
49 namespace {
50
51 namespace DFGObjectAllocationSinkingPhaseInternal {
52 static const bool verbose = false;
53 }
54
55 // In order to sink object cycles, we use a points-to analysis coupled
56 // with an escape analysis. This analysis is actually similar to an
57 // abstract interpreter focused on local allocations and ignoring
58 // everything else.
59 //
60 // We represent the local heap using two mappings:
61 //
62 // - A set of the local allocations present in the function, where
63 //   each of those have a further mapping from
64 //   PromotedLocationDescriptor to local allocations they must point
65 //   to.
66 //
67 // - A "pointer" mapping from nodes to local allocations, if they must
68 //   be equal to said local allocation and are currently live. This
69 //   can be because the node is the actual node that created the
70 //   allocation, or any other node that must currently point to it -
71 //   we don't make a difference.
72 //
73 // The following graph is a motivation for why we separate allocations
74 // from pointers:
75 //
76 // Block #0
77 //  0: NewObject({})
78 //  1: NewObject({})
79 //  -: PutByOffset(@0, @1, x)
80 //  -: PutStructure(@0, {x:0})
81 //  2: GetByOffset(@0, x)
82 //  -: Jump(#1)
83 //
84 // Block #1
85 //  -: Return(@2)
86 //
87 // Here, we need to remember in block #1 that @2 points to a local
88 // allocation with appropriate fields and structures information
89 // (because we should be able to place a materialization on top of
90 // block #1 here), even though @1 is dead. We *could* just keep @1
91 // artificially alive here, but there is no real reason to do it:
92 // after all, by the end of block #0, @1 and @2 should be completely
93 // interchangeable, and there is no reason for us to artificially make
94 // @1 more important.
95 //
96 // An important point to consider to understand this separation is
97 // that we should think of the local heap as follow: we have a
98 // bunch of nodes that are pointers to "allocations" that live
99 // someplace on the heap, and those allocations can have pointers in
100 // between themselves as well. We shouldn't care about whatever
101 // names we give to the allocations; what matters when
102 // comparing/merging two heaps is the isomorphism/comparison between
103 // the allocation graphs as seen by the nodes.
104 //
105 // For instance, in the following graph:
106 //
107 // Block #0
108 //  0: NewObject({})
109 //  -: Branch(#1, #2)
110 //
111 // Block #1
112 //  1: NewObject({})
113 //  -: PutByOffset(@0, @1, x)
114 //  -: PutStructure(@0, {x:0})
115 //  -: Jump(#3)
116 //
117 // Block #2
118 //  2: NewObject({})
119 //  -: PutByOffset(@2, undefined, x)
120 //  -: PutStructure(@2, {x:0})
121 //  -: PutByOffset(@0, @2, x)
122 //  -: PutStructure(@0, {x:0})
123 //  -: Jump(#3)
124 //
125 // Block #3
126 //  -: Return(@0)
127 //
128 // we should think of the heaps at tail of blocks #1 and #2 as being
129 // exactly the same, even though one has @0.x pointing to @1 and the
130 // other has @0.x pointing to @2, because in essence this should not
131 // be different from the graph where we hoisted @1 and @2 into a
132 // single allocation in block #0. We currently will not handle this
133 // case, because we merge allocations based on the node they are
134 // coming from, but this is only a technicality for the sake of
135 // simplicity that shouldn't hide the deeper idea outlined here.
136
137 class Allocation {
138 public:
139     // We use Escaped as a special allocation kind because when we
140     // decide to sink an allocation, we still need to keep track of it
141     // once it is escaped if it still has pointers to it in order to
142     // replace any use of those pointers by the corresponding
143     // materialization
144     enum class Kind { Escaped, Object, Activation, Function, GeneratorFunction, AsyncFunction, AsyncGeneratorFunction };
145
146     explicit Allocation(Node* identifier = nullptr, Kind kind = Kind::Escaped)
147         : m_identifier(identifier)
148         , m_kind(kind)
149     {
150     }
151
152
153     const HashMap<PromotedLocationDescriptor, Node*>& fields() const
154     {
155         return m_fields;
156     }
157
158     Node* get(PromotedLocationDescriptor descriptor)
159     {
160         return m_fields.get(descriptor);
161     }
162
163     Allocation& set(PromotedLocationDescriptor descriptor, Node* value)
164     {
165         // Pointing to anything else than an unescaped local
166         // allocation is represented by simply not having the
167         // field
168         if (value)
169             m_fields.set(descriptor, value);
170         else
171             m_fields.remove(descriptor);
172         return *this;
173     }
174
175     void remove(PromotedLocationDescriptor descriptor)
176     {
177         set(descriptor, nullptr);
178     }
179
180     bool hasStructures() const
181     {
182         switch (kind()) {
183         case Kind::Object:
184             return true;
185
186         default:
187             return false;
188         }
189     }
190
191     Allocation& setStructures(const RegisteredStructureSet& structures)
192     {
193         ASSERT(hasStructures() && !structures.isEmpty());
194         m_structures = structures;
195         return *this;
196     }
197
198     Allocation& mergeStructures(const RegisteredStructureSet& structures)
199     {
200         ASSERT(hasStructures() || structures.isEmpty());
201         m_structures.merge(structures);
202         return *this;
203     }
204
205     Allocation& filterStructures(const RegisteredStructureSet& structures)
206     {
207         ASSERT(hasStructures());
208         m_structures.filter(structures);
209         RELEASE_ASSERT(!m_structures.isEmpty());
210         return *this;
211     }
212
213     const RegisteredStructureSet& structures() const
214     {
215         return m_structures;
216     }
217
218     Node* identifier() const { return m_identifier; }
219
220     Kind kind() const { return m_kind; }
221
222     bool isEscapedAllocation() const
223     {
224         return kind() == Kind::Escaped;
225     }
226
227     bool isObjectAllocation() const
228     {
229         return m_kind == Kind::Object;
230     }
231
232     bool isActivationAllocation() const
233     {
234         return m_kind == Kind::Activation;
235     }
236
237     bool isFunctionAllocation() const
238     {
239         return m_kind == Kind::Function || m_kind == Kind::GeneratorFunction || m_kind == Kind::AsyncFunction;
240     }
241
242     bool operator==(const Allocation& other) const
243     {
244         return m_identifier == other.m_identifier
245             && m_kind == other.m_kind
246             && m_fields == other.m_fields
247             && m_structures == other.m_structures;
248     }
249
250     bool operator!=(const Allocation& other) const
251     {
252         return !(*this == other);
253     }
254
255     void dump(PrintStream& out) const
256     {
257         dumpInContext(out, nullptr);
258     }
259
260     void dumpInContext(PrintStream& out, DumpContext* context) const
261     {
262         switch (m_kind) {
263         case Kind::Escaped:
264             out.print("Escaped");
265             break;
266
267         case Kind::Object:
268             out.print("Object");
269             break;
270
271         case Kind::Function:
272             out.print("Function");
273             break;
274
275         case Kind::GeneratorFunction:
276             out.print("GeneratorFunction");
277             break;
278
279         case Kind::AsyncFunction:
280             out.print("AsyncFunction");
281             break;
282
283         case Kind::AsyncGeneratorFunction:
284             out.print("AsyncGeneratorFunction");
285             break;
286         case Kind::Activation:
287             out.print("Activation");
288             break;
289         }
290         out.print("Allocation(");
291         if (!m_structures.isEmpty())
292             out.print(inContext(m_structures.toStructureSet(), context));
293         if (!m_fields.isEmpty()) {
294             if (!m_structures.isEmpty())
295                 out.print(", ");
296             out.print(mapDump(m_fields, " => #", ", "));
297         }
298         out.print(")");
299     }
300
301 private:
302     Node* m_identifier; // This is the actual node that created the allocation
303     Kind m_kind;
304     HashMap<PromotedLocationDescriptor, Node*> m_fields;
305     RegisteredStructureSet m_structures;
306 };
307
// LocalHeap is the abstract heap state flowed through the CFG by the
// escape analysis: a set of Allocations keyed by their identifier node,
// plus a pointer map from live nodes to the allocation they must equal.
// See the long comment above for why allocations and pointers are kept
// separate.
class LocalHeap {
public:
    // Creates a fresh allocation for `node`, which becomes both the
    // allocation's identifier and the first pointer to it.
    Allocation& newAllocation(Node* node, Allocation::Kind kind)
    {
        ASSERT(!m_pointers.contains(node) && !isAllocation(node));
        m_pointers.add(node, node);
        return m_allocations.set(node, Allocation(node, kind)).iterator->value;
    }

    // True if `identifier` names a tracked allocation (escaped or not).
    bool isAllocation(Node* identifier) const
    {
        return m_allocations.contains(identifier);
    }

    // Note that this is fundamentally different from
    // onlyLocalAllocation() below. getAllocation() takes as argument
    // a node-as-identifier, that is, an allocation node. This
    // allocation node doesn't have to be alive; it may only be
    // pointed to by other nodes or allocation fields.
    // For instance, in the following graph:
    //
    // Block #0
    //  0: NewObject({})
    //  1: NewObject({})
    //  -: PutByOffset(@0, @1, x)
    //  -: PutStructure(@0, {x:0})
    //  2: GetByOffset(@0, x)
    //  -: Jump(#1)
    //
    // Block #1
    //  -: Return(@2)
    //
    // At head of block #1, the only reachable allocation is #@1,
    // which can be reached through node @2. Thus, getAllocation(#@1)
    // contains the appropriate metadata for this allocation, but
    // onlyLocalAllocation(@1) is null, as @1 is no longer a pointer
    // to #@1 (since it is dead). Conversely, onlyLocalAllocation(@2)
    // is the same as getAllocation(#@1), while getAllocation(#@2)
    // does not make sense since @2 is not an allocation node.
    //
    // This is meant to be used when the node is already known to be
    // an identifier (i.e. an allocation) - probably because it was
    // found as value of a field or pointer in the current heap, or
    // was the result of a call to follow(). In any other cases (such
    // as when doing anything while traversing the graph), the
    // appropriate function to call is probably onlyLocalAllocation.
    Allocation& getAllocation(Node* identifier)
    {
        auto iter = m_allocations.find(identifier);
        ASSERT(iter != m_allocations.end());
        return iter->value;
    }

    // Records that `node` must be equal to the allocation named by
    // `identifier`.
    void newPointer(Node* node, Node* identifier)
    {
        ASSERT(!m_allocations.contains(node) && !m_pointers.contains(node));
        ASSERT(isAllocation(identifier));
        m_pointers.add(node, identifier);
    }

    // follow solves the points-to problem. Given a live node, which
    // may be either an allocation itself or a heap read (e.g. a
    // GetByOffset node), it returns the corresponding allocation
    // node, if there is one. If the argument node is neither an
    // allocation or a heap read, or may point to different nodes,
    // nullptr will be returned. Note that a node that points to
    // different nodes can never point to an unescaped local
    // allocation.
    Node* follow(Node* node) const
    {
        auto iter = m_pointers.find(node);
        ASSERT(iter == m_pointers.end() || m_allocations.contains(iter->value));
        return iter == m_pointers.end() ? nullptr : iter->value;
    }

    // Resolves a promoted heap location (base allocation + field
    // descriptor) to the allocation node stored in that field, if any.
    // The base of `location` must be a tracked allocation identifier.
    Node* follow(PromotedHeapLocation location) const
    {
        const Allocation& base = m_allocations.find(location.base())->value;
        auto iter = base.fields().find(location.descriptor());

        if (iter == base.fields().end())
            return nullptr;

        return iter->value;
    }

    // onlyLocalAllocation find the corresponding allocation metadata
    // for any live node. onlyLocalAllocation(node) is essentially
    // getAllocation(follow(node)), with appropriate null handling.
    Allocation* onlyLocalAllocation(Node* node)
    {
        Node* identifier = follow(node);
        if (!identifier)
            return nullptr;

        return &getAllocation(identifier);
    }

    Allocation* onlyLocalAllocation(PromotedHeapLocation location)
    {
        Node* identifier = follow(location);
        if (!identifier)
            return nullptr;

        return &getAllocation(identifier);
    }

    // This allows us to store the escapees only when necessary. If
    // set, the current escapees can be retrieved at any time using
    // takeEscapees(), which will clear the cached set of escapees;
    // otherwise the heap won't remember escaping allocations.
    void setWantEscapees()
    {
        m_wantEscapees = true;
    }

    HashMap<Node*, Allocation> takeEscapees()
    {
        return WTFMove(m_escapees);
    }

    // Escapes whatever allocation `node` points to, if any. Escaping is
    // transitive over fields (see escapeAllocation below).
    void escape(Node* node)
    {
        Node* identifier = follow(node);
        if (!identifier)
            return;

        escapeAllocation(identifier);
    }

    // Merges `other` into this heap at a control-flow join. Allocations
    // or fields that disagree between the two heaps are escaped; an
    // unreached heap (e.g. at the head of a block seen for the first
    // time) is simply replaced by `other`.
    void merge(const LocalHeap& other)
    {
        assertIsValid();
        other.assertIsValid();
        ASSERT(!m_wantEscapees);

        if (!reached()) {
            ASSERT(other.reached());
            *this = other;
            return;
        }

        NodeSet toEscape;

        for (auto& allocationEntry : other.m_allocations)
            m_allocations.add(allocationEntry.key, allocationEntry.value);
        for (auto& allocationEntry : m_allocations) {
            auto allocationIter = other.m_allocations.find(allocationEntry.key);

            // If we have it and they don't, it died for them but we
            // are keeping it alive from another field somewhere.
            // There is nothing to do - we will be escaped
            // automatically when we handle that other field.
            // This will also happen for allocation that we have and
            // they don't, and all of those will get pruned.
            if (allocationIter == other.m_allocations.end())
                continue;

            if (allocationEntry.value.kind() != allocationIter->value.kind()) {
                // Same identifier but different kinds: the allocation
                // (and everything the other heap's version points to)
                // must escape.
                toEscape.addVoid(allocationEntry.key);
                for (const auto& fieldEntry : allocationIter->value.fields())
                    toEscape.addVoid(fieldEntry.value);
            } else {
                mergePointerSets(
                    allocationEntry.value.fields(), allocationIter->value.fields(),
                    [&] (Node* identifier) {
                        toEscape.addVoid(identifier);
                    },
                    [&] (PromotedLocationDescriptor field) {
                        allocationEntry.value.remove(field);
                    });
                allocationEntry.value.mergeStructures(allocationIter->value.structures());
            }
        }

        mergePointerSets(m_pointers, other.m_pointers,
            [&] (Node* identifier) {
                toEscape.addVoid(identifier);
            },
            [&] (Node* field) {
                m_pointers.remove(field);
            });

        for (Node* identifier : toEscape)
            escapeAllocation(identifier);

        if (!ASSERT_DISABLED) {
            for (const auto& entry : m_allocations)
                ASSERT_UNUSED(entry, entry.value.isEscapedAllocation() || other.m_allocations.contains(entry.key));
        }

        // If there is no remaining pointer to an allocation, we can
        // remove it. This should only happen for escaped allocations,
        // because we only merge liveness-pruned heaps in the first
        // place.
        prune();

        assertIsValid();
    }

    // Drops pointers whose node is not in `live`, then removes any
    // allocations that became unreachable as a result.
    void pruneByLiveness(const NodeSet& live)
    {
        Vector<Node*> toRemove;
        for (const auto& entry : m_pointers) {
            if (!live.contains(entry.key))
                toRemove.append(entry.key);
        }
        for (Node* node : toRemove)
            m_pointers.remove(node);

        prune();
    }

    void assertIsValid() const
    {
        if (ASSERT_DISABLED)
            return;

        // Pointers should point to an actual allocation
        for (const auto& entry : m_pointers) {
            ASSERT_UNUSED(entry, entry.value);
            ASSERT(m_allocations.contains(entry.value));
        }

        for (const auto& allocationEntry : m_allocations) {
            // Fields should point to an actual allocation
            for (const auto& fieldEntry : allocationEntry.value.fields()) {
                ASSERT_UNUSED(fieldEntry, fieldEntry.value);
                ASSERT(m_allocations.contains(fieldEntry.value));
            }
        }
    }

    bool operator==(const LocalHeap& other) const
    {
        assertIsValid();
        other.assertIsValid();
        return m_allocations == other.m_allocations
            && m_pointers == other.m_pointers;
    }

    bool operator!=(const LocalHeap& other) const
    {
        return !(*this == other);
    }

    const HashMap<Node*, Allocation>& allocations() const
    {
        return m_allocations;
    }

    const HashMap<Node*, Node*>& pointers() const
    {
        return m_pointers;
    }

    void dump(PrintStream& out) const
    {
        out.print("  Allocations:\n");
        for (const auto& entry : m_allocations)
            out.print("    #", entry.key, ": ", entry.value, "\n");
        out.print("  Pointers:\n");
        for (const auto& entry : m_pointers)
            out.print("    ", entry.key, " => #", entry.value, "\n");
    }

    // Whether this heap has received any state yet; an unreached heap
    // is an identity element for merge().
    bool reached() const
    {
        return m_reached;
    }

    void setReached()
    {
        m_reached = true;
    }

private:
    // When we merge two heaps, we escape all fields of allocations,
    // unless they point to the same thing in both heaps.
    // The reason for this is that it allows us not to do extra work
    // for diamond graphs where we would otherwise have to check
    // whether we have a single definition or not, which would be
    // cumbersome.
    //
    // Note that we should try to unify nodes even when they are not
    // from the same allocation; for instance we should be able to
    // completely eliminate all allocations from the following graph:
    //
    // Block #0
    //  0: NewObject({})
    //  -: Branch(#1, #2)
    //
    // Block #1
    //  1: NewObject({})
    //  -: PutByOffset(@1, "left", val)
    //  -: PutStructure(@1, {val:0})
    //  -: PutByOffset(@0, @1, x)
    //  -: PutStructure(@0, {x:0})
    //  -: Jump(#3)
    //
    // Block #2
    //  2: NewObject({})
    //  -: PutByOffset(@2, "right", val)
    //  -: PutStructure(@2, {val:0})
    //  -: PutByOffset(@0, @2, x)
    //  -: PutStructure(@0, {x:0})
    //  -: Jump(#3)
    //
    // Block #3:
    //  3: GetByOffset(@0, x)
    //  4: GetByOffset(@3, val)
    //  -: Return(@4)
    template<typename Key, typename EscapeFunctor, typename RemoveFunctor>
    void mergePointerSets(
        const HashMap<Key, Node*>& my, const HashMap<Key, Node*>& their,
        const EscapeFunctor& escape, const RemoveFunctor& remove)
    {
        // Removals are deferred so that `my` is not mutated while being
        // iterated; `remove` is expected to erase keys from `my`.
        Vector<Key> toRemove;
        for (const auto& entry : my) {
            auto iter = their.find(entry.key);
            if (iter == their.end()) {
                toRemove.append(entry.key);
                escape(entry.value);
            } else if (iter->value != entry.value) {
                toRemove.append(entry.key);
                escape(entry.value);
                escape(iter->value);
            }
        }
        for (const auto& entry : their) {
            if (my.contains(entry.key))
                continue;
            escape(entry.value);
        }
        for (Key key : toRemove)
            remove(key);
    }

    // Marks an allocation as escaped, recursively escaping everything
    // its fields pointed to, and (if requested via setWantEscapees)
    // stashing the pre-escape state for later materialization.
    void escapeAllocation(Node* identifier)
    {
        Allocation& allocation = getAllocation(identifier);
        if (allocation.isEscapedAllocation())
            return;

        Allocation unescaped = WTFMove(allocation);
        allocation = Allocation(unescaped.identifier(), Allocation::Kind::Escaped);

        for (const auto& entry : unescaped.fields())
            escapeAllocation(entry.value);

        if (m_wantEscapees)
            m_escapees.add(unescaped.identifier(), WTFMove(unescaped));
    }

    // Removes allocations that are no longer reachable from any pointer,
    // directly or through fields of reachable allocations.
    void prune()
    {
        NodeSet reachable;
        for (const auto& entry : m_pointers)
            reachable.addVoid(entry.value);

        // Repeatedly mark as reachable allocations in fields of other
        // reachable allocations
        {
            Vector<Node*> worklist;
            worklist.appendRange(reachable.begin(), reachable.end());

            while (!worklist.isEmpty()) {
                Node* identifier = worklist.takeLast();
                Allocation& allocation = m_allocations.find(identifier)->value;
                for (const auto& entry : allocation.fields()) {
                    if (reachable.add(entry.value).isNewEntry)
                        worklist.append(entry.value);
                }
            }
        }

        // Remove unreachable allocations
        {
            Vector<Node*> toRemove;
            for (const auto& entry : m_allocations) {
                if (!reachable.contains(entry.key))
                    toRemove.append(entry.key);
            }
            for (Node* identifier : toRemove)
                m_allocations.remove(identifier);
        }
    }

    bool m_reached = false;
    HashMap<Node*, Node*> m_pointers;
    HashMap<Node*, Allocation> m_allocations;

    bool m_wantEscapees = false;
    HashMap<Node*, Allocation> m_escapees;
};
703
704 class ObjectAllocationSinkingPhase : public Phase {
705 public:
    // Sets up the two SSA calculators (one for promoted-location values,
    // one for materialized allocations) and the insertion set used to
    // rewrite the graph.
    ObjectAllocationSinkingPhase(Graph& graph)
        : Phase(graph, "object allocation elimination")
        , m_pointerSSA(graph)
        , m_allocationSSA(graph)
        , m_insertionSet(graph)
    {
    }
713
714     bool run()
715     {
716         ASSERT(m_graph.m_form == SSA);
717         ASSERT(m_graph.m_fixpointState == FixpointNotConverged);
718
719         if (!performSinking())
720             return false;
721
722         if (DFGObjectAllocationSinkingPhaseInternal::verbose) {
723             dataLog("Graph after elimination:\n");
724             m_graph.dump();
725         }
726
727         return true;
728     }
729
730 private:
    // Drives the whole phase: runs the prerequisite analyses, the escape
    // analysis, candidate selection, and finally the graph rewrite.
    // Returns false if nothing could be sunk.
    bool performSinking()
    {
        // Prerequisite analyses: ref counts and node owners for the
        // transformation; dominators, liveness and OSR availability for
        // the escape analysis and materialization placement.
        m_graph.computeRefCounts();
        m_graph.initializeNodeOwners();
        m_graph.ensureSSADominators();
        performLivenessAnalysis(m_graph);
        performOSRAvailabilityAnalysis(m_graph);
        m_combinedLiveness = CombinedLiveness(m_graph);

        // Snapshot the graph up front so a later validation failure can
        // report the before/after pair.
        CString graphBeforeSinking;
        if (Options::verboseValidationFailure() && Options::validateGraphAtEachPhase()) {
            StringPrintStream out;
            m_graph.dump(out);
            graphBeforeSinking = out.toCString();
        }

        if (DFGObjectAllocationSinkingPhaseInternal::verbose) {
            dataLog("Graph before elimination:\n");
            m_graph.dump();
        }

        // Run the escape analysis to a fixpoint, filling m_heapAtHead and
        // m_heapAtTail for every block.
        performAnalysis();

        if (!determineSinkCandidates())
            return false;

        if (DFGObjectAllocationSinkingPhaseInternal::verbose) {
            for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
                dataLog("Heap at head of ", *block, ": \n", m_heapAtHead[block]);
                dataLog("Heap at tail of ", *block, ": \n", m_heapAtTail[block]);
            }
        }

        // Rewrite the graph: sink the candidate allocations and insert
        // materializations where they escape.
        promoteLocalHeap();

        if (Options::validateGraphAtEachPhase())
            DFG::validate(m_graph, DumpGraph, graphBeforeSinking);
        return true;
    }
770
    // Abstract-interprets every block repeatedly until the heaps at the
    // tails of all blocks stop changing (a standard forward dataflow
    // fixpoint over the LocalHeap lattice).
    void performAnalysis()
    {
        m_heapAtHead = BlockMap<LocalHeap>(m_graph);
        m_heapAtTail = BlockMap<LocalHeap>(m_graph);

        bool changed;
        do {
            if (DFGObjectAllocationSinkingPhaseInternal::verbose)
                dataLog("Doing iteration of escape analysis.\n");
            changed = false;

            for (BasicBlock* block : m_graph.blocksInPreOrder()) {
                m_heapAtHead[block].setReached();
                m_heap = m_heapAtHead[block];

                // During the analysis pass we neither write promoted heap
                // locations nor resolve them; we only track allocations,
                // pointers and escapes.
                for (Node* node : *block) {
                    handleNode(
                        node,
                        [] (PromotedHeapLocation, LazyNode) { },
                        [&] (PromotedHeapLocation) -> Node* {
                            return nullptr;
                        });
                }

                if (m_heap == m_heapAtTail[block])
                    continue;

                m_heapAtTail[block] = m_heap;
                changed = true;

                m_heap.assertIsValid();

                // We keep only pointers that are live, and only
                // allocations that are either live, pointed to by a
                // live pointer, or (recursively) stored in a field of
                // a live allocation.
                //
                // This means we can accidentally leak non-dominating
                // nodes into the successor. However, due to the
                // non-dominance property, we are guaranteed that the
                // successor has at least one predecessor that is not
                // dominated either: this means any reference to a
                // non-dominating allocation in the successor will
                // trigger an escape and get pruned during the merge.
                m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

                for (BasicBlock* successorBlock : block->successors())
                    m_heapAtHead[successorBlock].merge(m_heap);
            }
        } while (changed);
    }
822
823     template<typename WriteFunctor, typename ResolveFunctor>
824     void handleNode(
825         Node* node,
826         const WriteFunctor& heapWrite,
827         const ResolveFunctor& heapResolve)
828     {
829         m_heap.assertIsValid();
830         ASSERT(m_heap.takeEscapees().isEmpty());
831
832         Allocation* target = nullptr;
833         HashMap<PromotedLocationDescriptor, LazyNode> writes;
834         PromotedLocationDescriptor exactRead;
835
836         switch (node->op()) {
837         case NewObject:
838             target = &m_heap.newAllocation(node, Allocation::Kind::Object);
839             target->setStructures(node->structure());
840             writes.add(
841                 StructurePLoc, LazyNode(m_graph.freeze(node->structure().get())));
842             break;
843
844         case NewFunction:
845         case NewGeneratorFunction:
846         case NewAsyncGeneratorFunction:
847         case NewAsyncFunction: {
848             if (isStillValid(node->castOperand<FunctionExecutable*>()->singletonFunction())) {
849                 m_heap.escape(node->child1().node());
850                 break;
851             }
852
853             if (node->op() == NewGeneratorFunction)
854                 target = &m_heap.newAllocation(node, Allocation::Kind::GeneratorFunction);
855             else if (node->op() == NewAsyncFunction)
856                 target = &m_heap.newAllocation(node, Allocation::Kind::AsyncFunction);
857             else if (node->op() == NewAsyncGeneratorFunction)
858                 target = &m_heap.newAllocation(node, Allocation::Kind::AsyncGeneratorFunction);
859             else
860                 target = &m_heap.newAllocation(node, Allocation::Kind::Function);
861
862             writes.add(FunctionExecutablePLoc, LazyNode(node->cellOperand()));
863             writes.add(FunctionActivationPLoc, LazyNode(node->child1().node()));
864             break;
865         }
866
867         case CreateActivation: {
868             if (isStillValid(node->castOperand<SymbolTable*>()->singletonScope())) {
869                 m_heap.escape(node->child1().node());
870                 break;
871             }
872             target = &m_heap.newAllocation(node, Allocation::Kind::Activation);
873             writes.add(ActivationSymbolTablePLoc, LazyNode(node->cellOperand()));
874             writes.add(ActivationScopePLoc, LazyNode(node->child1().node()));
875             {
876                 SymbolTable* symbolTable = node->castOperand<SymbolTable*>();
877                 ConcurrentJSLocker locker(symbolTable->m_lock);
878                 LazyNode initialValue(m_graph.freeze(node->initializationValueForActivation()));
879                 for (auto iter = symbolTable->begin(locker), end = symbolTable->end(locker); iter != end; ++iter) {
880                     writes.add(
881                         PromotedLocationDescriptor(ClosureVarPLoc, iter->value.scopeOffset().offset()),
882                         initialValue);
883                 }
884             }
885             break;
886         }
887
888         case PutStructure:
889             target = m_heap.onlyLocalAllocation(node->child1().node());
890             if (target && target->isObjectAllocation()) {
891                 writes.add(StructurePLoc, LazyNode(m_graph.freeze(JSValue(node->transition()->next.get()))));
892                 target->setStructures(node->transition()->next);
893             } else
894                 m_heap.escape(node->child1().node());
895             break;
896
897         case CheckStructureOrEmpty:
898         case CheckStructure: {
899             Allocation* allocation = m_heap.onlyLocalAllocation(node->child1().node());
900             if (allocation && allocation->isObjectAllocation()) {
901                 RegisteredStructureSet filteredStructures = allocation->structures();
902                 filteredStructures.filter(node->structureSet());
903                 if (filteredStructures.isEmpty()) {
904                     // FIXME: Write a test for this:
905                     // https://bugs.webkit.org/show_bug.cgi?id=174322
906                     m_heap.escape(node->child1().node());
907                     break;
908                 }
909                 allocation->setStructures(filteredStructures);
910                 if (Node* value = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc)))
911                     node->convertToCheckStructureImmediate(value);
912             } else
913                 m_heap.escape(node->child1().node());
914             break;
915         }
916
917         case GetByOffset:
918         case GetGetterSetterByOffset:
919             target = m_heap.onlyLocalAllocation(node->child2().node());
920             if (target && target->isObjectAllocation()) {
921                 unsigned identifierNumber = node->storageAccessData().identifierNumber;
922                 exactRead = PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber);
923             } else {
924                 m_heap.escape(node->child1().node());
925                 m_heap.escape(node->child2().node());
926             }
927             break;
928
929         case MultiGetByOffset: {
930             Allocation* allocation = m_heap.onlyLocalAllocation(node->child1().node());
931             if (allocation && allocation->isObjectAllocation()) {
932                 MultiGetByOffsetData& data = node->multiGetByOffsetData();
933                 RegisteredStructureSet validStructures;
934                 bool hasInvalidStructures = false;
935                 for (const auto& multiGetByOffsetCase : data.cases) {
936                     if (!allocation->structures().overlaps(multiGetByOffsetCase.set()))
937                         continue;
938
939                     switch (multiGetByOffsetCase.method().kind()) {
940                     case GetByOffsetMethod::LoadFromPrototype: // We need to escape those
941                     case GetByOffsetMethod::Constant: // We don't really have a way of expressing this
942                         hasInvalidStructures = true;
943                         break;
944
945                     case GetByOffsetMethod::Load: // We're good
946                         validStructures.merge(multiGetByOffsetCase.set());
947                         break;
948
949                     default:
950                         RELEASE_ASSERT_NOT_REACHED();
951                     }
952                 }
953                 if (hasInvalidStructures || validStructures.isEmpty()) {
954                     m_heap.escape(node->child1().node());
955                     break;
956                 }
957                 unsigned identifierNumber = data.identifierNumber;
958                 PromotedHeapLocation location(NamedPropertyPLoc, allocation->identifier(), identifierNumber);
959                 if (Node* value = heapResolve(location)) {
960                     if (allocation->structures().isSubsetOf(validStructures))
961                         node->replaceWith(value);
962                     else {
963                         Node* structure = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc));
964                         ASSERT(structure);
965                         allocation->filterStructures(validStructures);
966                         node->convertToCheckStructure(m_graph.addStructureSet(allocation->structures()));
967                         node->convertToCheckStructureImmediate(structure);
968                         node->setReplacement(value);
969                     }
970                 } else if (!allocation->structures().isSubsetOf(validStructures)) {
971                     // Even though we don't need the result here, we still need
972                     // to make the call to tell our caller that we could need
973                     // the StructurePLoc.
974                     // The reason for this is that when we decide not to sink a
975                     // node, we will still lower any read to its fields before
976                     // it escapes (which are usually reads across a function
977                     // call that DFGClobberize can't handle) - but we only do
978                     // this for PromotedHeapLocations that we have seen read
979                     // during the analysis!
980                     heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc));
981                     allocation->filterStructures(validStructures);
982                 }
983                 Node* identifier = allocation->get(location.descriptor());
984                 if (identifier)
985                     m_heap.newPointer(node, identifier);
986             } else
987                 m_heap.escape(node->child1().node());
988             break;
989         }
990
991         case PutByOffset:
992             target = m_heap.onlyLocalAllocation(node->child2().node());
993             if (target && target->isObjectAllocation()) {
994                 unsigned identifierNumber = node->storageAccessData().identifierNumber;
995                 writes.add(
996                     PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber),
997                     LazyNode(node->child3().node()));
998             } else {
999                 m_heap.escape(node->child1().node());
1000                 m_heap.escape(node->child2().node());
1001                 m_heap.escape(node->child3().node());
1002             }
1003             break;
1004
1005         case GetClosureVar:
1006             target = m_heap.onlyLocalAllocation(node->child1().node());
1007             if (target && target->isActivationAllocation()) {
1008                 exactRead =
1009                     PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset());
1010             } else
1011                 m_heap.escape(node->child1().node());
1012             break;
1013
1014         case PutClosureVar:
1015             target = m_heap.onlyLocalAllocation(node->child1().node());
1016             if (target && target->isActivationAllocation()) {
1017                 writes.add(
1018                     PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset()),
1019                     LazyNode(node->child2().node()));
1020             } else {
1021                 m_heap.escape(node->child1().node());
1022                 m_heap.escape(node->child2().node());
1023             }
1024             break;
1025
1026         case SkipScope:
1027             target = m_heap.onlyLocalAllocation(node->child1().node());
1028             if (target && target->isActivationAllocation())
1029                 exactRead = ActivationScopePLoc;
1030             else
1031                 m_heap.escape(node->child1().node());
1032             break;
1033
1034         case GetExecutable:
1035             target = m_heap.onlyLocalAllocation(node->child1().node());
1036             if (target && target->isFunctionAllocation())
1037                 exactRead = FunctionExecutablePLoc;
1038             else
1039                 m_heap.escape(node->child1().node());
1040             break;
1041
1042         case GetScope:
1043             target = m_heap.onlyLocalAllocation(node->child1().node());
1044             if (target && target->isFunctionAllocation())
1045                 exactRead = FunctionActivationPLoc;
1046             else
1047                 m_heap.escape(node->child1().node());
1048             break;
1049
1050         case Check:
1051             m_graph.doToChildren(
1052                 node,
1053                 [&] (Edge edge) {
1054                     if (edge.willNotHaveCheck())
1055                         return;
1056
1057                     if (alreadyChecked(edge.useKind(), SpecObject))
1058                         return;
1059
1060                     m_heap.escape(edge.node());
1061                 });
1062             break;
1063
1064         case MovHint:
1065         case PutHint:
1066             // Handled by OSR availability analysis
1067             break;
1068
1069         default:
1070             m_graph.doToChildren(
1071                 node,
1072                 [&] (Edge edge) {
1073                     m_heap.escape(edge.node());
1074                 });
1075             break;
1076         }
1077
1078         if (exactRead) {
1079             ASSERT(target);
1080             ASSERT(writes.isEmpty());
1081             if (Node* value = heapResolve(PromotedHeapLocation(target->identifier(), exactRead))) {
1082                 ASSERT(!value->replacement());
1083                 node->replaceWith(value);
1084             }
1085             Node* identifier = target->get(exactRead);
1086             if (identifier)
1087                 m_heap.newPointer(node, identifier);
1088         }
1089
1090         for (auto entry : writes) {
1091             ASSERT(target);
1092             if (entry.value.isNode())
1093                 target->set(entry.key, m_heap.follow(entry.value.asNode()));
1094             else
1095                 target->remove(entry.key);
1096             heapWrite(PromotedHeapLocation(target->identifier(), entry.key), entry.value);
1097         }
1098
1099         m_heap.assertIsValid();
1100     }
1101
    // Computes the set of allocations we will sink (m_sinkCandidates), closes
    // that set under "sunk allocation stored into another local allocation",
    // and places materialization nodes at every point where a candidate
    // escapes. Returns true if a subsequent rewriting pass is needed, i.e. if
    // we either found sink candidates or observed reads of unescaped
    // allocations that must be lowered.
    bool determineSinkCandidates()
    {
        m_sinkCandidates.clear();
        m_materializationToEscapee.clear();
        m_materializationSiteToMaterializations.clear();
        m_materializationSiteToRecoveries.clear();
        m_materializationSiteToHints.clear();

        // Logically we wish to consider every allocation and sink
        // it. However, it is probably not profitable to sink an
        // allocation that will always escape. So, we only sink an
        // allocation if one of the following is true:
        //
        // 1) There exists a basic block with only backwards outgoing
        //    edges (or no outgoing edges) in which the node wasn't
        //    materialized. This is meant to catch
        //    effectively-infinite loops in which we don't need to
        //    have allocated the object.
        //
        // 2) There exists a basic block at the tail of which the node
        //    is dead and not materialized.
        //
        // 3) The sum of execution counts of the materializations is
        //    less than the sum of execution counts of the original
        //    node.
        //
        // We currently implement only rule #2.
        // FIXME: Implement the two other rules.
        // https://bugs.webkit.org/show_bug.cgi?id=137073 (rule #1)
        // https://bugs.webkit.org/show_bug.cgi?id=137074 (rule #3)
        //
        // However, these rules allow for a sunk object to be put into
        // a non-sunk one, which we don't support. We could solve this
        // by supporting PutHints on local allocations, making these
        // objects only partially correct, and we would need to adapt
        // the OSR availability analysis and OSR exit to handle
        // this. This would be totally doable, but would create a
        // super rare, and thus bug-prone, code path.
        // So, instead, we need to implement one of the following
        // closure rules:
        //
        // 1) If we put a sink candidate into a local allocation that
        //    is not a sink candidate, change our minds and don't
        //    actually sink the sink candidate.
        //
        // 2) If we put a sink candidate into a local allocation, that
        //    allocation becomes a sink candidate as well.
        //
        // We currently choose to implement closure rule #2.
        //
        // dependencies maps an allocation's identifier to the identifiers of
        // the local allocations it was stored into; it drives the closure
        // worklist below.
        HashMap<Node*, Vector<Node*>> dependencies;
        bool hasUnescapedReads = false;
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            // Replay the abstract heap from the fixpointed block-head state.
            m_heap = m_heapAtHead[block];

            for (Node* node : *block) {
                handleNode(
                    node,
                    // Write callback: record that the unescaped allocation
                    // `value` is stored into `location.base()` so closure
                    // rule #2 can be applied later.
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        if (!value.isNode())
                            return;

                        Allocation* allocation = m_heap.onlyLocalAllocation(value.asNode());
                        if (allocation && !allocation->isEscapedAllocation())
                            dependencies.add(allocation->identifier(), Vector<Node*>()).iterator->value.append(location.base());
                    },
                    // Read callback: we only need to know that some unescaped
                    // read exists; the rewrite pass must then run even if no
                    // candidate is found.
                    [&] (PromotedHeapLocation) -> Node* {
                        hasUnescapedReads = true;
                        return nullptr;
                    });
            }

            // The sink candidates are initially the unescaped
            // allocations dying at tail of blocks
            NodeSet allocations;
            for (const auto& entry : m_heap.allocations()) {
                if (!entry.value.isEscapedAllocation())
                    allocations.addVoid(entry.key);
            }

            m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

            // Anything that was an unescaped allocation before pruning but is
            // gone afterwards is dead at the block tail: rule #2 applies.
            for (Node* identifier : allocations) {
                if (!m_heap.isAllocation(identifier))
                    m_sinkCandidates.addVoid(identifier);
            }
        }

        // Ensure that the set of sink candidates is closed for put operations
        // (closure rule #2): anything a candidate was stored into becomes a
        // candidate too, transitively.
        Vector<Node*> worklist;
        worklist.appendRange(m_sinkCandidates.begin(), m_sinkCandidates.end());

        while (!worklist.isEmpty()) {
            for (Node* identifier : dependencies.get(worklist.takeLast())) {
                if (m_sinkCandidates.add(identifier).isNewEntry)
                    worklist.append(identifier);
            }
        }

        if (m_sinkCandidates.isEmpty())
            return hasUnescapedReads;

        if (DFGObjectAllocationSinkingPhaseInternal::verbose)
            dataLog("Candidates: ", listDump(m_sinkCandidates), "\n");

        // Create the materialization nodes
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            m_heap = m_heapAtHead[block];
            // Ask the heap to collect allocations that escape at each node so
            // we can materialize them right before the escape point.
            m_heap.setWantEscapees();

            for (Node* node : *block) {
                handleNode(
                    node,
                    [] (PromotedHeapLocation, LazyNode) { },
                    [] (PromotedHeapLocation) -> Node* {
                        return nullptr;
                    });
                auto escapees = m_heap.takeEscapees();
                if (!escapees.isEmpty())
                    placeMaterializations(escapees, node);
            }

            m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

            {
                // An allocation that is still unescaped at the block tail but
                // is not tracked (or is escaped) at the head of some successor
                // must be materialized on the edge, i.e. at this block's
                // terminal.
                HashMap<Node*, Allocation> escapingOnEdge;
                for (const auto& entry : m_heap.allocations()) {
                    if (entry.value.isEscapedAllocation())
                        continue;

                    bool mustEscape = false;
                    for (BasicBlock* successorBlock : block->successors()) {
                        if (!m_heapAtHead[successorBlock].isAllocation(entry.key)
                            || m_heapAtHead[successorBlock].getAllocation(entry.key).isEscapedAllocation())
                            mustEscape = true;
                    }

                    if (mustEscape)
                        escapingOnEdge.add(entry.key, entry.value);
                }
                placeMaterializations(WTFMove(escapingOnEdge), block->terminal());
            }
        }

        return hasUnescapedReads || !m_sinkCandidates.isEmpty();
    }
1247
    // Plans the materialization of the given escaping allocations at program
    // point `where`: filters out non-candidates, orders the materializations
    // so that dependencies are satisfied with as few post-materialization
    // "recovery" Puts as possible (greedy feedback-vertex-set heuristic), and
    // records the resulting materializations, recoveries, and PutHints in the
    // m_materializationSiteTo* maps for the later rewriting pass.
    void placeMaterializations(HashMap<Node*, Allocation> escapees, Node* where)
    {
        // We don't create materializations if the escapee is not a
        // sink candidate
        Vector<Node*> toRemove;
        for (const auto& entry : escapees) {
            if (!m_sinkCandidates.contains(entry.key))
                toRemove.append(entry.key);
        }
        for (Node* identifier : toRemove)
            escapees.remove(identifier);

        if (escapees.isEmpty())
            return;

        // First collect the hints that will be needed when the node
        // we materialize is still stored into other unescaped sink candidates.
        // The way to interpret this vector is:
        //
        // PromotedHeapLocation(NotEscapedAllocation, field) = identifierAllocation
        //
        // e.g:
        // PromotedHeapLocation(@PhantomNewFunction, FunctionActivationPLoc) = IdentifierOf(@MaterializeCreateActivation)
        // or:
        // PromotedHeapLocation(@PhantomCreateActivation, ClosureVarPLoc(x)) = IdentifierOf(@NewFunction)
        //
        // When the rhs of the `=` is to be materialized at this `where` point in the program
        // and IdentifierOf(Materialization) is the original sunken allocation of the materialization.
        //
        // The reason we need to collect all the `identifiers` here is that
        // we may materialize multiple versions of the allocation along control
        // flow edges. We will PutHint these values along those edges. However,
        // we also need to PutHint them when we join and have a Phi of the allocations.
        Vector<std::pair<PromotedHeapLocation, Node*>> hints;
        for (const auto& entry : m_heap.allocations()) {
            // Escapees get real stores via recoveries below; hints are only
            // for fields of allocations that stay sunk.
            if (escapees.contains(entry.key))
                continue;

            for (const auto& field : entry.value.fields()) {
                ASSERT(m_sinkCandidates.contains(entry.key) || !escapees.contains(field.value));
                auto iter = escapees.find(field.value);
                if (iter != escapees.end()) {
                    ASSERT(m_sinkCandidates.contains(field.value));
                    hints.append(std::make_pair(PromotedHeapLocation(entry.key, field.key), field.value));
                }
            }
        }

        // Now we need to order the materialization. Any order is
        // valid (as long as we materialize a node first if it is
        // needed for the materialization of another node, e.g. a
        // function's activation must be materialized before the
        // function itself), but we want to try minimizing the number
        // of times we have to place Puts to close cycles after a
        // materialization. In other words, we are trying to find the
        // minimum number of materializations to remove from the
        // materialization graph to make it a DAG, known as the
        // (vertex) feedback set problem. Unfortunately, this is a
        // NP-hard problem, which we don't want to solve exactly.
        //
        // Instead, we use a simple greedy procedure, that proceeds as
        // follow:
        //  - While there is at least one node with no outgoing edge
        //    amongst the remaining materializations, materialize it
        //    first
        //
        //  - Similarly, while there is at least one node with no
        //    incoming edge amongst the remaining materializations,
        //    materialize it last.
        //
        //  - When both previous conditions are false, we have an
        //    actual cycle, and we need to pick a node to
        //    materialize. We try greedily to remove the "pressure" on
        //    the remaining nodes by choosing the node with maximum
        //    |incoming edges| * |outgoing edges| as a measure of how
        //    "central" to the graph it is. We materialize it first,
        //    so that all the recoveries will be Puts of things into
        //    it (rather than Puts of the materialization into other
        //    objects), which means we will have a single
        //    StoreBarrier.


        // Compute dependencies between materializations
        // dependencies: edges A -> B meaning A's fields reference B.
        // reverseDependencies: the transposed graph.
        // forMaterialization: the subset of dependencies that must exist
        // before A can even be constructed (e.g. a function's activation).
        HashMap<Node*, NodeSet> dependencies;
        HashMap<Node*, NodeSet> reverseDependencies;
        HashMap<Node*, NodeSet> forMaterialization;
        for (const auto& entry : escapees) {
            auto& myDependencies = dependencies.add(entry.key, NodeSet()).iterator->value;
            auto& myDependenciesForMaterialization = forMaterialization.add(entry.key, NodeSet()).iterator->value;
            reverseDependencies.add(entry.key, NodeSet());
            for (const auto& field : entry.value.fields()) {
                if (escapees.contains(field.value) && field.value != entry.key) {
                    myDependencies.addVoid(field.value);
                    reverseDependencies.add(field.value, NodeSet()).iterator->value.addVoid(entry.key);
                    if (field.key.neededForMaterialization())
                        myDependenciesForMaterialization.addVoid(field.value);
                }
            }
        }

        // Helper function to update the materialized set and the
        // dependencies
        NodeSet materialized;
        auto materialize = [&] (Node* identifier) {
            materialized.addVoid(identifier);
            for (Node* dep : dependencies.get(identifier))
                reverseDependencies.find(dep)->value.remove(identifier);
            for (Node* rdep : reverseDependencies.get(identifier)) {
                dependencies.find(rdep)->value.remove(identifier);
                forMaterialization.find(rdep)->value.remove(identifier);
            }
            dependencies.remove(identifier);
            reverseDependencies.remove(identifier);
            forMaterialization.remove(identifier);
        };

        // Nodes without remaining unmaterialized fields will be
        // materialized first - amongst the remaining unmaterialized
        // nodes
        // toMaterialize is built from both ends: firstPos advances forward as
        // "materialize first" nodes are appended after it, while lastPos
        // stays put so "materialize last" nodes are inserted before the
        // previously-inserted last nodes. std::list keeps those saved
        // iterators valid across insertions.
        std::list<Allocation, FastAllocator<Allocation>> toMaterialize;
        auto firstPos = toMaterialize.begin();
        auto materializeFirst = [&] (Allocation&& allocation) {
            materialize(allocation.identifier());
            // We need to insert *after* the current position
            if (firstPos != toMaterialize.end())
                ++firstPos;
            firstPos = toMaterialize.insert(firstPos, WTFMove(allocation));
        };

        // Nodes that no other unmaterialized node points to will be
        // materialized last - amongst the remaining unmaterialized
        // nodes
        auto lastPos = toMaterialize.end();
        auto materializeLast = [&] (Allocation&& allocation) {
            materialize(allocation.identifier());
            lastPos = toMaterialize.insert(lastPos, WTFMove(allocation));
        };

        // These are the promoted locations that contains some of the
        // allocations we are currently escaping. If they are a location on
        // some other allocation we are currently materializing, we will need
        // to "recover" their value with a real put once the corresponding
        // allocation is materialized; if they are a location on some other
        // not-yet-materialized allocation, we will need a PutHint.
        Vector<PromotedHeapLocation> toRecover;

        // This loop does the actual cycle breaking
        while (!escapees.isEmpty()) {
            materialized.clear();

            // Materialize nodes that won't require recoveries if we can
            for (auto& entry : escapees) {
                // Nodes whose hard (neededForMaterialization) dependencies
                // are unmet cannot be constructed yet at all.
                if (!forMaterialization.find(entry.key)->value.isEmpty())
                    continue;

                if (dependencies.find(entry.key)->value.isEmpty()) {
                    materializeFirst(WTFMove(entry.value));
                    continue;
                }

                if (reverseDependencies.find(entry.key)->value.isEmpty()) {
                    materializeLast(WTFMove(entry.value));
                    continue;
                }
            }

            // We reach this only if there is an actual cycle that needs
            // breaking. Because we do not want to solve a NP-hard problem
            // here, we just heuristically pick a node and materialize it
            // first.
            if (materialized.isEmpty()) {
                uint64_t maxEvaluation = 0;
                Allocation* bestAllocation = nullptr;
                for (auto& entry : escapees) {
                    if (!forMaterialization.find(entry.key)->value.isEmpty())
                        continue;

                    // "Centrality" score: |incoming| * |outgoing| edges.
                    uint64_t evaluation =
                        static_cast<uint64_t>(dependencies.get(entry.key).size()) * reverseDependencies.get(entry.key).size();
                    if (evaluation > maxEvaluation) {
                        maxEvaluation = evaluation;
                        bestAllocation = &entry.value;
                    }
                }
                RELEASE_ASSERT(maxEvaluation > 0);

                materializeFirst(WTFMove(*bestAllocation));
            }
            RELEASE_ASSERT(!materialized.isEmpty());

            for (Node* identifier : materialized)
                escapees.remove(identifier);
        }

        materialized.clear();

        // Walk the final order: any field whose value is materialized later
        // than (or in a cycle with) its holder needs a recovery Put after
        // both are materialized.
        NodeSet escaped;
        for (const Allocation& allocation : toMaterialize)
            escaped.addVoid(allocation.identifier());
        for (const Allocation& allocation : toMaterialize) {
            for (const auto& field : allocation.fields()) {
                if (escaped.contains(field.value) && !materialized.contains(field.value))
                    toRecover.append(PromotedHeapLocation(allocation.identifier(), field.key));
            }
            materialized.addVoid(allocation.identifier());
        }

        Vector<Node*>& materializations = m_materializationSiteToMaterializations.add(
            where, Vector<Node*>()).iterator->value;

        for (const Allocation& allocation : toMaterialize) {
            Node* materialization = createMaterialization(allocation, where);
            materializations.append(materialization);
            m_materializationToEscapee.add(materialization, allocation.identifier());
        }

        if (!toRecover.isEmpty()) {
            m_materializationSiteToRecoveries.add(
                where, Vector<PromotedHeapLocation>()).iterator->value.appendVector(toRecover);
        }

        // The hints need to be after the "real" recoveries so that we
        // don't hint not-yet-complete objects
        m_materializationSiteToHints.add(
            where, Vector<std::pair<PromotedHeapLocation, Node*>>()).iterator->value.appendVector(hints);
    }
1474
1475     Node* createMaterialization(const Allocation& allocation, Node* where)
1476     {
1477         // FIXME: This is the only place where we actually use the
1478         // fact that an allocation's identifier is indeed the node
1479         // that created the allocation.
1480         switch (allocation.kind()) {
1481         case Allocation::Kind::Object: {
1482             ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
1483
1484             return m_graph.addNode(
1485                 allocation.identifier()->prediction(), Node::VarArg, MaterializeNewObject,
1486                 where->origin.withSemantic(allocation.identifier()->origin.semantic),
1487                 OpInfo(m_graph.addStructureSet(allocation.structures())), OpInfo(data), 0, 0);
1488         }
1489
1490         case Allocation::Kind::AsyncGeneratorFunction:
1491         case Allocation::Kind::AsyncFunction:
1492         case Allocation::Kind::GeneratorFunction:
1493         case Allocation::Kind::Function: {
1494             FrozenValue* executable = allocation.identifier()->cellOperand();
1495             
1496             NodeType nodeType;
1497             switch (allocation.kind()) {
1498             case Allocation::Kind::GeneratorFunction:
1499                 nodeType = NewGeneratorFunction;
1500                 break;
1501             case Allocation::Kind::AsyncGeneratorFunction:
1502                 nodeType = NewAsyncGeneratorFunction;
1503                 break;
1504             case Allocation::Kind::AsyncFunction:
1505                 nodeType = NewAsyncFunction;
1506                 break;
1507             default:
1508                 nodeType = NewFunction;
1509             }
1510
1511             return m_graph.addNode(
1512                 allocation.identifier()->prediction(), nodeType,
1513                 where->origin.withSemantic(
1514                     allocation.identifier()->origin.semantic),
1515                 OpInfo(executable));
1516             break;
1517         }
1518
1519         case Allocation::Kind::Activation: {
1520             ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
1521             FrozenValue* symbolTable = allocation.identifier()->cellOperand();
1522
1523             return m_graph.addNode(
1524                 allocation.identifier()->prediction(), Node::VarArg, MaterializeCreateActivation,
1525                 where->origin.withSemantic(
1526                     allocation.identifier()->origin.semantic),
1527                 OpInfo(symbolTable), OpInfo(data), 0, 0);
1528         }
1529
1530         default:
1531             DFG_CRASH(m_graph, allocation.identifier(), "Bad allocation kind");
1532         }
1533     }
1534
    // Rewrites the graph so that sunk allocations become phantoms. This runs after
    // sink candidates and materialization sites have been determined. It:
    //  (1) collects the promoted heap locations we will rewrite,
    //  (2) builds SSA form over both the locations (m_pointerSSA) and the
    //      allocations themselves (m_allocationSSA),
    //  (3) walks the graph placing Phis, materialization nodes, recoveries and
    //      OSR exit hints, converting sunk allocation nodes to their Phantom
    //      forms, and wiring Upsilons to the Phis of successor blocks.
    void promoteLocalHeap()
    {
        // Collect the set of heap locations that we will be operating
        // over.
        HashSet<PromotedHeapLocation> locations;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            m_heap = m_heapAtHead[block];

            for (Node* node : *block) {
                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode) {
                        // If the location is not on a sink candidate,
                        // we only sink it if it is read
                        if (m_sinkCandidates.contains(location.base()))
                            locations.addVoid(location);
                    },
                    [&] (PromotedHeapLocation location) -> Node* {
                        // Reads are always recorded, even on non-candidates.
                        locations.addVoid(location);
                        return nullptr;
                    });
            }
        }

        // Figure out which locations belong to which allocations.
        m_locationsForAllocation.clear();
        for (PromotedHeapLocation location : locations) {
            auto result = m_locationsForAllocation.add(
                location.base(),
                Vector<PromotedHeapLocation>());
            ASSERT(!result.iterator->value.contains(location));
            result.iterator->value.append(location);
        }

        m_pointerSSA.reset();
        m_allocationSSA.reset();

        // Collect the set of "variables" that we will be sinking.
        // Each sink candidate and each promoted location gets its own SSA
        // variable; the indexToNode/indexToLocation vectors invert the maps.
        m_locationToVariable.clear();
        m_nodeToVariable.clear();
        Vector<Node*> indexToNode;
        Vector<PromotedHeapLocation> indexToLocation;

        for (Node* index : m_sinkCandidates) {
            SSACalculator::Variable* variable = m_allocationSSA.newVariable();
            m_nodeToVariable.add(index, variable);
            ASSERT(indexToNode.size() == variable->index());
            indexToNode.append(index);
        }

        for (PromotedHeapLocation location : locations) {
            SSACalculator::Variable* variable = m_pointerSSA.newVariable();
            m_locationToVariable.add(location, variable);
            ASSERT(indexToLocation.size() == variable->index());
            indexToLocation.append(location);
        }

        // We insert all required constants at top of block 0 so that
        // they are inserted only once and we don't clutter the graph
        // with useless constants everywhere
        HashMap<FrozenValue*, Node*> lazyMapping;
        if (!m_bottom)
            m_bottom = m_insertionSet.insertConstant(0, m_graph.block(0)->at(0)->origin, jsNumber(1927));

        // For each allocation SSA variable, the set of locations whose value is
        // that allocation at a materialization site; used below so that the
        // allocation Phis also get PutHints for those locations.
        Vector<HashSet<PromotedHeapLocation>> hintsForPhi(m_sinkCandidates.size());

        // First pass: record SSA defs for every store to a promoted location and
        // for every (re)definition of an allocation (including materializations).
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            m_heap = m_heapAtHead[block];

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                // Some named properties can be added conditionally,
                // and that would necessitate bottoms
                for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
                    if (location.kind() != NamedPropertyPLoc)
                        continue;

                    SSACalculator::Variable* variable = m_locationToVariable.get(location);
                    m_pointerSSA.newDef(variable, block, m_bottom);
                }

                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialization);
                    m_allocationSSA.newDef(m_nodeToVariable.get(escapee), block, materialization);
                }

                for (std::pair<PromotedHeapLocation, Node*> pair : m_materializationSiteToHints.get(node)) {
                    PromotedHeapLocation location = pair.first;
                    Node* identifier = pair.second;
                    // We're materializing `identifier` at this point, and the unmaterialized
                    // version is inside `location`. We track which SSA variable this belongs
                    // to in case we also need a PutHint for the Phi.
                    if (UNLIKELY(validationEnabled())) {
                        RELEASE_ASSERT(m_sinkCandidates.contains(location.base()));
                        RELEASE_ASSERT(m_sinkCandidates.contains(identifier));

                        bool found = false;
                        for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                            // We're materializing `identifier` here. This asserts that this is indeed the case.
                            if (m_materializationToEscapee.get(materialization) == identifier) {
                                found = true;
                                break;
                            }
                        }
                        RELEASE_ASSERT(found);
                    }

                    SSACalculator::Variable* variable = m_nodeToVariable.get(identifier);
                    hintsForPhi[variable->index()].addVoid(location);
                }

                // The allocation node itself is a def of its own SSA variable.
                if (m_sinkCandidates.contains(node))
                    m_allocationSSA.newDef(m_nodeToVariable.get(node), block, node);

                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        if (!locations.contains(location))
                            return;

                        // Lazy (constant) values are materialized once in block 0
                        // and memoized in lazyMapping.
                        Node* nodeValue;
                        if (value.isNode())
                            nodeValue = value.asNode();
                        else {
                            auto iter = lazyMapping.find(value.asValue());
                            if (iter != lazyMapping.end())
                                nodeValue = iter->value;
                            else {
                                nodeValue = value.ensureIsNode(
                                    m_insertionSet, m_graph.block(0), 0);
                                lazyMapping.add(value.asValue(), nodeValue);
                            }
                        }

                        SSACalculator::Variable* variable = m_locationToVariable.get(location);
                        m_pointerSSA.newDef(variable, block, nodeValue);
                    },
                    [] (PromotedHeapLocation) -> Node* {
                        return nullptr;
                    });
            }
        }
        m_insertionSet.execute(m_graph.block(0));

        // Run the SSA calculators to create Phis
        m_pointerSSA.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                PromotedHeapLocation location = indexToLocation[variable->index()];

                // Don't create Phi nodes for fields of dead allocations
                if (!m_heapAtHead[block].isAllocation(location.base()))
                    return nullptr;

                // Don't create Phi nodes once we are escaped
                if (m_heapAtHead[block].getAllocation(location.base()).isEscapedAllocation())
                    return nullptr;

                // If we point to a single allocation, we will
                // directly use its materialization
                if (m_heapAtHead[block].follow(location))
                    return nullptr;

                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, block->at(0)->origin.withInvalidExit());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });

        m_allocationSSA.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                Node* identifier = indexToNode[variable->index()];

                // Don't create Phi nodes for dead allocations
                if (!m_heapAtHead[block].isAllocation(identifier))
                    return nullptr;

                // Don't create Phi nodes until we are escaped
                // (only escaped allocations have materializations to merge).
                if (!m_heapAtHead[block].getAllocation(identifier).isEscapedAllocation())
                    return nullptr;

                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, block->at(0)->origin.withInvalidExit());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });

        // Place Phis in the right places, replace all uses of any load with the appropriate
        // value, and create the materialization nodes.
        LocalOSRAvailabilityCalculator availabilityCalculator(m_graph);
        m_graph.clearReplacements();
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            m_heap = m_heapAtHead[block];
            availabilityCalculator.beginBlock(block);

            // These mapping tables are intended to be lazy. If
            // something is omitted from the table, it means that
            // there haven't been any local stores to the promoted
            // heap location (or any local materialization).
            m_localMapping.clear();
            m_escapeeToMaterialization.clear();

            // Insert the Phi functions that we had previously
            // created.
            for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(block)) {
                SSACalculator::Variable* variable = phiDef->variable();
                m_insertionSet.insert(0, phiDef->value());

                PromotedHeapLocation location = indexToLocation[variable->index()];
                m_localMapping.set(location, phiDef->value());

                if (m_sinkCandidates.contains(location.base())) {
                    // Keep OSR exit informed of the merged value of this field.
                    m_insertionSet.insert(
                        0,
                        location.createHint(
                            m_graph, block->at(0)->origin.withInvalidExit(), phiDef->value()));
                }
            }

            for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(block)) {
                SSACalculator::Variable* variable = phiDef->variable();
                m_insertionSet.insert(0, phiDef->value());

                Node* identifier = indexToNode[variable->index()];
                m_escapeeToMaterialization.add(identifier, phiDef->value());
                bool canExit = false;
                insertOSRHintsForUpdate(
                    0, block->at(0)->origin, canExit,
                    availabilityCalculator.m_availability, identifier, phiDef->value());

                // Also hint the locations whose value is this allocation
                // (recorded into hintsForPhi during the first pass).
                for (PromotedHeapLocation location : hintsForPhi[variable->index()]) {
                    m_insertionSet.insert(0,
                        location.createHint(m_graph, block->at(0)->origin.withInvalidExit(), phiDef->value()));
                    m_localMapping.set(location, phiDef->value());
                }
            }

            if (DFGObjectAllocationSinkingPhaseInternal::verbose) {
                dataLog("Local mapping at ", pointerDump(block), ": ", mapDump(m_localMapping), "\n");
                dataLog("Local materializations at ", pointerDump(block), ": ", mapDump(m_escapeeToMaterialization), "\n");
            }

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                // canExit tracks exit-state validity for insertions before the
                // node; nextCanExit for insertions after it. takeValidExit()
                // consumes the flag so only the first insertion keeps exitOK.
                bool canExit = true;
                bool nextCanExit = node->origin.exitOK;
                for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
                    if (location.kind() != NamedPropertyPLoc)
                        continue;

                    // Conditionally-added properties start out as bottom.
                    m_localMapping.set(location, m_bottom);

                    if (m_sinkCandidates.contains(node)) {
                        if (DFGObjectAllocationSinkingPhaseInternal::verbose)
                            dataLog("For sink candidate ", node, " found location ", location, "\n");
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            location.createHint(
                                m_graph, node->origin.takeValidExit(nextCanExit), m_bottom));
                    }
                }

                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                    materialization->origin.exitOK &= canExit;
                    Node* escapee = m_materializationToEscapee.get(materialization);
                    populateMaterialization(block, materialization, escapee);
                    m_escapeeToMaterialization.set(escapee, materialization);
                    m_insertionSet.insert(nodeIndex, materialization);
                    if (DFGObjectAllocationSinkingPhaseInternal::verbose)
                        dataLog("Materializing ", escapee, " => ", materialization, " at ", node, "\n");
                }

                for (PromotedHeapLocation location : m_materializationSiteToRecoveries.get(node))
                    m_insertionSet.insert(nodeIndex, createRecovery(block, location, node, canExit));
                for (std::pair<PromotedHeapLocation, Node*> pair : m_materializationSiteToHints.get(node))
                    m_insertionSet.insert(nodeIndex, createRecovery(block, pair.first, node, canExit));

                // We need to put the OSR hints after the recoveries,
                // because we only want the hints once the object is
                // complete
                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialization);
                    insertOSRHintsForUpdate(
                        nodeIndex, node->origin, canExit,
                        availabilityCalculator.m_availability, escapee, materialization);
                }

                if (node->origin.exitOK && !canExit) {
                    // We indicate that the exit state is fine now. It is OK because we updated the
                    // state above. We need to indicate this manually because the validation doesn't
                    // have enough information to infer that the exit state is fine.
                    m_insertionSet.insertNode(nodeIndex, SpecNone, ExitOK, node->origin);
                }

                // A sink candidate is its own materialization from here on.
                if (m_sinkCandidates.contains(node))
                    m_escapeeToMaterialization.set(node, node);

                availabilityCalculator.executeNode(node);

                bool desiredNextExitOK = node->origin.exitOK && !clobbersExitState(m_graph, node);

                bool doLower = false;
                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        if (!locations.contains(location))
                            return;

                        Node* nodeValue;
                        if (value.isNode())
                            nodeValue = value.asNode();
                        else
                            nodeValue = lazyMapping.get(value.asValue());

                        nodeValue = resolve(block, nodeValue);

                        m_localMapping.set(location, nodeValue);

                        if (!m_sinkCandidates.contains(location.base()))
                            return;

                        // This store targets a sunk allocation; the store node
                        // itself becomes dead and must be lowered below.
                        doLower = true;

                        if (DFGObjectAllocationSinkingPhaseInternal::verbose)
                            dataLog("Creating hint with value ", nodeValue, " before ", node, "\n");
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            location.createHint(
                                m_graph, node->origin.takeValidExit(nextCanExit), nodeValue));
                    },
                    [&] (PromotedHeapLocation location) -> Node* {
                        return resolve(block, location);
                    });

                if (!nextCanExit && desiredNextExitOK) {
                    // We indicate that the exit state is fine now. We need to do this because we
                    // emitted hints that appear to invalidate the exit state.
                    m_insertionSet.insertNode(nodeIndex + 1, SpecNone, ExitOK, node->origin);
                }

                // Lower sunk allocations to their Phantom forms; other lowered
                // nodes (e.g. stores into sunk allocations) are removed outright.
                if (m_sinkCandidates.contains(node) || doLower) {
                    switch (node->op()) {
                    case NewObject:
                        node->convertToPhantomNewObject();
                        break;

                    case NewFunction:
                        node->convertToPhantomNewFunction();
                        break;

                    case NewGeneratorFunction:
                        node->convertToPhantomNewGeneratorFunction();
                        break;
                    case NewAsyncGeneratorFunction:
                        node->convertToPhantomNewAsyncGeneratorFunction();
                        break;
                    case NewAsyncFunction:
                        node->convertToPhantomNewAsyncFunction();
                        break;

                    case CreateActivation:
                        node->convertToPhantomCreateActivation();
                        break;

                    default:
                        node->remove();
                        break;
                    }
                }

                // Redirect every edge to the resolved value/materialization.
                m_graph.doToChildren(
                    node,
                    [&] (Edge& edge) {
                        edge.setNode(resolve(block, edge.node()));
                    });
            }

            // Gotta drop some Upsilons.
            NodeAndIndex terminal = block->findTerminal();
            size_t upsilonInsertionPoint = terminal.index;
            NodeOrigin upsilonOrigin = terminal.node->origin;
            for (BasicBlock* successorBlock : block->successors()) {
                for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    PromotedHeapLocation location = indexToLocation[variable->index()];
                    Node* incoming = resolve(block, location);

                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }

                for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    Node* incoming = getMaterialization(block, indexToNode[variable->index()]);

                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }
            }

            m_insertionSet.execute(block);
        }
    }
1940
1941     NEVER_INLINE Node* resolve(BasicBlock* block, PromotedHeapLocation location)
1942     {
1943         // If we are currently pointing to a single local allocation,
1944         // simply return the associated materialization.
1945         if (Node* identifier = m_heap.follow(location))
1946             return getMaterialization(block, identifier);
1947
1948         if (Node* result = m_localMapping.get(location))
1949             return result;
1950
1951         // This implies that there is no local mapping. Find a non-local mapping.
1952         SSACalculator::Def* def = m_pointerSSA.nonLocalReachingDef(
1953             block, m_locationToVariable.get(location));
1954         ASSERT(def);
1955         ASSERT(def->value());
1956
1957         Node* result = def->value();
1958         if (result->replacement())
1959             result = result->replacement();
1960         ASSERT(!result->replacement());
1961
1962         m_localMapping.add(location, result);
1963         return result;
1964     }
1965
1966     NEVER_INLINE Node* resolve(BasicBlock* block, Node* node)
1967     {
1968         // If we are currently pointing to a single local allocation,
1969         // simply return the associated materialization.
1970         if (Node* identifier = m_heap.follow(node))
1971             return getMaterialization(block, identifier);
1972
1973         if (node->replacement())
1974             node = node->replacement();
1975         ASSERT(!node->replacement());
1976
1977         return node;
1978     }
1979
1980     NEVER_INLINE Node* getMaterialization(BasicBlock* block, Node* identifier)
1981     {
1982         ASSERT(m_heap.isAllocation(identifier));
1983         if (!m_sinkCandidates.contains(identifier))
1984             return identifier;
1985
1986         if (Node* materialization = m_escapeeToMaterialization.get(identifier))
1987             return materialization;
1988
1989         SSACalculator::Def* def = m_allocationSSA.nonLocalReachingDef(
1990             block, m_nodeToVariable.get(identifier));
1991         ASSERT(def && def->value());
1992         m_escapeeToMaterialization.add(identifier, def->value());
1993         ASSERT(!def->value()->replacement());
1994         return def->value();
1995     }
1996
    // Inserts PutHints/MovHints at `nodeIndex` so that OSR exit state referring
    // to `escapee` is redirected to its freshly created `materialization`. Scans
    // both the heap part and the locals part of the availability map. `canExit`
    // is an in/out flag: origin.takeValidExit(canExit) clears it after the first
    // insertion so only that insertion carries a valid exit state.
    void insertOSRHintsForUpdate(unsigned nodeIndex, NodeOrigin origin, bool& canExit, AvailabilityMap& availability, Node* escapee, Node* materialization)
    {
        if (DFGObjectAllocationSinkingPhaseInternal::verbose) {
            dataLog("Inserting OSR hints at ", origin, ":\n");
            dataLog("    Escapee: ", escapee, "\n");
            dataLog("    Materialization: ", materialization, "\n");
            dataLog("    Availability: ", availability, "\n");
        }
        
        // We need to follow() the value in the heap.
        // Consider the following graph:
        //
        // Block #0
        //   0: NewObject({})
        //   1: NewObject({})
        //   -: PutByOffset(@0, @1, x:0)
        //   -: PutStructure(@0, {x:0})
        //   2: GetByOffset(@0, x:0)
        //   -: MovHint(@2, loc1)
        //   -: Branch(#1, #2)
        //
        // Block #1
        //   3: Call(f, @1)
        //   4: Return(@0)
        //
        // Block #2
        //   -: Return(undefined)
        //
        // We need to materialize @1 at @3, and when doing so we need
        // to insert a MovHint for the materialization into loc1 as
        // well.
        // In order to do this, we say that we need to insert an
        // update hint for any availability whose node resolve()s to
        // the materialization.
        for (auto entry : availability.m_heap) {
            if (!entry.value.hasNode())
                continue;
            if (m_heap.follow(entry.value.node()) != escapee)
                continue;

            m_insertionSet.insert(
                nodeIndex,
                entry.key.createHint(m_graph, origin.takeValidExit(canExit), materialization));
        }

        // Same treatment for values that live in (virtual) local variables:
        // a MovHint redirects each such local to the materialization.
        for (unsigned i = availability.m_locals.size(); i--;) {
            if (!availability.m_locals[i].hasNode())
                continue;
            if (m_heap.follow(availability.m_locals[i].node()) != escapee)
                continue;

            int operand = availability.m_locals.operandForIndex(i);
            m_insertionSet.insertNode(
                nodeIndex, SpecNone, MovHint, origin.takeValidExit(canExit), OpInfo(operand),
                materialization->defaultEdge());
        }
    }
2054
    // Fills in the children (and, where applicable, the ObjectMaterializationData)
    // of materialization node `node` for escaped allocation `escapee`, resolving
    // each promoted location to its current value in `block`. Fields whose value
    // is itself a sunk allocation get m_bottom here; the real value is delivered
    // afterwards by the recovery PutHints/stores (see createRecovery callers).
    // The append order of m_varArgChildren is part of the node's contract.
    void populateMaterialization(BasicBlock* block, Node* node, Node* escapee)
    {
        Allocation& allocation = m_heap.getAllocation(escapee);
        switch (node->op()) {
        case MaterializeNewObject: {
            ObjectMaterializationData& data = node->objectMaterializationData();
            unsigned firstChild = m_graph.m_varArgChildren.size();

            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);

            // The structure is always the first child.
            PromotedHeapLocation structure(StructurePLoc, allocation.identifier());
            ASSERT(locations.contains(structure));

            m_graph.m_varArgChildren.append(Edge(resolve(block, structure), KnownCellUse));

            for (PromotedHeapLocation location : locations) {
                switch (location.kind()) {
                case StructurePLoc:
                    // Already appended above.
                    ASSERT(location == structure);
                    break;

                case NamedPropertyPLoc: {
                    ASSERT(location.base() == allocation.identifier());
                    data.m_properties.append(location.descriptor());
                    Node* value = resolve(block, location);
                    // A still-sunk value cannot be referenced directly yet; use
                    // bottom and let recovery stores fill it in.
                    if (m_sinkCandidates.contains(value))
                        m_graph.m_varArgChildren.append(m_bottom);
                    else
                        m_graph.m_varArgChildren.append(value);
                    break;
                }

                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }

            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }

        case MaterializeCreateActivation: {
            ObjectMaterializationData& data = node->objectMaterializationData();

            unsigned firstChild = m_graph.m_varArgChildren.size();

            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);

            // Symbol table first, then scope, then the closure variables.
            PromotedHeapLocation symbolTable(ActivationSymbolTablePLoc, allocation.identifier());
            ASSERT(locations.contains(symbolTable));
            ASSERT(node->cellOperand() == resolve(block, symbolTable)->constant());
            m_graph.m_varArgChildren.append(Edge(resolve(block, symbolTable), KnownCellUse));

            PromotedHeapLocation scope(ActivationScopePLoc, allocation.identifier());
            ASSERT(locations.contains(scope));
            m_graph.m_varArgChildren.append(Edge(resolve(block, scope), KnownCellUse));

            for (PromotedHeapLocation location : locations) {
                switch (location.kind()) {
                case ActivationScopePLoc: {
                    // Already appended above.
                    ASSERT(location == scope);
                    break;
                }

                case ActivationSymbolTablePLoc: {
                    // Already appended above.
                    ASSERT(location == symbolTable);
                    break;
                }

                case ClosureVarPLoc: {
                    ASSERT(location.base() == allocation.identifier());
                    data.m_properties.append(location.descriptor());
                    Node* value = resolve(block, location);
                    // As for objects: sunk values are deferred to recovery.
                    if (m_sinkCandidates.contains(value))
                        m_graph.m_varArgChildren.append(m_bottom);
                    else
                        m_graph.m_varArgChildren.append(value);
                    break;
                }

                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }

            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }
        
        case NewFunction:
        case NewGeneratorFunction:
        case NewAsyncGeneratorFunction:
        case NewAsyncFunction: {
            // Functions are materialized by ordinary New*Function nodes: the
            // executable lives in the node's cell operand, so only the
            // activation (scope) needs to be wired as child1.
            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
            ASSERT(locations.size() == 2);
                
            PromotedHeapLocation executable(FunctionExecutablePLoc, allocation.identifier());
            ASSERT_UNUSED(executable, locations.contains(executable));
                
            PromotedHeapLocation activation(FunctionActivationPLoc, allocation.identifier());
            ASSERT(locations.contains(activation));

            node->child1() = Edge(resolve(block, activation), KnownCellUse);
            break;
        }

        default:
            DFG_CRASH(m_graph, node, "Bad materialize op");
        }
    }
2169
2170     Node* createRecovery(BasicBlock* block, PromotedHeapLocation location, Node* where, bool& canExit)
2171     {
2172         if (DFGObjectAllocationSinkingPhaseInternal::verbose)
2173             dataLog("Recovering ", location, " at ", where, "\n");
2174         ASSERT(location.base()->isPhantomAllocation());
2175         Node* base = getMaterialization(block, location.base());
2176         Node* value = resolve(block, location);
2177
2178         NodeOrigin origin = where->origin.withSemantic(base->origin.semantic);
2179
2180         if (DFGObjectAllocationSinkingPhaseInternal::verbose)
2181             dataLog("Base is ", base, " and value is ", value, "\n");
2182
2183         if (base->isPhantomAllocation()) {
2184             return PromotedHeapLocation(base, location.descriptor()).createHint(
2185                 m_graph, origin.takeValidExit(canExit), value);
2186         }
2187
2188         switch (location.kind()) {
2189         case NamedPropertyPLoc: {
2190             Allocation& allocation = m_heap.getAllocation(location.base());
2191
2192             Vector<RegisteredStructure> structures;
2193             structures.appendRange(allocation.structures().begin(), allocation.structures().end());
2194             unsigned identifierNumber = location.info();
2195             UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
2196
2197             std::sort(
2198                 structures.begin(),
2199                 structures.end(),
2200                 [uid] (RegisteredStructure a, RegisteredStructure b) -> bool {
2201                     return a->getConcurrently(uid) < b->getConcurrently(uid);
2202                 });
2203
2204             RELEASE_ASSERT(structures.size());
2205             PropertyOffset firstOffset = structures[0]->getConcurrently(uid);
2206
2207             if (firstOffset == structures.last()->getConcurrently(uid)) {
2208                 Node* storage = base;
2209                 // FIXME: When we decide to sink objects with a
2210                 // property storage, we should handle non-inline offsets.
2211                 RELEASE_ASSERT(isInlineOffset(firstOffset));
2212
2213                 StorageAccessData* data = m_graph.m_storageAccessData.add();
2214                 data->offset = firstOffset;
2215                 data->identifierNumber = identifierNumber;
2216
2217                 return m_graph.addNode(
2218                     PutByOffset,
2219                     origin.takeValidExit(canExit),
2220                     OpInfo(data),
2221                     Edge(storage, KnownCellUse),
2222                     Edge(base, KnownCellUse),
2223                     value->defaultEdge());
2224             }
2225
2226             MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
2227             data->identifierNumber = identifierNumber;
2228
2229             {
2230                 PropertyOffset currentOffset = firstOffset;
2231                 StructureSet currentSet;
2232                 for (RegisteredStructure structure : structures) {
2233                     PropertyOffset offset = structure->getConcurrently(uid);
2234                     if (offset != currentOffset) {
2235                         // Because our analysis treats MultiPutByOffset like an escape, we only have to
2236                         // deal with storing results that would have been previously stored by PutByOffset
2237                         // nodes. Those nodes were guarded by the appropriate type checks. This means that
2238                         // at this point, we can simply trust that the incoming value has the right type
2239                         // for whatever structure we are using.
2240                         data->variants.append(
2241                             PutByIdVariant::replace(currentSet, currentOffset, InferredType::Top));
2242                         currentOffset = offset;
2243                         currentSet.clear();
2244                     }
2245                     currentSet.add(structure.get());
2246                 }
2247                 data->variants.append(
2248                     PutByIdVariant::replace(currentSet, currentOffset, InferredType::Top));
2249             }
2250
2251             return m_graph.addNode(
2252                 MultiPutByOffset,
2253                 origin.takeValidExit(canExit),
2254                 OpInfo(data),
2255                 Edge(base, KnownCellUse),
2256                 value->defaultEdge());
2257             break;
2258         }
2259
2260         case ClosureVarPLoc: {
2261             return m_graph.addNode(
2262                 PutClosureVar,
2263                 origin.takeValidExit(canExit),
2264                 OpInfo(location.info()),
2265                 Edge(base, KnownCellUse),
2266                 value->defaultEdge());
2267             break;
2268         }
2269
2270         default:
2271             DFG_CRASH(m_graph, base, "Bad location kind");
2272             break;
2273         }
2274     }
2275
2276     // This is a great way of asking value->isStillValid() without having to worry about getting
2277     // different answers. It turns out that this analysis works OK regardless of what this
2278     // returns but breaks badly if this changes its mind for any particular InferredValue. This
2279     // method protects us from that.
2280     bool isStillValid(InferredValue* value)
2281     {
2282         return m_validInferredValues.add(value, value->isStillValid()).iterator->value;
2283     }
2284
    // SSA calculator for promoted heap locations (fields of sunk allocations).
    SSACalculator m_pointerSSA;
    // SSA calculator for the sunk allocations themselves.
    SSACalculator m_allocationSSA;
    // Allocation nodes selected for sinking.
    NodeSet m_sinkCandidates;
    // Promoted heap location -> its SSA variable in m_pointerSSA.
    HashMap<PromotedHeapLocation, SSACalculator::Variable*> m_locationToVariable;
    // Allocation node -> its SSA variable in m_allocationSSA.
    HashMap<Node*, SSACalculator::Variable*> m_nodeToVariable;
    // Promoted heap location -> node currently holding its value (queried via resolve()).
    HashMap<PromotedHeapLocation, Node*> m_localMapping;
    // Escaped allocation -> its materialization node (queried via getMaterialization()).
    HashMap<Node*, Node*> m_escapeeToMaterialization;
    InsertionSet m_insertionSet;
    CombinedLiveness m_combinedLiveness;

    // Memoization cache backing isStillValid(); keeps the answer stable per InferredValue.
    HashMap<InferredValue*, bool> m_validInferredValues;

    // Inverse of m_escapeeToMaterialization: materialization node -> allocation it realizes.
    HashMap<Node*, Node*> m_materializationToEscapee;
    // Node at which materialization happens -> the materializations to insert there.
    HashMap<Node*, Vector<Node*>> m_materializationSiteToMaterializations;
    // Materialization site -> promoted locations whose stores must be recovered there
    // (see createRecovery()).
    HashMap<Node*, Vector<PromotedHeapLocation>> m_materializationSiteToRecoveries;
    // Materialization site -> (location, value) pairs to emit as hints for OSR exit.
    HashMap<Node*, Vector<std::pair<PromotedHeapLocation, Node*>>> m_materializationSiteToHints;

    // Allocation node -> all promoted locations recorded for it (e.g. the executable and
    // activation locations of a sunk function allocation).
    HashMap<Node*, Vector<PromotedHeapLocation>> m_locationsForAllocation;

    // Abstract heap state at the head and tail of every block, plus the state being
    // mutated while the current block is processed.
    BlockMap<LocalHeap> m_heapAtHead;
    BlockMap<LocalHeap> m_heapAtTail;
    LocalHeap m_heap;

    // Placeholder node for locations with no known value yet.
    // NOTE(review): presumably a lazily created constant — confirm where it is assigned.
    Node* m_bottom = nullptr;
2309 };
2310
2311 }
2312
2313 bool performObjectAllocationSinking(Graph& graph)
2314 {
2315     return runPhase<ObjectAllocationSinkingPhase>(graph);
2316 }
2317
2318 } } // namespace JSC::DFG
2319
2320 #endif // ENABLE(DFG_JIT)