typeCheckHoistingPhase may emit a CheckStructure on the empty value which leads to...
[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGObjectAllocationSinkingPhase.cpp
1 /*
2  * Copyright (C) 2015 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24  */
25
26 #include "config.h"
27 #include "DFGObjectAllocationSinkingPhase.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "DFGBlockMapInlines.h"
32 #include "DFGClobbersExitState.h"
33 #include "DFGCombinedLiveness.h"
34 #include "DFGGraph.h"
35 #include "DFGInsertionSet.h"
36 #include "DFGLazyNode.h"
37 #include "DFGLivenessAnalysisPhase.h"
38 #include "DFGOSRAvailabilityAnalysisPhase.h"
39 #include "DFGPhase.h"
40 #include "DFGPromotedHeapLocation.h"
41 #include "DFGSSACalculator.h"
42 #include "DFGValidate.h"
43 #include "JSCInlines.h"
44
45 #include <list>
46
47 namespace JSC { namespace DFG {
48
49 namespace {
50
// Debug logging toggle for this phase. constexpr (rather than a mutable
// global) so the dead dataLog() branches compile away in release builds.
constexpr bool verbose = false;
52
53 // In order to sink object cycles, we use a points-to analysis coupled
54 // with an escape analysis. This analysis is actually similar to an
55 // abstract interpreter focused on local allocations and ignoring
56 // everything else.
57 //
58 // We represent the local heap using two mappings:
59 //
60 // - A set of the local allocations present in the function, where
61 //   each of those have a further mapping from
62 //   PromotedLocationDescriptor to local allocations they must point
63 //   to.
64 //
65 // - A "pointer" mapping from nodes to local allocations, if they must
66 //   be equal to said local allocation and are currently live. This
67 //   can be because the node is the actual node that created the
68 //   allocation, or any other node that must currently point to it -
69 //   we don't make a difference.
70 //
71 // The following graph is a motivation for why we separate allocations
72 // from pointers:
73 //
74 // Block #0
75 //  0: NewObject({})
76 //  1: NewObject({})
77 //  -: PutByOffset(@0, @1, x)
78 //  -: PutStructure(@0, {x:0})
79 //  2: GetByOffset(@0, x)
80 //  -: Jump(#1)
81 //
82 // Block #1
83 //  -: Return(@2)
84 //
85 // Here, we need to remember in block #1 that @2 points to a local
86 // allocation with appropriate fields and structures information
87 // (because we should be able to place a materialization on top of
88 // block #1 here), even though @1 is dead. We *could* just keep @1
89 // artificially alive here, but there is no real reason to do it:
90 // after all, by the end of block #0, @1 and @2 should be completely
91 // interchangeable, and there is no reason for us to artificially make
92 // @1 more important.
93 //
94 // An important point to consider to understand this separation is
95 // that we should think of the local heap as follow: we have a
96 // bunch of nodes that are pointers to "allocations" that live
97 // someplace on the heap, and those allocations can have pointers in
98 // between themselves as well. We shouldn't care about whatever
99 // names we give to the allocations; what matters when
100 // comparing/merging two heaps is the isomorphism/comparison between
101 // the allocation graphs as seen by the nodes.
102 //
103 // For instance, in the following graph:
104 //
105 // Block #0
106 //  0: NewObject({})
107 //  -: Branch(#1, #2)
108 //
109 // Block #1
110 //  1: NewObject({})
111 //  -: PutByOffset(@0, @1, x)
112 //  -: PutStructure(@0, {x:0})
113 //  -: Jump(#3)
114 //
115 // Block #2
116 //  2: NewObject({})
117 //  -: PutByOffset(@2, undefined, x)
118 //  -: PutStructure(@2, {x:0})
119 //  -: PutByOffset(@0, @2, x)
120 //  -: PutStructure(@0, {x:0})
121 //  -: Jump(#3)
122 //
123 // Block #3
124 //  -: Return(@0)
125 //
126 // we should think of the heaps at tail of blocks #1 and #2 as being
127 // exactly the same, even though one has @0.x pointing to @1 and the
128 // other has @0.x pointing to @2, because in essence this should not
129 // be different from the graph where we hoisted @1 and @2 into a
130 // single allocation in block #0. We currently will not handle this
131 // case, because we merge allocations based on the node they are
132 // coming from, but this is only a technicality for the sake of
133 // simplicity that shouldn't hide the deeper idea outlined here.
134
135 class Allocation {
136 public:
137     // We use Escaped as a special allocation kind because when we
138     // decide to sink an allocation, we still need to keep track of it
139     // once it is escaped if it still has pointers to it in order to
140     // replace any use of those pointers by the corresponding
141     // materialization
142     enum class Kind { Escaped, Object, Activation, Function, GeneratorFunction, AsyncFunction, AsyncGeneratorFunction };
143
144     explicit Allocation(Node* identifier = nullptr, Kind kind = Kind::Escaped)
145         : m_identifier(identifier)
146         , m_kind(kind)
147     {
148     }
149
150
151     const HashMap<PromotedLocationDescriptor, Node*>& fields() const
152     {
153         return m_fields;
154     }
155
156     Node* get(PromotedLocationDescriptor descriptor)
157     {
158         return m_fields.get(descriptor);
159     }
160
161     Allocation& set(PromotedLocationDescriptor descriptor, Node* value)
162     {
163         // Pointing to anything else than an unescaped local
164         // allocation is represented by simply not having the
165         // field
166         if (value)
167             m_fields.set(descriptor, value);
168         else
169             m_fields.remove(descriptor);
170         return *this;
171     }
172
173     void remove(PromotedLocationDescriptor descriptor)
174     {
175         set(descriptor, nullptr);
176     }
177
178     bool hasStructures() const
179     {
180         switch (kind()) {
181         case Kind::Object:
182             return true;
183
184         default:
185             return false;
186         }
187     }
188
189     Allocation& setStructures(const RegisteredStructureSet& structures)
190     {
191         ASSERT(hasStructures() && !structures.isEmpty());
192         m_structures = structures;
193         return *this;
194     }
195
196     Allocation& mergeStructures(const RegisteredStructureSet& structures)
197     {
198         ASSERT(hasStructures() || structures.isEmpty());
199         m_structures.merge(structures);
200         return *this;
201     }
202
203     Allocation& filterStructures(const RegisteredStructureSet& structures)
204     {
205         ASSERT(hasStructures());
206         m_structures.filter(structures);
207         RELEASE_ASSERT(!m_structures.isEmpty());
208         return *this;
209     }
210
211     const RegisteredStructureSet& structures() const
212     {
213         return m_structures;
214     }
215
216     Node* identifier() const { return m_identifier; }
217
218     Kind kind() const { return m_kind; }
219
220     bool isEscapedAllocation() const
221     {
222         return kind() == Kind::Escaped;
223     }
224
225     bool isObjectAllocation() const
226     {
227         return m_kind == Kind::Object;
228     }
229
230     bool isActivationAllocation() const
231     {
232         return m_kind == Kind::Activation;
233     }
234
235     bool isFunctionAllocation() const
236     {
237         return m_kind == Kind::Function || m_kind == Kind::GeneratorFunction || m_kind == Kind::AsyncFunction;
238     }
239
240     bool operator==(const Allocation& other) const
241     {
242         return m_identifier == other.m_identifier
243             && m_kind == other.m_kind
244             && m_fields == other.m_fields
245             && m_structures == other.m_structures;
246     }
247
248     bool operator!=(const Allocation& other) const
249     {
250         return !(*this == other);
251     }
252
253     void dump(PrintStream& out) const
254     {
255         dumpInContext(out, nullptr);
256     }
257
258     void dumpInContext(PrintStream& out, DumpContext* context) const
259     {
260         switch (m_kind) {
261         case Kind::Escaped:
262             out.print("Escaped");
263             break;
264
265         case Kind::Object:
266             out.print("Object");
267             break;
268
269         case Kind::Function:
270             out.print("Function");
271             break;
272
273         case Kind::GeneratorFunction:
274             out.print("GeneratorFunction");
275             break;
276
277         case Kind::AsyncFunction:
278             out.print("AsyncFunction");
279             break;
280
281         case Kind::AsyncGeneratorFunction:
282             out.print("AsyncGeneratorFunction");
283             break;
284         case Kind::Activation:
285             out.print("Activation");
286             break;
287         }
288         out.print("Allocation(");
289         if (!m_structures.isEmpty())
290             out.print(inContext(m_structures.toStructureSet(), context));
291         if (!m_fields.isEmpty()) {
292             if (!m_structures.isEmpty())
293                 out.print(", ");
294             out.print(mapDump(m_fields, " => #", ", "));
295         }
296         out.print(")");
297     }
298
299 private:
300     Node* m_identifier; // This is the actual node that created the allocation
301     Kind m_kind;
302     HashMap<PromotedLocationDescriptor, Node*> m_fields;
303     RegisteredStructureSet m_structures;
304 };
305
// The abstract local heap used by the escape/points-to analysis: a set
// of local allocations (keyed by the identifier node that created them)
// plus the set of currently-live nodes known to point to them. See the
// overview comment above for why allocations and pointers are kept
// separate.
class LocalHeap {
public:
    // Registers a fresh, unescaped allocation created by `node`, and
    // records `node` itself as the first pointer to it.
    Allocation& newAllocation(Node* node, Allocation::Kind kind)
    {
        ASSERT(!m_pointers.contains(node) && !isAllocation(node));
        m_pointers.add(node, node);
        return m_allocations.set(node, Allocation(node, kind)).iterator->value;
    }

    // True if `identifier` is a node that created a tracked allocation
    // (escaped or not).
    bool isAllocation(Node* identifier) const
    {
        return m_allocations.contains(identifier);
    }

    // Note that this is fundamentally different from
    // onlyLocalAllocation() below. getAllocation() takes as argument
    // a node-as-identifier, that is, an allocation node. This
    // allocation node doesn't have to be alive; it may only be
    // pointed to by other nodes or allocation fields.
    // For instance, in the following graph:
    //
    // Block #0
    //  0: NewObject({})
    //  1: NewObject({})
    //  -: PutByOffset(@0, @1, x)
    //  -: PutStructure(@0, {x:0})
    //  2: GetByOffset(@0, x)
    //  -: Jump(#1)
    //
    // Block #1
    //  -: Return(@2)
    //
    // At head of block #1, the only reachable allocation is #@1,
    // which can be reached through node @2. Thus, getAllocation(#@1)
    // contains the appropriate metadata for this allocation, but
    // onlyLocalAllocation(@1) is null, as @1 is no longer a pointer
    // to #@1 (since it is dead). Conversely, onlyLocalAllocation(@2)
    // is the same as getAllocation(#@1), while getAllocation(#@2)
    // does not make sense since @2 is not an allocation node.
    //
    // This is meant to be used when the node is already known to be
    // an identifier (i.e. an allocation) - probably because it was
    // found as value of a field or pointer in the current heap, or
    // was the result of a call to follow(). In any other cases (such
    // as when doing anything while traversing the graph), the
    // appropriate function to call is probably onlyLocalAllocation.
    Allocation& getAllocation(Node* identifier)
    {
        auto iter = m_allocations.find(identifier);
        ASSERT(iter != m_allocations.end());
        return iter->value;
    }

    // Records that `node` must point to the allocation identified by
    // `identifier`. `node` must not already be tracked.
    void newPointer(Node* node, Node* identifier)
    {
        ASSERT(!m_allocations.contains(node) && !m_pointers.contains(node));
        ASSERT(isAllocation(identifier));
        m_pointers.add(node, identifier);
    }

    // follow solves the points-to problem. Given a live node, which
    // may be either an allocation itself or a heap read (e.g. a
    // GetByOffset node), it returns the corresponding allocation
    // node, if there is one. If the argument node is neither an
    // allocation or a heap read, or may point to different nodes,
    // nullptr will be returned. Note that a node that points to
    // different nodes can never point to an unescaped local
    // allocation.
    Node* follow(Node* node) const
    {
        auto iter = m_pointers.find(node);
        ASSERT(iter == m_pointers.end() || m_allocations.contains(iter->value));
        return iter == m_pointers.end() ? nullptr : iter->value;
    }

    // Same as above, but resolves a promoted field of an allocation
    // (e.g. "@0.x") rather than a node.
    Node* follow(PromotedHeapLocation location) const
    {
        const Allocation& base = m_allocations.find(location.base())->value;
        auto iter = base.fields().find(location.descriptor());

        if (iter == base.fields().end())
            return nullptr;

        return iter->value;
    }

    // onlyLocalAllocation find the corresponding allocation metadata
    // for any live node. onlyLocalAllocation(node) is essentially
    // getAllocation(follow(node)), with appropriate null handling.
    Allocation* onlyLocalAllocation(Node* node)
    {
        Node* identifier = follow(node);
        if (!identifier)
            return nullptr;

        return &getAllocation(identifier);
    }

    Allocation* onlyLocalAllocation(PromotedHeapLocation location)
    {
        Node* identifier = follow(location);
        if (!identifier)
            return nullptr;

        return &getAllocation(identifier);
    }

    // This allows us to store the escapees only when necessary. If
    // set, the current escapees can be retrieved at any time using
    // takeEscapees(), which will clear the cached set of escapees;
    // otherwise the heap won't remember escaping allocations.
    void setWantEscapees()
    {
        m_wantEscapees = true;
    }

    HashMap<Node*, Allocation> takeEscapees()
    {
        return WTFMove(m_escapees);
    }

    // Escapes whatever allocation `node` points to, if any. A node
    // that doesn't point to a tracked allocation is a no-op.
    void escape(Node* node)
    {
        Node* identifier = follow(node);
        if (!identifier)
            return;

        escapeAllocation(identifier);
    }

    // Joins `other` into this heap: the lattice merge used at control
    // flow merge points. Allocations and fields that disagree between
    // the two heaps are escaped; anything left unreachable afterwards
    // is pruned.
    void merge(const LocalHeap& other)
    {
        assertIsValid();
        other.assertIsValid();
        ASSERT(!m_wantEscapees);

        // A heap that was never reached contributes nothing; just copy.
        if (!reached()) {
            ASSERT(other.reached());
            *this = other;
            return;
        }

        NodeSet toEscape;

        // Start from the union of both allocation sets; `add` keeps our
        // version when both heaps know the allocation.
        for (auto& allocationEntry : other.m_allocations)
            m_allocations.add(allocationEntry.key, allocationEntry.value);
        for (auto& allocationEntry : m_allocations) {
            auto allocationIter = other.m_allocations.find(allocationEntry.key);

            // If we have it and they don't, it died for them but we
            // are keeping it alive from another field somewhere.
            // There is nothing to do - we will be escaped
            // automatically when we handle that other field.
            // This will also happen for allocations that we have and
            // they don't, and all of those will get pruned.
            if (allocationIter == other.m_allocations.end())
                continue;

            if (allocationEntry.value.kind() != allocationIter->value.kind()) {
                // Kind mismatch: the allocation (and everything their
                // version's fields reach) must escape.
                toEscape.addVoid(allocationEntry.key);
                for (const auto& fieldEntry : allocationIter->value.fields())
                    toEscape.addVoid(fieldEntry.value);
            } else {
                // Same kind: keep only fields that agree; escape the
                // targets of any field that disagrees.
                mergePointerSets(
                    allocationEntry.value.fields(), allocationIter->value.fields(),
                    [&] (Node* identifier) {
                        toEscape.addVoid(identifier);
                    },
                    [&] (PromotedLocationDescriptor field) {
                        allocationEntry.value.remove(field);
                    });
                allocationEntry.value.mergeStructures(allocationIter->value.structures());
            }
        }

        // Same treatment for the node-to-allocation pointer map.
        mergePointerSets(m_pointers, other.m_pointers,
            [&] (Node* identifier) {
                toEscape.addVoid(identifier);
            },
            [&] (Node* field) {
                m_pointers.remove(field);
            });

        for (Node* identifier : toEscape)
            escapeAllocation(identifier);

        if (!ASSERT_DISABLED) {
            // Every surviving unescaped allocation must have been known
            // to both heaps.
            for (const auto& entry : m_allocations)
                ASSERT_UNUSED(entry, entry.value.isEscapedAllocation() || other.m_allocations.contains(entry.key));
        }

        // If there is no remaining pointer to an allocation, we can
        // remove it. This should only happen for escaped allocations,
        // because we only merge liveness-pruned heaps in the first
        // place.
        prune();

        assertIsValid();
    }

    // Drops pointers whose node is not in `live`, then prunes any
    // allocations that became unreachable as a result.
    void pruneByLiveness(const NodeSet& live)
    {
        Vector<Node*> toRemove;
        for (const auto& entry : m_pointers) {
            if (!live.contains(entry.key))
                toRemove.append(entry.key);
        }
        for (Node* node : toRemove)
            m_pointers.remove(node);

        prune();
    }

    // Debug-only invariant check: every pointer and every field must
    // reference a tracked allocation.
    void assertIsValid() const
    {
        if (ASSERT_DISABLED)
            return;

        // Pointers should point to an actual allocation
        for (const auto& entry : m_pointers) {
            ASSERT_UNUSED(entry, entry.value);
            ASSERT(m_allocations.contains(entry.value));
        }

        for (const auto& allocationEntry : m_allocations) {
            // Fields should point to an actual allocation
            for (const auto& fieldEntry : allocationEntry.value.fields()) {
                ASSERT_UNUSED(fieldEntry, fieldEntry.value);
                ASSERT(m_allocations.contains(fieldEntry.value));
            }
        }
    }

    bool operator==(const LocalHeap& other) const
    {
        assertIsValid();
        other.assertIsValid();
        return m_allocations == other.m_allocations
            && m_pointers == other.m_pointers;
    }

    bool operator!=(const LocalHeap& other) const
    {
        return !(*this == other);
    }

    const HashMap<Node*, Allocation>& allocations() const
    {
        return m_allocations;
    }

    const HashMap<Node*, Node*>& pointers() const
    {
        return m_pointers;
    }

    void dump(PrintStream& out) const
    {
        out.print("  Allocations:\n");
        for (const auto& entry : m_allocations)
            out.print("    #", entry.key, ": ", entry.value, "\n");
        out.print("  Pointers:\n");
        for (const auto& entry : m_pointers)
            out.print("    ", entry.key, " => #", entry.value, "\n");
    }

    // Whether this heap has been reached by the forward analysis yet;
    // merge() treats an unreached heap as bottom.
    bool reached() const
    {
        return m_reached;
    }

    void setReached()
    {
        m_reached = true;
    }

private:
    // When we merge two heaps, we escape all fields of allocations,
    // unless they point to the same thing in both heaps.
    // The reason for this is that it allows us not to do extra work
    // for diamond graphs where we would otherwise have to check
    // whether we have a single definition or not, which would be
    // cumbersome.
    //
    // Note that we should try to unify nodes even when they are not
    // from the same allocation; for instance we should be able to
    // completely eliminate all allocations from the following graph:
    //
    // Block #0
    //  0: NewObject({})
    //  -: Branch(#1, #2)
    //
    // Block #1
    //  1: NewObject({})
    //  -: PutByOffset(@1, "left", val)
    //  -: PutStructure(@1, {val:0})
    //  -: PutByOffset(@0, @1, x)
    //  -: PutStructure(@0, {x:0})
    //  -: Jump(#3)
    //
    // Block #2
    //  2: NewObject({})
    //  -: PutByOffset(@2, "right", val)
    //  -: PutStructure(@2, {val:0})
    //  -: PutByOffset(@0, @2, x)
    //  -: PutStructure(@0, {x:0})
    //  -: Jump(#3)
    //
    // Block #3:
    //  3: GetByOffset(@0, x)
    //  4: GetByOffset(@3, val)
    //  -: Return(@4)
    template<typename Key, typename EscapeFunctor, typename RemoveFunctor>
    void mergePointerSets(
        const HashMap<Key, Node*>& my, const HashMap<Key, Node*>& their,
        const EscapeFunctor& escape, const RemoveFunctor& remove)
    {
        Vector<Key> toRemove;
        for (const auto& entry : my) {
            auto iter = their.find(entry.key);
            if (iter == their.end()) {
                // Only we have this key: drop it and escape its target.
                toRemove.append(entry.key);
                escape(entry.value);
            } else if (iter->value != entry.value) {
                // Both have it but disagree: drop it and escape both
                // targets.
                toRemove.append(entry.key);
                escape(entry.value);
                escape(iter->value);
            }
        }
        // Keys only they have: their targets escape (we won't keep the
        // key at all).
        for (const auto& entry : their) {
            if (my.contains(entry.key))
                continue;
            escape(entry.value);
        }
        // Removal is deferred so we don't mutate `my` while iterating.
        for (Key key : toRemove)
            remove(key);
    }

    // Marks the allocation as escaped and transitively escapes every
    // allocation reachable through its fields. If escapee tracking is
    // on, the pre-escape metadata is saved for later materialization.
    void escapeAllocation(Node* identifier)
    {
        Allocation& allocation = getAllocation(identifier);
        if (allocation.isEscapedAllocation())
            return;

        Allocation unescaped = WTFMove(allocation);
        allocation = Allocation(unescaped.identifier(), Allocation::Kind::Escaped);

        for (const auto& entry : unescaped.fields())
            escapeAllocation(entry.value);

        if (m_wantEscapees)
            m_escapees.add(unescaped.identifier(), WTFMove(unescaped));
    }

    // Garbage-collects allocations not reachable from any live pointer,
    // directly or through fields of reachable allocations.
    void prune()
    {
        NodeSet reachable;
        for (const auto& entry : m_pointers)
            reachable.addVoid(entry.value);

        // Repeatedly mark as reachable allocations in fields of other
        // reachable allocations
        {
            Vector<Node*> worklist;
            worklist.appendRange(reachable.begin(), reachable.end());

            while (!worklist.isEmpty()) {
                Node* identifier = worklist.takeLast();
                Allocation& allocation = m_allocations.find(identifier)->value;
                for (const auto& entry : allocation.fields()) {
                    if (reachable.add(entry.value).isNewEntry)
                        worklist.append(entry.value);
                }
            }
        }

        // Remove unreachable allocations
        {
            Vector<Node*> toRemove;
            for (const auto& entry : m_allocations) {
                if (!reachable.contains(entry.key))
                    toRemove.append(entry.key);
            }
            for (Node* identifier : toRemove)
                m_allocations.remove(identifier);
        }
    }

    bool m_reached = false;
    HashMap<Node*, Node*> m_pointers;
    HashMap<Node*, Allocation> m_allocations;

    bool m_wantEscapees = false;
    HashMap<Node*, Allocation> m_escapees;
};
701
702 class ObjectAllocationSinkingPhase : public Phase {
703 public:
    // Phase boilerplate: the SSA calculators and insertion set are
    // constructed once here and reused across the whole phase.
    ObjectAllocationSinkingPhase(Graph& graph)
        : Phase(graph, "object allocation elimination")
        , m_pointerSSA(graph)
        , m_allocationSSA(graph)
        , m_insertionSet(graph)
    {
    }
711
712     bool run()
713     {
714         ASSERT(m_graph.m_form == SSA);
715         ASSERT(m_graph.m_fixpointState == FixpointNotConverged);
716
717         if (!performSinking())
718             return false;
719
720         if (verbose) {
721             dataLog("Graph after elimination:\n");
722             m_graph.dump();
723         }
724
725         return true;
726     }
727
728 private:
    // Top-level driver: runs the prerequisite analyses, the escape
    // analysis, then transforms the graph if any sink candidates were
    // found. Returns true if the graph was changed. Note the pass
    // ordering matters: combined liveness depends on the liveness and
    // OSR availability analyses run just before it.
    bool performSinking()
    {
        m_graph.computeRefCounts();
        m_graph.initializeNodeOwners();
        m_graph.ensureSSADominators();
        performLivenessAnalysis(m_graph);
        performOSRAvailabilityAnalysis(m_graph);
        m_combinedLiveness = CombinedLiveness(m_graph);

        // Snapshot the graph up front so a validation failure after the
        // transformation can print the pre-transformation state.
        CString graphBeforeSinking;
        if (Options::verboseValidationFailure() && Options::validateGraphAtEachPhase()) {
            StringPrintStream out;
            m_graph.dump(out);
            graphBeforeSinking = out.toCString();
        }

        if (verbose) {
            dataLog("Graph before elimination:\n");
            m_graph.dump();
        }

        performAnalysis();

        // Bail without touching the graph when nothing is sinkable.
        if (!determineSinkCandidates())
            return false;

        if (verbose) {
            for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
                dataLog("Heap at head of ", *block, ": \n", m_heapAtHead[block]);
                dataLog("Heap at tail of ", *block, ": \n", m_heapAtTail[block]);
            }
        }

        promoteLocalHeap();

        if (Options::validateGraphAtEachPhase())
            DFG::validate(m_graph, DumpGraph, graphBeforeSinking);
        return true;
    }
768
    // Runs the escape/points-to analysis to a fixpoint, populating
    // m_heapAtHead and m_heapAtTail for every block.
    void performAnalysis()
    {
        m_heapAtHead = BlockMap<LocalHeap>(m_graph);
        m_heapAtTail = BlockMap<LocalHeap>(m_graph);

        bool changed;
        do {
            if (verbose)
                dataLog("Doing iteration of escape analysis.\n");
            changed = false;

            for (BasicBlock* block : m_graph.blocksInPreOrder()) {
                m_heapAtHead[block].setReached();
                m_heap = m_heapAtHead[block];

                // Abstractly interpret the block. During the analysis
                // pass we neither emit heap writes nor resolve promoted
                // locations, hence the no-op functors.
                for (Node* node : *block) {
                    handleNode(
                        node,
                        [] (PromotedHeapLocation, LazyNode) { },
                        [&] (PromotedHeapLocation) -> Node* {
                            return nullptr;
                        });
                }

                // Fixpoint check: no change at this block's tail means
                // nothing to propagate to successors this iteration.
                if (m_heap == m_heapAtTail[block])
                    continue;

                m_heapAtTail[block] = m_heap;
                changed = true;

                m_heap.assertIsValid();

                // We keep only pointers that are live, and only
                // allocations that are either live, pointed to by a
                // live pointer, or (recursively) stored in a field of
                // a live allocation.
                //
                // This means we can accidentally leak non-dominating
                // nodes into the successor. However, due to the
                // non-dominance property, we are guaranteed that the
                // successor has at least one predecessor that is not
                // dominated either: this means any reference to a
                // non-dominating allocation in the successor will
                // trigger an escape and get pruned during the merge.
                m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

                for (BasicBlock* successorBlock : block->successors())
                    m_heapAtHead[successorBlock].merge(m_heap);
            }
        } while (changed);
    }
820
821     template<typename WriteFunctor, typename ResolveFunctor>
822     void handleNode(
823         Node* node,
824         const WriteFunctor& heapWrite,
825         const ResolveFunctor& heapResolve)
826     {
827         m_heap.assertIsValid();
828         ASSERT(m_heap.takeEscapees().isEmpty());
829
830         Allocation* target = nullptr;
831         HashMap<PromotedLocationDescriptor, LazyNode> writes;
832         PromotedLocationDescriptor exactRead;
833
834         switch (node->op()) {
835         case NewObject:
836             target = &m_heap.newAllocation(node, Allocation::Kind::Object);
837             target->setStructures(node->structure());
838             writes.add(
839                 StructurePLoc, LazyNode(m_graph.freeze(node->structure().get())));
840             break;
841
842         case NewFunction:
843         case NewGeneratorFunction:
844         case NewAsyncGeneratorFunction:
845         case NewAsyncFunction: {
846             if (isStillValid(node->castOperand<FunctionExecutable*>()->singletonFunction())) {
847                 m_heap.escape(node->child1().node());
848                 break;
849             }
850
851             if (node->op() == NewGeneratorFunction)
852                 target = &m_heap.newAllocation(node, Allocation::Kind::GeneratorFunction);
853             else if (node->op() == NewAsyncFunction)
854                 target = &m_heap.newAllocation(node, Allocation::Kind::AsyncFunction);
855             else if (node->op() == NewAsyncGeneratorFunction)
856                 target = &m_heap.newAllocation(node, Allocation::Kind::AsyncGeneratorFunction);
857             else
858                 target = &m_heap.newAllocation(node, Allocation::Kind::Function);
859
860             writes.add(FunctionExecutablePLoc, LazyNode(node->cellOperand()));
861             writes.add(FunctionActivationPLoc, LazyNode(node->child1().node()));
862             break;
863         }
864
865         case CreateActivation: {
866             if (isStillValid(node->castOperand<SymbolTable*>()->singletonScope())) {
867                 m_heap.escape(node->child1().node());
868                 break;
869             }
870             target = &m_heap.newAllocation(node, Allocation::Kind::Activation);
871             writes.add(ActivationSymbolTablePLoc, LazyNode(node->cellOperand()));
872             writes.add(ActivationScopePLoc, LazyNode(node->child1().node()));
873             {
874                 SymbolTable* symbolTable = node->castOperand<SymbolTable*>();
875                 ConcurrentJSLocker locker(symbolTable->m_lock);
876                 LazyNode initialValue(m_graph.freeze(node->initializationValueForActivation()));
877                 for (auto iter = symbolTable->begin(locker), end = symbolTable->end(locker); iter != end; ++iter) {
878                     writes.add(
879                         PromotedLocationDescriptor(ClosureVarPLoc, iter->value.scopeOffset().offset()),
880                         initialValue);
881                 }
882             }
883             break;
884         }
885
886         case PutStructure:
887             target = m_heap.onlyLocalAllocation(node->child1().node());
888             if (target && target->isObjectAllocation()) {
889                 writes.add(StructurePLoc, LazyNode(m_graph.freeze(JSValue(node->transition()->next.get()))));
890                 target->setStructures(node->transition()->next);
891             } else
892                 m_heap.escape(node->child1().node());
893             break;
894
895         case CheckStructureOrEmpty:
896         case CheckStructure: {
897             Allocation* allocation = m_heap.onlyLocalAllocation(node->child1().node());
898             if (allocation && allocation->isObjectAllocation()) {
899                 RegisteredStructureSet filteredStructures = allocation->structures();
900                 filteredStructures.filter(node->structureSet());
901                 if (filteredStructures.isEmpty()) {
902                     // FIXME: Write a test for this:
903                     // https://bugs.webkit.org/show_bug.cgi?id=174322
904                     m_heap.escape(node->child1().node());
905                     break;
906                 }
907                 allocation->setStructures(filteredStructures);
908                 if (Node* value = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc)))
909                     node->convertToCheckStructureImmediate(value);
910             } else
911                 m_heap.escape(node->child1().node());
912             break;
913         }
914
915         case GetByOffset:
916         case GetGetterSetterByOffset:
917             target = m_heap.onlyLocalAllocation(node->child2().node());
918             if (target && target->isObjectAllocation()) {
919                 unsigned identifierNumber = node->storageAccessData().identifierNumber;
920                 exactRead = PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber);
921             } else {
922                 m_heap.escape(node->child1().node());
923                 m_heap.escape(node->child2().node());
924             }
925             break;
926
927         case MultiGetByOffset: {
928             Allocation* allocation = m_heap.onlyLocalAllocation(node->child1().node());
929             if (allocation && allocation->isObjectAllocation()) {
930                 MultiGetByOffsetData& data = node->multiGetByOffsetData();
931                 RegisteredStructureSet validStructures;
932                 bool hasInvalidStructures = false;
933                 for (const auto& multiGetByOffsetCase : data.cases) {
934                     if (!allocation->structures().overlaps(multiGetByOffsetCase.set()))
935                         continue;
936
937                     switch (multiGetByOffsetCase.method().kind()) {
938                     case GetByOffsetMethod::LoadFromPrototype: // We need to escape those
939                     case GetByOffsetMethod::Constant: // We don't really have a way of expressing this
940                         hasInvalidStructures = true;
941                         break;
942
943                     case GetByOffsetMethod::Load: // We're good
944                         validStructures.merge(multiGetByOffsetCase.set());
945                         break;
946
947                     default:
948                         RELEASE_ASSERT_NOT_REACHED();
949                     }
950                 }
951                 if (hasInvalidStructures || validStructures.isEmpty()) {
952                     m_heap.escape(node->child1().node());
953                     break;
954                 }
955                 unsigned identifierNumber = data.identifierNumber;
956                 PromotedHeapLocation location(NamedPropertyPLoc, allocation->identifier(), identifierNumber);
957                 if (Node* value = heapResolve(location)) {
958                     if (allocation->structures().isSubsetOf(validStructures))
959                         node->replaceWith(value);
960                     else {
961                         Node* structure = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc));
962                         ASSERT(structure);
963                         allocation->filterStructures(validStructures);
964                         node->convertToCheckStructure(m_graph.addStructureSet(allocation->structures()));
965                         node->convertToCheckStructureImmediate(structure);
966                         node->setReplacement(value);
967                     }
968                 } else if (!allocation->structures().isSubsetOf(validStructures)) {
969                     // Even though we don't need the result here, we still need
970                     // to make the call to tell our caller that we could need
971                     // the StructurePLoc.
972                     // The reason for this is that when we decide not to sink a
973                     // node, we will still lower any read to its fields before
974                     // it escapes (which are usually reads across a function
975                     // call that DFGClobberize can't handle) - but we only do
976                     // this for PromotedHeapLocations that we have seen read
977                     // during the analysis!
978                     heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc));
979                     allocation->filterStructures(validStructures);
980                 }
981                 Node* identifier = allocation->get(location.descriptor());
982                 if (identifier)
983                     m_heap.newPointer(node, identifier);
984             } else
985                 m_heap.escape(node->child1().node());
986             break;
987         }
988
989         case PutByOffset:
990             target = m_heap.onlyLocalAllocation(node->child2().node());
991             if (target && target->isObjectAllocation()) {
992                 unsigned identifierNumber = node->storageAccessData().identifierNumber;
993                 writes.add(
994                     PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber),
995                     LazyNode(node->child3().node()));
996             } else {
997                 m_heap.escape(node->child1().node());
998                 m_heap.escape(node->child2().node());
999                 m_heap.escape(node->child3().node());
1000             }
1001             break;
1002
1003         case GetClosureVar:
1004             target = m_heap.onlyLocalAllocation(node->child1().node());
1005             if (target && target->isActivationAllocation()) {
1006                 exactRead =
1007                     PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset());
1008             } else
1009                 m_heap.escape(node->child1().node());
1010             break;
1011
1012         case PutClosureVar:
1013             target = m_heap.onlyLocalAllocation(node->child1().node());
1014             if (target && target->isActivationAllocation()) {
1015                 writes.add(
1016                     PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset()),
1017                     LazyNode(node->child2().node()));
1018             } else {
1019                 m_heap.escape(node->child1().node());
1020                 m_heap.escape(node->child2().node());
1021             }
1022             break;
1023
1024         case SkipScope:
1025             target = m_heap.onlyLocalAllocation(node->child1().node());
1026             if (target && target->isActivationAllocation())
1027                 exactRead = ActivationScopePLoc;
1028             else
1029                 m_heap.escape(node->child1().node());
1030             break;
1031
1032         case GetExecutable:
1033             target = m_heap.onlyLocalAllocation(node->child1().node());
1034             if (target && target->isFunctionAllocation())
1035                 exactRead = FunctionExecutablePLoc;
1036             else
1037                 m_heap.escape(node->child1().node());
1038             break;
1039
1040         case GetScope:
1041             target = m_heap.onlyLocalAllocation(node->child1().node());
1042             if (target && target->isFunctionAllocation())
1043                 exactRead = FunctionActivationPLoc;
1044             else
1045                 m_heap.escape(node->child1().node());
1046             break;
1047
1048         case Check:
1049             m_graph.doToChildren(
1050                 node,
1051                 [&] (Edge edge) {
1052                     if (edge.willNotHaveCheck())
1053                         return;
1054
1055                     if (alreadyChecked(edge.useKind(), SpecObject))
1056                         return;
1057
1058                     m_heap.escape(edge.node());
1059                 });
1060             break;
1061
1062         case MovHint:
1063         case PutHint:
1064             // Handled by OSR availability analysis
1065             break;
1066
1067         default:
1068             m_graph.doToChildren(
1069                 node,
1070                 [&] (Edge edge) {
1071                     m_heap.escape(edge.node());
1072                 });
1073             break;
1074         }
1075
1076         if (exactRead) {
1077             ASSERT(target);
1078             ASSERT(writes.isEmpty());
1079             if (Node* value = heapResolve(PromotedHeapLocation(target->identifier(), exactRead))) {
1080                 ASSERT(!value->replacement());
1081                 node->replaceWith(value);
1082             }
1083             Node* identifier = target->get(exactRead);
1084             if (identifier)
1085                 m_heap.newPointer(node, identifier);
1086         }
1087
1088         for (auto entry : writes) {
1089             ASSERT(target);
1090             if (entry.value.isNode())
1091                 target->set(entry.key, m_heap.follow(entry.value.asNode()));
1092             else
1093                 target->remove(entry.key);
1094             heapWrite(PromotedHeapLocation(target->identifier(), entry.key), entry.value);
1095         }
1096
1097         m_heap.assertIsValid();
1098     }
1099
    // Decides which local allocations we will sink, and creates the
    // materialization nodes for them at every point where they escape.
    // Returns true if there were reads of unescaped allocations or if any
    // sink candidate was found — presumably this gates whether the rest of
    // the phase (promotion) needs to run; confirm at the call site.
    bool determineSinkCandidates()
    {
        m_sinkCandidates.clear();
        m_materializationToEscapee.clear();
        m_materializationSiteToMaterializations.clear();
        m_materializationSiteToRecoveries.clear();
        m_materializationSiteToHints.clear();

        // Logically we wish to consider every allocation and sink
        // it. However, it is probably not profitable to sink an
        // allocation that will always escape. So, we only sink an
        // allocation if one of the following is true:
        //
        // 1) There exists a basic block with only backwards outgoing
        //    edges (or no outgoing edges) in which the node wasn't
        //    materialized. This is meant to catch
        //    effectively-infinite loops in which we don't need to
        //    have allocated the object.
        //
        // 2) There exists a basic block at the tail of which the node
        //    is dead and not materialized.
        //
        // 3) The sum of execution counts of the materializations is
        //    less than the sum of execution counts of the original
        //    node.
        //
        // We currently implement only rule #2.
        // FIXME: Implement the two other rules.
        // https://bugs.webkit.org/show_bug.cgi?id=137073 (rule #1)
        // https://bugs.webkit.org/show_bug.cgi?id=137074 (rule #3)
        //
        // However, these rules allow for a sunk object to be put into
        // a non-sunk one, which we don't support. We could solve this
        // by supporting PutHints on local allocations, making these
        // objects only partially correct, and we would need to adapt
        // the OSR availability analysis and OSR exit to handle
        // this. This would be totally doable, but would create a
        // super rare, and thus bug-prone, code path.
        // So, instead, we need to implement one of the following
        // closure rules:
        //
        // 1) If we put a sink candidate into a local allocation that
        //    is not a sink candidate, change our minds and don't
        //    actually sink the sink candidate.
        //
        // 2) If we put a sink candidate into a local allocation, that
        //    allocation becomes a sink candidate as well.
        //
        // We currently choose to implement closure rule #2.
        //
        // dependencies maps an unescaped allocation's identifier to the
        // identifiers of the local allocations it was stored into; it is
        // used below to enforce closure rule #2 via a worklist.
        HashMap<Node*, Vector<Node*>> dependencies;
        bool hasUnescapedReads = false;
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            // Re-run the abstract interpretation of this block from the
            // fixpoint state computed at its head.
            m_heap = m_heapAtHead[block];

            for (Node* node : *block) {
                handleNode(
                    node,
                    // Write callback: record that the stored value's
                    // allocation (if unescaped) now depends on the
                    // allocation it was written into.
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        if (!value.isNode())
                            return;

                        Allocation* allocation = m_heap.onlyLocalAllocation(value.asNode());
                        if (allocation && !allocation->isEscapedAllocation())
                            dependencies.add(allocation->identifier(), Vector<Node*>()).iterator->value.append(location.base());
                    },
                    // Read callback: we saw a read of a promoted location
                    // on an unescaped allocation.
                    [&] (PromotedHeapLocation) -> Node* {
                        hasUnescapedReads = true;
                        return nullptr;
                    });
            }

            // The sink candidates are initially the unescaped
            // allocations dying at tail of blocks
            NodeSet allocations;
            for (const auto& entry : m_heap.allocations()) {
                if (!entry.value.isEscapedAllocation())
                    allocations.addVoid(entry.key);
            }

            m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

            // Anything that was unescaped above but got pruned (i.e. is
            // dead at the block's tail) is a candidate (rule #2).
            for (Node* identifier : allocations) {
                if (!m_heap.isAllocation(identifier))
                    m_sinkCandidates.addVoid(identifier);
            }
        }

        // Ensure that the set of sink candidates is closed for put operations
        // (closure rule #2): any allocation that a candidate was stored into
        // becomes a candidate too, transitively.
        Vector<Node*> worklist;
        worklist.appendRange(m_sinkCandidates.begin(), m_sinkCandidates.end());

        while (!worklist.isEmpty()) {
            for (Node* identifier : dependencies.get(worklist.takeLast())) {
                if (m_sinkCandidates.add(identifier).isNewEntry)
                    worklist.append(identifier);
            }
        }

        if (m_sinkCandidates.isEmpty())
            return hasUnescapedReads;

        if (verbose)
            dataLog("Candidates: ", listDump(m_sinkCandidates), "\n");

        // Create the materialization nodes. This is a second pass over all
        // blocks: escapes detected while interpreting a node get a
        // materialization placed at that node, and allocations that escape
        // along a control-flow edge get one placed at the block terminal.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            m_heap = m_heapAtHead[block];
            m_heap.setWantEscapees();

            for (Node* node : *block) {
                handleNode(
                    node,
                    [] (PromotedHeapLocation, LazyNode) { },
                    [] (PromotedHeapLocation) -> Node* {
                        return nullptr;
                    });
                auto escapees = m_heap.takeEscapees();
                if (!escapees.isEmpty())
                    placeMaterializations(escapees, node);
            }

            m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

            {
                // An allocation that is still unescaped here but is either
                // unknown or escaped at the head of some successor must be
                // materialized on the edge; we conservatively place that
                // materialization at this block's terminal.
                HashMap<Node*, Allocation> escapingOnEdge;
                for (const auto& entry : m_heap.allocations()) {
                    if (entry.value.isEscapedAllocation())
                        continue;

                    bool mustEscape = false;
                    for (BasicBlock* successorBlock : block->successors()) {
                        if (!m_heapAtHead[successorBlock].isAllocation(entry.key)
                            || m_heapAtHead[successorBlock].getAllocation(entry.key).isEscapedAllocation())
                            mustEscape = true;
                    }

                    if (mustEscape)
                        escapingOnEdge.add(entry.key, entry.value);
                }
                placeMaterializations(WTFMove(escapingOnEdge), block->terminal());
            }
        }

        return hasUnescapedReads || !m_sinkCandidates.isEmpty();
    }
1245
    // Records, for the program point `where`, an ordered list of
    // materializations for the escaping allocations, plus the "recoveries"
    // (real puts needed to close cycles between materialized objects) and
    // the PutHints into still-unescaped allocations. The actual nodes are
    // inserted later from these side tables.
    void placeMaterializations(HashMap<Node*, Allocation> escapees, Node* where)
    {
        // We don't create materializations if the escapee is not a
        // sink candidate
        Vector<Node*> toRemove;
        for (const auto& entry : escapees) {
            if (!m_sinkCandidates.contains(entry.key))
                toRemove.append(entry.key);
        }
        for (Node* identifier : toRemove)
            escapees.remove(identifier);

        if (escapees.isEmpty())
            return;

        // First collect the hints that will be needed when the node
        // we materialize is still stored into other unescaped sink candidates.
        // The way to interpret this vector is:
        //
        // PromotedHeapLocation(NotEscapedAllocation, field) = identifierAllocation
        //
        // e.g:
        // PromotedHeapLocation(@PhantomNewFunction, FunctionActivationPLoc) = IdentifierOf(@MaterializeCreateActivation)
        // or:
        // PromotedHeapLocation(@PhantomCreateActivation, ClosureVarPLoc(x)) = IdentifierOf(@NewFunction)
        //
        // When the rhs of the `=` is to be materialized at this `where` point in the program
        // and IdentifierOf(Materialization) is the original sunken allocation of the materialization.
        //
        // The reason we need to collect all the `identifiers` here is that
        // we may materialize multiple versions of the allocation along control
        // flow edges. We will PutHint these values along those edges. However,
        // we also need to PutHint them when we join and have a Phi of the allocations.
        Vector<std::pair<PromotedHeapLocation, Node*>> hints;
        for (const auto& entry : m_heap.allocations()) {
            if (escapees.contains(entry.key))
                continue;

            for (const auto& field : entry.value.fields()) {
                ASSERT(m_sinkCandidates.contains(entry.key) || !escapees.contains(field.value));
                auto iter = escapees.find(field.value);
                if (iter != escapees.end()) {
                    ASSERT(m_sinkCandidates.contains(field.value));
                    hints.append(std::make_pair(PromotedHeapLocation(entry.key, field.key), field.value));
                }
            }
        }

        // Now we need to order the materialization. Any order is
        // valid (as long as we materialize a node first if it is
        // needed for the materialization of another node, e.g. a
        // function's activation must be materialized before the
        // function itself), but we want to try minimizing the number
        // of times we have to place Puts to close cycles after a
        // materialization. In other words, we are trying to find the
        // minimum number of materializations to remove from the
        // materialization graph to make it a DAG, known as the
        // (vertex) feedback set problem. Unfortunately, this is a
        // NP-hard problem, which we don't want to solve exactly.
        //
        // Instead, we use a simple greedy procedure, that proceeds as
        // follow:
        //  - While there is at least one node with no outgoing edge
        //    amongst the remaining materializations, materialize it
        //    first
        //
        //  - Similarly, while there is at least one node with no
        //    incoming edge amongst the remaining materializations,
        //    materialize it last.
        //
        //  - When both previous conditions are false, we have an
        //    actual cycle, and we need to pick a node to
        //    materialize. We try greedily to remove the "pressure" on
        //    the remaining nodes by choosing the node with maximum
        //    |incoming edges| * |outgoing edges| as a measure of how
        //    "central" to the graph it is. We materialize it first,
        //    so that all the recoveries will be Puts of things into
        //    it (rather than Puts of the materialization into other
        //    objects), which means we will have a single
        //    StoreBarrier.


        // Compute dependencies between materializations:
        //  - dependencies[a] = escaping allocations stored in a's fields
        //  - reverseDependencies[a] = escaping allocations that store a
        //  - forMaterialization[a] = subset of dependencies[a] whose field
        //    must already exist for a's materialization node itself
        HashMap<Node*, NodeSet> dependencies;
        HashMap<Node*, NodeSet> reverseDependencies;
        HashMap<Node*, NodeSet> forMaterialization;
        for (const auto& entry : escapees) {
            auto& myDependencies = dependencies.add(entry.key, NodeSet()).iterator->value;
            auto& myDependenciesForMaterialization = forMaterialization.add(entry.key, NodeSet()).iterator->value;
            reverseDependencies.add(entry.key, NodeSet());
            for (const auto& field : entry.value.fields()) {
                if (escapees.contains(field.value) && field.value != entry.key) {
                    myDependencies.addVoid(field.value);
                    reverseDependencies.add(field.value, NodeSet()).iterator->value.addVoid(entry.key);
                    if (field.key.neededForMaterialization())
                        myDependenciesForMaterialization.addVoid(field.value);
                }
            }
        }

        // Helper function to update the materialized set and the
        // dependencies: marks `identifier` as materialized and removes it
        // from every remaining node's edge sets.
        NodeSet materialized;
        auto materialize = [&] (Node* identifier) {
            materialized.addVoid(identifier);
            for (Node* dep : dependencies.get(identifier))
                reverseDependencies.find(dep)->value.remove(identifier);
            for (Node* rdep : reverseDependencies.get(identifier)) {
                dependencies.find(rdep)->value.remove(identifier);
                forMaterialization.find(rdep)->value.remove(identifier);
            }
            dependencies.remove(identifier);
            reverseDependencies.remove(identifier);
            forMaterialization.remove(identifier);
        };

        // Nodes without remaining unmaterialized fields will be
        // materialized first - amongst the remaining unmaterialized
        // nodes
        std::list<Allocation, FastAllocator<Allocation>> toMaterialize;
        auto firstPos = toMaterialize.begin();
        auto materializeFirst = [&] (Allocation&& allocation) {
            materialize(allocation.identifier());
            // We need to insert *after* the current position
            if (firstPos != toMaterialize.end())
                ++firstPos;
            firstPos = toMaterialize.insert(firstPos, WTFMove(allocation));
        };

        // Nodes that no other unmaterialized node points to will be
        // materialized last - amongst the remaining unmaterialized
        // nodes
        auto lastPos = toMaterialize.end();
        auto materializeLast = [&] (Allocation&& allocation) {
            materialize(allocation.identifier());
            lastPos = toMaterialize.insert(lastPos, WTFMove(allocation));
        };

        // These are the promoted locations that contains some of the
        // allocations we are currently escaping. If they are a location on
        // some other allocation we are currently materializing, we will need
        // to "recover" their value with a real put once the corresponding
        // allocation is materialized; if they are a location on some other
        // not-yet-materialized allocation, we will need a PutHint.
        Vector<PromotedHeapLocation> toRecover;

        // This loop does the actual cycle breaking
        while (!escapees.isEmpty()) {
            materialized.clear();

            // Materialize nodes that won't require recoveries if we can
            for (auto& entry : escapees) {
                if (!forMaterialization.find(entry.key)->value.isEmpty())
                    continue;

                if (dependencies.find(entry.key)->value.isEmpty()) {
                    materializeFirst(WTFMove(entry.value));
                    continue;
                }

                if (reverseDependencies.find(entry.key)->value.isEmpty()) {
                    materializeLast(WTFMove(entry.value));
                    continue;
                }
            }

            // We reach this only if there is an actual cycle that needs
            // breaking. Because we do not want to solve a NP-hard problem
            // here, we just heuristically pick a node and materialize it
            // first.
            if (materialized.isEmpty()) {
                uint64_t maxEvaluation = 0;
                Allocation* bestAllocation = nullptr;
                for (auto& entry : escapees) {
                    if (!forMaterialization.find(entry.key)->value.isEmpty())
                        continue;

                    uint64_t evaluation =
                        static_cast<uint64_t>(dependencies.get(entry.key).size()) * reverseDependencies.get(entry.key).size();
                    if (evaluation > maxEvaluation) {
                        maxEvaluation = evaluation;
                        bestAllocation = &entry.value;
                    }
                }
                RELEASE_ASSERT(maxEvaluation > 0);

                materializeFirst(WTFMove(*bestAllocation));
            }
            RELEASE_ASSERT(!materialized.isEmpty());

            // Note: the Allocation values for these keys were moved from
            // above; we only remove the (now hollow) entries here.
            for (Node* identifier : materialized)
                escapees.remove(identifier);
        }

        materialized.clear();

        // Walk the chosen order: a field that points at an allocation
        // materialized *later* in the order cannot be filled at
        // materialization time and needs a real put (a "recovery")
        // afterwards.
        NodeSet escaped;
        for (const Allocation& allocation : toMaterialize)
            escaped.addVoid(allocation.identifier());
        for (const Allocation& allocation : toMaterialize) {
            for (const auto& field : allocation.fields()) {
                if (escaped.contains(field.value) && !materialized.contains(field.value))
                    toRecover.append(PromotedHeapLocation(allocation.identifier(), field.key));
            }
            materialized.addVoid(allocation.identifier());
        }

        // Record the materializations (in order) for this site.
        Vector<Node*>& materializations = m_materializationSiteToMaterializations.add(
            where, Vector<Node*>()).iterator->value;

        for (const Allocation& allocation : toMaterialize) {
            Node* materialization = createMaterialization(allocation, where);
            materializations.append(materialization);
            m_materializationToEscapee.add(materialization, allocation.identifier());
        }

        if (!toRecover.isEmpty()) {
            m_materializationSiteToRecoveries.add(
                where, Vector<PromotedHeapLocation>()).iterator->value.appendVector(toRecover);
        }

        // The hints need to be after the "real" recoveries so that we
        // don't hint not-yet-complete objects
        m_materializationSiteToHints.add(
            where, Vector<std::pair<PromotedHeapLocation, Node*>>()).iterator->value.appendVector(hints);
    }
1472
1473     Node* createMaterialization(const Allocation& allocation, Node* where)
1474     {
1475         // FIXME: This is the only place where we actually use the
1476         // fact that an allocation's identifier is indeed the node
1477         // that created the allocation.
1478         switch (allocation.kind()) {
1479         case Allocation::Kind::Object: {
1480             ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
1481
1482             return m_graph.addNode(
1483                 allocation.identifier()->prediction(), Node::VarArg, MaterializeNewObject,
1484                 where->origin.withSemantic(allocation.identifier()->origin.semantic),
1485                 OpInfo(m_graph.addStructureSet(allocation.structures())), OpInfo(data), 0, 0);
1486         }
1487
1488         case Allocation::Kind::AsyncGeneratorFunction:
1489         case Allocation::Kind::AsyncFunction:
1490         case Allocation::Kind::GeneratorFunction:
1491         case Allocation::Kind::Function: {
1492             FrozenValue* executable = allocation.identifier()->cellOperand();
1493             
1494             NodeType nodeType;
1495             switch (allocation.kind()) {
1496             case Allocation::Kind::GeneratorFunction:
1497                 nodeType = NewGeneratorFunction;
1498                 break;
1499             case Allocation::Kind::AsyncGeneratorFunction:
1500                 nodeType = NewAsyncGeneratorFunction;
1501                 break;
1502             case Allocation::Kind::AsyncFunction:
1503                 nodeType = NewAsyncFunction;
1504                 break;
1505             default:
1506                 nodeType = NewFunction;
1507             }
1508
1509             return m_graph.addNode(
1510                 allocation.identifier()->prediction(), nodeType,
1511                 where->origin.withSemantic(
1512                     allocation.identifier()->origin.semantic),
1513                 OpInfo(executable));
1514             break;
1515         }
1516
1517         case Allocation::Kind::Activation: {
1518             ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
1519             FrozenValue* symbolTable = allocation.identifier()->cellOperand();
1520
1521             return m_graph.addNode(
1522                 allocation.identifier()->prediction(), Node::VarArg, MaterializeCreateActivation,
1523                 where->origin.withSemantic(
1524                     allocation.identifier()->origin.semantic),
1525                 OpInfo(symbolTable), OpInfo(data), 0, 0);
1526         }
1527
1528         default:
1529             DFG_CRASH(m_graph, allocation.identifier(), "Bad allocation kind");
1530         }
1531     }
1532
    // Rewrites the graph so that sunk allocations live only in the "promoted
    // heap": it collects all promoted heap locations, builds SSA over both the
    // locations (m_pointerSSA) and the allocations themselves (m_allocationSSA),
    // then walks the blocks inserting Phis, Upsilons, hints, and materialization
    // nodes, and finally converts each sunk allocation node to its phantom form.
    void promoteLocalHeap()
    {
        // Collect the set of heap locations that we will be operating
        // over.
        HashSet<PromotedHeapLocation> locations;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            m_heap = m_heapAtHead[block];

            for (Node* node : *block) {
                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode) {
                        // If the location is not on a sink candidate,
                        // we only sink it if it is read
                        if (m_sinkCandidates.contains(location.base()))
                            locations.addVoid(location);
                    },
                    [&] (PromotedHeapLocation location) -> Node* {
                        locations.addVoid(location);
                        return nullptr;
                    });
            }
        }

        // Figure out which locations belong to which allocations.
        m_locationsForAllocation.clear();
        for (PromotedHeapLocation location : locations) {
            auto result = m_locationsForAllocation.add(
                location.base(),
                Vector<PromotedHeapLocation>());
            ASSERT(!result.iterator->value.contains(location));
            result.iterator->value.append(location);
        }

        m_pointerSSA.reset();
        m_allocationSSA.reset();

        // Collect the set of "variables" that we will be sinking.
        // Each sink candidate and each promoted location gets its own SSA
        // variable; the indexToNode/indexToLocation vectors invert the maps.
        m_locationToVariable.clear();
        m_nodeToVariable.clear();
        Vector<Node*> indexToNode;
        Vector<PromotedHeapLocation> indexToLocation;

        for (Node* index : m_sinkCandidates) {
            SSACalculator::Variable* variable = m_allocationSSA.newVariable();
            m_nodeToVariable.add(index, variable);
            ASSERT(indexToNode.size() == variable->index());
            indexToNode.append(index);
        }

        for (PromotedHeapLocation location : locations) {
            SSACalculator::Variable* variable = m_pointerSSA.newVariable();
            m_locationToVariable.add(location, variable);
            ASSERT(indexToLocation.size() == variable->index());
            indexToLocation.append(location);
        }

        // We insert all required constants at top of block 0 so that
        // they are inserted only once and we don't clutter the graph
        // with useless constants everywhere
        HashMap<FrozenValue*, Node*> lazyMapping;
        if (!m_bottom)
            m_bottom = m_insertionSet.insertConstant(0, m_graph.block(0)->at(0)->origin, jsNumber(1927));

        // For each allocation SSA variable, the set of locations whose hints
        // must be re-emitted after the corresponding Phi (filled in below).
        Vector<HashSet<PromotedHeapLocation>> hintsForPhi(m_sinkCandidates.size());

        // First pass: record SSA defs for every write to a promoted location
        // and for every (re)materialization point of an allocation.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            m_heap = m_heapAtHead[block];

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                // Some named properties can be added conditionally,
                // and that would necessitate bottoms
                for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
                    if (location.kind() != NamedPropertyPLoc)
                        continue;

                    SSACalculator::Variable* variable = m_locationToVariable.get(location);
                    m_pointerSSA.newDef(variable, block, m_bottom);
                }

                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialization);
                    m_allocationSSA.newDef(m_nodeToVariable.get(escapee), block, materialization);
                }

                for (std::pair<PromotedHeapLocation, Node*> pair : m_materializationSiteToHints.get(node)) {
                    PromotedHeapLocation location = pair.first;
                    Node* identifier = pair.second;
                    // We're materializing `identifier` at this point, and the unmaterialized
                    // version is inside `location`. We track which SSA variable this belongs
                    // to in case we also need a PutHint for the Phi.
                    if (UNLIKELY(validationEnabled())) {
                        RELEASE_ASSERT(m_sinkCandidates.contains(location.base()));
                        RELEASE_ASSERT(m_sinkCandidates.contains(identifier));

                        bool found = false;
                        for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                            // We're materializing `identifier` here. This asserts that this is indeed the case.
                            if (m_materializationToEscapee.get(materialization) == identifier) {
                                found = true;
                                break;
                            }
                        }
                        RELEASE_ASSERT(found);
                    }

                    SSACalculator::Variable* variable = m_nodeToVariable.get(identifier);
                    hintsForPhi[variable->index()].addVoid(location);
                }

                if (m_sinkCandidates.contains(node))
                    m_allocationSSA.newDef(m_nodeToVariable.get(node), block, node);

                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        if (!locations.contains(location))
                            return;

                        // Lazy (constant) values are inserted once at the top
                        // of block 0 and cached in lazyMapping.
                        Node* nodeValue;
                        if (value.isNode())
                            nodeValue = value.asNode();
                        else {
                            auto iter = lazyMapping.find(value.asValue());
                            if (iter != lazyMapping.end())
                                nodeValue = iter->value;
                            else {
                                nodeValue = value.ensureIsNode(
                                    m_insertionSet, m_graph.block(0), 0);
                                lazyMapping.add(value.asValue(), nodeValue);
                            }
                        }

                        SSACalculator::Variable* variable = m_locationToVariable.get(location);
                        m_pointerSSA.newDef(variable, block, nodeValue);
                    },
                    [] (PromotedHeapLocation) -> Node* {
                        return nullptr;
                    });
            }
        }
        m_insertionSet.execute(m_graph.block(0));

        // Run the SSA calculators to create Phis
        m_pointerSSA.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                PromotedHeapLocation location = indexToLocation[variable->index()];

                // Don't create Phi nodes for fields of dead allocations
                if (!m_heapAtHead[block].isAllocation(location.base()))
                    return nullptr;

                // Don't create Phi nodes once we are escaped
                if (m_heapAtHead[block].getAllocation(location.base()).isEscapedAllocation())
                    return nullptr;

                // If we point to a single allocation, we will
                // directly use its materialization
                if (m_heapAtHead[block].follow(location))
                    return nullptr;

                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, block->at(0)->origin.withInvalidExit());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });

        m_allocationSSA.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                Node* identifier = indexToNode[variable->index()];

                // Don't create Phi nodes for dead allocations
                if (!m_heapAtHead[block].isAllocation(identifier))
                    return nullptr;

                // Don't create Phi nodes until we are escaped
                if (!m_heapAtHead[block].getAllocation(identifier).isEscapedAllocation())
                    return nullptr;

                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, block->at(0)->origin.withInvalidExit());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });

        // Place Phis in the right places, replace all uses of any load with the appropriate
        // value, and create the materialization nodes.
        LocalOSRAvailabilityCalculator availabilityCalculator(m_graph);
        m_graph.clearReplacements();
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            m_heap = m_heapAtHead[block];
            availabilityCalculator.beginBlock(block);

            // These mapping tables are intended to be lazy. If
            // something is omitted from the table, it means that
            // there haven't been any local stores to the promoted
            // heap location (or any local materialization).
            m_localMapping.clear();
            m_escapeeToMaterialization.clear();

            // Insert the Phi functions that we had previously
            // created.
            for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(block)) {
                SSACalculator::Variable* variable = phiDef->variable();
                m_insertionSet.insert(0, phiDef->value());

                PromotedHeapLocation location = indexToLocation[variable->index()];
                m_localMapping.set(location, phiDef->value());

                if (m_sinkCandidates.contains(location.base())) {
                    // Keep OSR exit state in sync with the Phi's value.
                    m_insertionSet.insert(
                        0,
                        location.createHint(
                            m_graph, block->at(0)->origin.withInvalidExit(), phiDef->value()));
                }
            }

            for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(block)) {
                SSACalculator::Variable* variable = phiDef->variable();
                m_insertionSet.insert(0, phiDef->value());

                Node* identifier = indexToNode[variable->index()];
                m_escapeeToMaterialization.add(identifier, phiDef->value());
                bool canExit = false;
                insertOSRHintsForUpdate(
                    0, block->at(0)->origin, canExit,
                    availabilityCalculator.m_availability, identifier, phiDef->value());

                // Re-emit the hints recorded in the first pass for this Phi.
                for (PromotedHeapLocation location : hintsForPhi[variable->index()]) {
                    m_insertionSet.insert(0,
                        location.createHint(m_graph, block->at(0)->origin.withInvalidExit(), phiDef->value()));
                    m_localMapping.set(location, phiDef->value());
                }
            }

            if (verbose) {
                dataLog("Local mapping at ", pointerDump(block), ": ", mapDump(m_localMapping), "\n");
                dataLog("Local materializations at ", pointerDump(block), ": ", mapDump(m_escapeeToMaterialization), "\n");
            }

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                bool canExit = true;
                bool nextCanExit = node->origin.exitOK;
                // Conditionally-added named properties start out as m_bottom.
                for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
                    if (location.kind() != NamedPropertyPLoc)
                        continue;

                    m_localMapping.set(location, m_bottom);

                    if (m_sinkCandidates.contains(node)) {
                        if (verbose)
                            dataLog("For sink candidate ", node, " found location ", location, "\n");
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            location.createHint(
                                m_graph, node->origin.takeValidExit(nextCanExit), m_bottom));
                    }
                }

                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                    materialization->origin.exitOK &= canExit;
                    Node* escapee = m_materializationToEscapee.get(materialization);
                    populateMaterialization(block, materialization, escapee);
                    m_escapeeToMaterialization.set(escapee, materialization);
                    m_insertionSet.insert(nodeIndex, materialization);
                    if (verbose)
                        dataLog("Materializing ", escapee, " => ", materialization, " at ", node, "\n");
                }

                for (PromotedHeapLocation location : m_materializationSiteToRecoveries.get(node))
                    m_insertionSet.insert(nodeIndex, createRecovery(block, location, node, canExit));
                for (std::pair<PromotedHeapLocation, Node*> pair : m_materializationSiteToHints.get(node))
                    m_insertionSet.insert(nodeIndex, createRecovery(block, pair.first, node, canExit));

                // We need to put the OSR hints after the recoveries,
                // because we only want the hints once the object is
                // complete
                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialization);
                    insertOSRHintsForUpdate(
                        nodeIndex, node->origin, canExit,
                        availabilityCalculator.m_availability, escapee, materialization);
                }

                if (node->origin.exitOK && !canExit) {
                    // We indicate that the exit state is fine now. It is OK because we updated the
                    // state above. We need to indicate this manually because the validation doesn't
                    // have enough information to infer that the exit state is fine.
                    m_insertionSet.insertNode(nodeIndex, SpecNone, ExitOK, node->origin);
                }

                if (m_sinkCandidates.contains(node))
                    m_escapeeToMaterialization.set(node, node);

                availabilityCalculator.executeNode(node);

                bool desiredNextExitOK = node->origin.exitOK && !clobbersExitState(m_graph, node);

                bool doLower = false;
                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        if (!locations.contains(location))
                            return;

                        Node* nodeValue;
                        if (value.isNode())
                            nodeValue = value.asNode();
                        else
                            nodeValue = lazyMapping.get(value.asValue());

                        nodeValue = resolve(block, nodeValue);

                        m_localMapping.set(location, nodeValue);

                        if (!m_sinkCandidates.contains(location.base()))
                            return;

                        // A write to a sunk allocation: the store node itself
                        // will be lowered below, leaving only the hint.
                        doLower = true;

                        if (verbose)
                            dataLog("Creating hint with value ", nodeValue, " before ", node, "\n");
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            location.createHint(
                                m_graph, node->origin.takeValidExit(nextCanExit), nodeValue));
                    },
                    [&] (PromotedHeapLocation location) -> Node* {
                        return resolve(block, location);
                    });

                if (!nextCanExit && desiredNextExitOK) {
                    // We indicate that the exit state is fine now. We need to do this because we
                    // emitted hints that appear to invalidate the exit state.
                    m_insertionSet.insertNode(nodeIndex + 1, SpecNone, ExitOK, node->origin);
                }

                // Lower sunk allocations to their phantom forms; other lowered
                // nodes (e.g. the stores handled above) are removed outright.
                if (m_sinkCandidates.contains(node) || doLower) {
                    switch (node->op()) {
                    case NewObject:
                        node->convertToPhantomNewObject();
                        break;

                    case NewFunction:
                        node->convertToPhantomNewFunction();
                        break;

                    case NewGeneratorFunction:
                        node->convertToPhantomNewGeneratorFunction();
                        break;
                    case NewAsyncGeneratorFunction:
                        node->convertToPhantomNewAsyncGeneratorFunction();
                        break;
                    case NewAsyncFunction:
                        node->convertToPhantomNewAsyncFunction();
                        break;

                    case CreateActivation:
                        node->convertToPhantomCreateActivation();
                        break;

                    default:
                        node->remove();
                        break;
                    }
                }

                // Rewrite all edges so they point at resolved values.
                m_graph.doToChildren(
                    node,
                    [&] (Edge& edge) {
                        edge.setNode(resolve(block, edge.node()));
                    });
            }

            // Gotta drop some Upsilons.
            NodeAndIndex terminal = block->findTerminal();
            size_t upsilonInsertionPoint = terminal.index;
            NodeOrigin upsilonOrigin = terminal.node->origin;
            for (BasicBlock* successorBlock : block->successors()) {
                for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    PromotedHeapLocation location = indexToLocation[variable->index()];
                    Node* incoming = resolve(block, location);

                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }

                for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    Node* incoming = getMaterialization(block, indexToNode[variable->index()]);

                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }
            }

            m_insertionSet.execute(block);
        }
    }
1938
1939     NEVER_INLINE Node* resolve(BasicBlock* block, PromotedHeapLocation location)
1940     {
1941         // If we are currently pointing to a single local allocation,
1942         // simply return the associated materialization.
1943         if (Node* identifier = m_heap.follow(location))
1944             return getMaterialization(block, identifier);
1945
1946         if (Node* result = m_localMapping.get(location))
1947             return result;
1948
1949         // This implies that there is no local mapping. Find a non-local mapping.
1950         SSACalculator::Def* def = m_pointerSSA.nonLocalReachingDef(
1951             block, m_locationToVariable.get(location));
1952         ASSERT(def);
1953         ASSERT(def->value());
1954
1955         Node* result = def->value();
1956         if (result->replacement())
1957             result = result->replacement();
1958         ASSERT(!result->replacement());
1959
1960         m_localMapping.add(location, result);
1961         return result;
1962     }
1963
1964     NEVER_INLINE Node* resolve(BasicBlock* block, Node* node)
1965     {
1966         // If we are currently pointing to a single local allocation,
1967         // simply return the associated materialization.
1968         if (Node* identifier = m_heap.follow(node))
1969             return getMaterialization(block, identifier);
1970
1971         if (node->replacement())
1972             node = node->replacement();
1973         ASSERT(!node->replacement());
1974
1975         return node;
1976     }
1977
1978     NEVER_INLINE Node* getMaterialization(BasicBlock* block, Node* identifier)
1979     {
1980         ASSERT(m_heap.isAllocation(identifier));
1981         if (!m_sinkCandidates.contains(identifier))
1982             return identifier;
1983
1984         if (Node* materialization = m_escapeeToMaterialization.get(identifier))
1985             return materialization;
1986
1987         SSACalculator::Def* def = m_allocationSSA.nonLocalReachingDef(
1988             block, m_nodeToVariable.get(identifier));
1989         ASSERT(def && def->value());
1990         m_escapeeToMaterialization.add(identifier, def->value());
1991         ASSERT(!def->value()->replacement());
1992         return def->value();
1993     }
1994
    // Emits OSR hint nodes at `nodeIndex` so that every availability entry
    // (heap location or local) whose node resolves to `escapee` is updated to
    // reference `materialization` instead. `canExit` is threaded through
    // takeValidExit() so only the first inserted hint keeps a valid exit.
    void insertOSRHintsForUpdate(unsigned nodeIndex, NodeOrigin origin, bool& canExit, AvailabilityMap& availability, Node* escapee, Node* materialization)
    {
        if (verbose) {
            dataLog("Inserting OSR hints at ", origin, ":\n");
            dataLog("    Escapee: ", escapee, "\n");
            dataLog("    Materialization: ", materialization, "\n");
            dataLog("    Availability: ", availability, "\n");
        }
        
        // We need to follow() the value in the heap.
        // Consider the following graph:
        //
        // Block #0
        //   0: NewObject({})
        //   1: NewObject({})
        //   -: PutByOffset(@0, @1, x:0)
        //   -: PutStructure(@0, {x:0})
        //   2: GetByOffset(@0, x:0)
        //   -: MovHint(@2, loc1)
        //   -: Branch(#1, #2)
        //
        // Block #1
        //   3: Call(f, @1)
        //   4: Return(@0)
        //
        // Block #2
        //   -: Return(undefined)
        //
        // We need to materialize @1 at @3, and when doing so we need
        // to insert a MovHint for the materialization into loc1 as
        // well.
        // In order to do this, we say that we need to insert an
        // update hint for any availability whose node resolve()s to
        // the materialization.
        for (auto entry : availability.m_heap) {
            if (!entry.value.hasNode())
                continue;
            if (m_heap.follow(entry.value.node()) != escapee)
                continue;

            m_insertionSet.insert(
                nodeIndex,
                entry.key.createHint(m_graph, origin.takeValidExit(canExit), materialization));
        }

        // Same treatment for locals: a MovHint re-points each affected
        // operand at the materialization.
        for (unsigned i = availability.m_locals.size(); i--;) {
            if (!availability.m_locals[i].hasNode())
                continue;
            if (m_heap.follow(availability.m_locals[i].node()) != escapee)
                continue;

            int operand = availability.m_locals.operandForIndex(i);
            m_insertionSet.insertNode(
                nodeIndex, SpecNone, MovHint, origin.takeValidExit(canExit), OpInfo(operand),
                materialization->defaultEdge());
        }
    }
2052
    // Fills in the children of a materialization node for `escapee`: resolves
    // each of the escapee's promoted heap locations to its current value in
    // `block` and appends the result as a child edge (var-arg for objects and
    // activations, child1 for functions). Child order must match what the
    // corresponding Materialize* node expects at codegen.
    void populateMaterialization(BasicBlock* block, Node* node, Node* escapee)
    {
        Allocation& allocation = m_heap.getAllocation(escapee);
        switch (node->op()) {
        case MaterializeNewObject: {
            ObjectMaterializationData& data = node->objectMaterializationData();
            unsigned firstChild = m_graph.m_varArgChildren.size();

            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);

            // The structure is always the first var-arg child.
            PromotedHeapLocation structure(StructurePLoc, allocation.identifier());
            ASSERT(locations.contains(structure));

            m_graph.m_varArgChildren.append(Edge(resolve(block, structure), KnownCellUse));

            for (PromotedHeapLocation location : locations) {
                switch (location.kind()) {
                case StructurePLoc:
                    ASSERT(location == structure);
                    break;

                case NamedPropertyPLoc: {
                    ASSERT(location.base() == allocation.identifier());
                    data.m_properties.append(location.descriptor());
                    Node* value = resolve(block, location);
                    // A stored value that is itself a sink candidate has no
                    // materialized node here; store the bottom placeholder.
                    if (m_sinkCandidates.contains(value))
                        m_graph.m_varArgChildren.append(m_bottom);
                    else
                        m_graph.m_varArgChildren.append(value);
                    break;
                }

                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }

            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }

        case MaterializeCreateActivation: {
            ObjectMaterializationData& data = node->objectMaterializationData();

            unsigned firstChild = m_graph.m_varArgChildren.size();

            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);

            // Fixed children come first: symbol table, then scope.
            PromotedHeapLocation symbolTable(ActivationSymbolTablePLoc, allocation.identifier());
            ASSERT(locations.contains(symbolTable));
            ASSERT(node->cellOperand() == resolve(block, symbolTable)->constant());
            m_graph.m_varArgChildren.append(Edge(resolve(block, symbolTable), KnownCellUse));

            PromotedHeapLocation scope(ActivationScopePLoc, allocation.identifier());
            ASSERT(locations.contains(scope));
            m_graph.m_varArgChildren.append(Edge(resolve(block, scope), KnownCellUse));

            for (PromotedHeapLocation location : locations) {
                switch (location.kind()) {
                case ActivationScopePLoc: {
                    ASSERT(location == scope);
                    break;
                }

                case ActivationSymbolTablePLoc: {
                    ASSERT(location == symbolTable);
                    break;
                }

                case ClosureVarPLoc: {
                    ASSERT(location.base() == allocation.identifier());
                    data.m_properties.append(location.descriptor());
                    Node* value = resolve(block, location);
                    // Same placeholder treatment as named properties above.
                    if (m_sinkCandidates.contains(value))
                        m_graph.m_varArgChildren.append(m_bottom);
                    else
                        m_graph.m_varArgChildren.append(value);
                    break;
                }

                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }

            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }
        
        case NewFunction:
        case NewGeneratorFunction:
        case NewAsyncGeneratorFunction:
        case NewAsyncFunction: {
            // Functions have exactly two promoted locations: the executable
            // (already on the node's cell operand) and the activation.
            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
            ASSERT(locations.size() == 2);
                
            PromotedHeapLocation executable(FunctionExecutablePLoc, allocation.identifier());
            ASSERT_UNUSED(executable, locations.contains(executable));
                
            PromotedHeapLocation activation(FunctionActivationPLoc, allocation.identifier());
            ASSERT(locations.contains(activation));

            node->child1() = Edge(resolve(block, activation), KnownCellUse);
            break;
        }

        default:
            DFG_CRASH(m_graph, node, "Bad materialize op");
        }
    }
2167
2168     Node* createRecovery(BasicBlock* block, PromotedHeapLocation location, Node* where, bool& canExit)
2169     {
2170         if (verbose)
2171             dataLog("Recovering ", location, " at ", where, "\n");
2172         ASSERT(location.base()->isPhantomAllocation());
2173         Node* base = getMaterialization(block, location.base());
2174         Node* value = resolve(block, location);
2175
2176         NodeOrigin origin = where->origin.withSemantic(base->origin.semantic);
2177
2178         if (verbose)
2179             dataLog("Base is ", base, " and value is ", value, "\n");
2180
2181         if (base->isPhantomAllocation()) {
2182             return PromotedHeapLocation(base, location.descriptor()).createHint(
2183                 m_graph, origin.takeValidExit(canExit), value);
2184         }
2185
2186         switch (location.kind()) {
2187         case NamedPropertyPLoc: {
2188             Allocation& allocation = m_heap.getAllocation(location.base());
2189
2190             Vector<RegisteredStructure> structures;
2191             structures.appendRange(allocation.structures().begin(), allocation.structures().end());
2192             unsigned identifierNumber = location.info();
2193             UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
2194
2195             std::sort(
2196                 structures.begin(),
2197                 structures.end(),
2198                 [uid] (RegisteredStructure a, RegisteredStructure b) -> bool {
2199                     return a->getConcurrently(uid) < b->getConcurrently(uid);
2200                 });
2201
2202             RELEASE_ASSERT(structures.size());
2203             PropertyOffset firstOffset = structures[0]->getConcurrently(uid);
2204
2205             if (firstOffset == structures.last()->getConcurrently(uid)) {
2206                 Node* storage = base;
2207                 // FIXME: When we decide to sink objects with a
2208                 // property storage, we should handle non-inline offsets.
2209                 RELEASE_ASSERT(isInlineOffset(firstOffset));
2210
2211                 StorageAccessData* data = m_graph.m_storageAccessData.add();
2212                 data->offset = firstOffset;
2213                 data->identifierNumber = identifierNumber;
2214
2215                 return m_graph.addNode(
2216                     PutByOffset,
2217                     origin.takeValidExit(canExit),
2218                     OpInfo(data),
2219                     Edge(storage, KnownCellUse),
2220                     Edge(base, KnownCellUse),
2221                     value->defaultEdge());
2222             }
2223
2224             MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
2225             data->identifierNumber = identifierNumber;
2226
2227             {
2228                 PropertyOffset currentOffset = firstOffset;
2229                 StructureSet currentSet;
2230                 for (RegisteredStructure structure : structures) {
2231                     PropertyOffset offset = structure->getConcurrently(uid);
2232                     if (offset != currentOffset) {
2233                         // Because our analysis treats MultiPutByOffset like an escape, we only have to
2234                         // deal with storing results that would have been previously stored by PutByOffset
2235                         // nodes. Those nodes were guarded by the appropriate type checks. This means that
2236                         // at this point, we can simply trust that the incoming value has the right type
2237                         // for whatever structure we are using.
2238                         data->variants.append(
2239                             PutByIdVariant::replace(currentSet, currentOffset, InferredType::Top));
2240                         currentOffset = offset;
2241                         currentSet.clear();
2242                     }
2243                     currentSet.add(structure.get());
2244                 }
2245                 data->variants.append(
2246                     PutByIdVariant::replace(currentSet, currentOffset, InferredType::Top));
2247             }
2248
2249             return m_graph.addNode(
2250                 MultiPutByOffset,
2251                 origin.takeValidExit(canExit),
2252                 OpInfo(data),
2253                 Edge(base, KnownCellUse),
2254                 value->defaultEdge());
2255             break;
2256         }
2257
2258         case ClosureVarPLoc: {
2259             return m_graph.addNode(
2260                 PutClosureVar,
2261                 origin.takeValidExit(canExit),
2262                 OpInfo(location.info()),
2263                 Edge(base, KnownCellUse),
2264                 value->defaultEdge());
2265             break;
2266         }
2267
2268         default:
2269             DFG_CRASH(m_graph, base, "Bad location kind");
2270             break;
2271         }
2272     }
2273
2274     // This is a great way of asking value->isStillValid() without having to worry about getting
2275     // different answers. It turns out that this analysis works OK regardless of what this
2276     // returns but breaks badly if this changes its mind for any particular InferredValue. This
2277     // method protects us from that.
2278     bool isStillValid(InferredValue* value)
2279     {
2280         return m_validInferredValues.add(value, value->isStillValid()).iterator->value;
2281     }
2282
    // SSA calculator over promoted heap locations (the values stored into
    // sunken allocations).
    SSACalculator m_pointerSSA;
    // SSA calculator over the allocations themselves (their materializations).
    SSACalculator m_allocationSSA;
    // Allocation nodes chosen for sinking.
    NodeSet m_sinkCandidates;
    // Maps each promoted heap location to its m_pointerSSA variable.
    HashMap<PromotedHeapLocation, SSACalculator::Variable*> m_locationToVariable;
    // Maps each sunken allocation node to its m_allocationSSA variable.
    HashMap<Node*, SSACalculator::Variable*> m_nodeToVariable;
    // Per-block working map: promoted location -> node currently holding its
    // value (consulted by resolve()).
    HashMap<PromotedHeapLocation, Node*> m_localMapping;
    // Per-block working map: escapee allocation -> its current materialization
    // (consulted by getMaterialization()).
    HashMap<Node*, Node*> m_escapeeToMaterialization;
    InsertionSet m_insertionSet;
    CombinedLiveness m_combinedLiveness;

    // Memo cache backing isStillValid(); ensures a stable answer per value.
    HashMap<InferredValue*, bool> m_validInferredValues;

    // Reverse mapping from a materialization node back to the allocation it
    // materializes.
    HashMap<Node*, Node*> m_materializationToEscapee;
    // For each node at which we materialize, the materializations to insert
    // there, the promoted locations to recover (see createRecovery), and the
    // (location, value) hints to emit.
    HashMap<Node*, Vector<Node*>> m_materializationSiteToMaterializations;
    HashMap<Node*, Vector<PromotedHeapLocation>> m_materializationSiteToRecoveries;
    HashMap<Node*, Vector<std::pair<PromotedHeapLocation, Node*>>> m_materializationSiteToHints;

    // All promoted locations belonging to each sunken allocation (e.g. the
    // executable + activation pair for function allocations).
    HashMap<Node*, Vector<PromotedHeapLocation>> m_locationsForAllocation;

    // Abstract heap state at the head/tail of every block, plus the current
    // in-flight heap while walking a block.
    BlockMap<LocalHeap> m_heapAtHead;
    BlockMap<LocalHeap> m_heapAtTail;
    LocalHeap m_heap;

    // Shared bottom value node; presumably lazily created — confirm at its use
    // sites earlier in the file.
    Node* m_bottom = nullptr;
2307 };
2308
2309 }
2310
2311 bool performObjectAllocationSinking(Graph& graph)
2312 {
2313     return runPhase<ObjectAllocationSinkingPhase>(graph);
2314 }
2315
2316 } } // namespace JSC::DFG
2317
2318 #endif // ENABLE(DFG_JIT)