/*
 * Copyright (C) 2015-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGObjectAllocationSinkingPhase.h"

#if ENABLE(DFG_JIT)

#include "DFGBlockMapInlines.h"
#include "DFGClobbersExitState.h"
#include "DFGCombinedLiveness.h"
#include "DFGGraph.h"
#include "DFGInsertionSet.h"
#include "DFGLazyNode.h"
#include "DFGLivenessAnalysisPhase.h"
#include "DFGOSRAvailabilityAnalysisPhase.h"
#include "DFGPhase.h"
#include "DFGPromotedHeapLocation.h"
#include "DFGSSACalculator.h"
#include "DFGValidate.h"
#include "JSCInlines.h"
#include <wtf/StdList.h>

namespace JSC { namespace DFG {

namespace {

namespace DFGObjectAllocationSinkingPhaseInternal {
static constexpr bool verbose = false;
}
// In order to sink object cycles, we use a points-to analysis coupled
// with an escape analysis. This analysis is actually similar to an
// abstract interpreter focused on local allocations and ignoring
// everything else.
//
// We represent the local heap using two mappings:
//
// - A set of the local allocations present in the function, where
//   each of those has a further mapping from
//   PromotedLocationDescriptor to local allocations they must point
//   to.
//
// - A "pointer" mapping from nodes to local allocations, if they must
//   be equal to said local allocation and are currently live. This
//   can be because the node is the actual node that created the
//   allocation, or any other node that must currently point to it -
//   we make no distinction between the two.
//
// The following graph is a motivation for why we separate allocations
// from pointers:
//
// Block #0
//  0: NewObject({})
//  1: NewObject({})
//  -: PutByOffset(@0, @1, x)
//  -: PutStructure(@0, {x:0})
//  2: GetByOffset(@0, x)
//  -: Jump(#1)
//
// Block #1
//  -: Return(@2)
//
// Here, we need to remember in block #1 that @2 points to a local
// allocation with appropriate fields and structures information
// (because we should be able to place a materialization on top of
// block #1 here), even though @1 is dead. We *could* just keep @1
// artificially alive here, but there is no real reason to do it:
// after all, by the end of block #0, @1 and @2 should be completely
// interchangeable, and there is no reason for us to artificially make
// @1 more important.
//
// An important point to consider to understand this separation is
// that we should think of the local heap as follows: we have a
// bunch of nodes that are pointers to "allocations" that live
// someplace on the heap, and those allocations can have pointers in
// between themselves as well. We shouldn't care about whatever
// names we give to the allocations; what matters when
// comparing/merging two heaps is the isomorphism/comparison between
// the allocation graphs as seen by the nodes.
//
// For instance, in the following graph:
//
// Block #0
//  0: NewObject({})
//  -: Branch(#1, #2)
//
// Block #1
//  1: NewObject({})
//  -: PutByOffset(@0, @1, x)
//  -: PutStructure(@0, {x:0})
//  -: Jump(#3)
//
// Block #2
//  2: NewObject({})
//  -: PutByOffset(@2, undefined, x)
//  -: PutStructure(@2, {x:0})
//  -: PutByOffset(@0, @2, x)
//  -: PutStructure(@0, {x:0})
//  -: Jump(#3)
//
// Block #3
//  -: Return(@0)
//
// we should think of the heaps at the tails of blocks #1 and #2 as being
// exactly the same, even though one has @0.x pointing to @1 and the
// other has @0.x pointing to @2, because in essence this should not
// be different from the graph where we hoisted @1 and @2 into a
// single allocation in block #0. We currently will not handle this
// case, because we merge allocations based on the node they are
// coming from, but this is only a technicality for the sake of
// simplicity that shouldn't hide the deeper idea outlined here.

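// As an illustrative sketch of these two mappings (hypothetical state,
// taken right after the GetByOffset in the first example above):
//
//   allocations: { #@0 => Object{x => #@1}, #@1 => Object{} }
//   pointers:    { @0 => #@0, @1 => #@1, @2 => #@1 }
//
// Once @1 dies, only its pointer entry goes away; the allocation #@1 it
// created stays tracked, reachable through @2 and through the x field
// of #@0.
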
class Allocation {
public:
    // We use Escaped as a special allocation kind because when we
    // decide to sink an allocation, we still need to keep track of it
    // once it has escaped if there are still pointers to it, in order
    // to replace any use of those pointers by the corresponding
    // materialization.
    enum class Kind { Escaped, Object, Activation, Function, GeneratorFunction, AsyncFunction, AsyncGeneratorFunction, RegExpObject };

    using Fields = HashMap<PromotedLocationDescriptor, Node*>;

    explicit Allocation(Node* identifier = nullptr, Kind kind = Kind::Escaped)
        : m_identifier(identifier)
        , m_kind(kind)
    {
    }


    const Fields& fields() const
    {
        return m_fields;
    }

    Fields& fields()
    {
        return m_fields;
    }

    Node* get(PromotedLocationDescriptor descriptor)
    {
        return m_fields.get(descriptor);
    }

    Allocation& set(PromotedLocationDescriptor descriptor, Node* value)
    {
        // Pointing to anything other than an unescaped local
        // allocation is represented by simply not having the
        // field.
        if (value)
            m_fields.set(descriptor, value);
        else
            m_fields.remove(descriptor);
        return *this;
    }

    void remove(PromotedLocationDescriptor descriptor)
    {
        set(descriptor, nullptr);
    }

    bool hasStructures() const
    {
        switch (kind()) {
        case Kind::Object:
            return true;

        default:
            return false;
        }
    }

    Allocation& setStructures(const RegisteredStructureSet& structures)
    {
        ASSERT(hasStructures() && !structures.isEmpty());
        m_structures = structures;
        return *this;
    }

    Allocation& mergeStructures(const RegisteredStructureSet& structures)
    {
        ASSERT(hasStructures() || structures.isEmpty());
        m_structures.merge(structures);
        return *this;
    }

    Allocation& filterStructures(const RegisteredStructureSet& structures)
    {
        ASSERT(hasStructures());
        m_structures.filter(structures);
        RELEASE_ASSERT(!m_structures.isEmpty());
        return *this;
    }

    const RegisteredStructureSet& structures() const
    {
        return m_structures;
    }

    Node* identifier() const { return m_identifier; }

    Kind kind() const { return m_kind; }

    bool isEscapedAllocation() const
    {
        return kind() == Kind::Escaped;
    }

    bool isObjectAllocation() const
    {
        return m_kind == Kind::Object;
    }

    bool isActivationAllocation() const
    {
        return m_kind == Kind::Activation;
    }

    bool isFunctionAllocation() const
    {
        return m_kind == Kind::Function || m_kind == Kind::GeneratorFunction || m_kind == Kind::AsyncFunction || m_kind == Kind::AsyncGeneratorFunction;
    }

    bool isRegExpObjectAllocation() const
    {
        return m_kind == Kind::RegExpObject;
    }

    bool operator==(const Allocation& other) const
    {
        return m_identifier == other.m_identifier
            && m_kind == other.m_kind
            && m_fields == other.m_fields
            && m_structures == other.m_structures;
    }

    bool operator!=(const Allocation& other) const
    {
        return !(*this == other);
    }

    void dump(PrintStream& out) const
    {
        dumpInContext(out, nullptr);
    }

    void dumpInContext(PrintStream& out, DumpContext* context) const
    {
        switch (m_kind) {
        case Kind::Escaped:
            out.print("Escaped");
            break;

        case Kind::Object:
            out.print("Object");
            break;

        case Kind::Function:
            out.print("Function");
            break;

        case Kind::GeneratorFunction:
            out.print("GeneratorFunction");
            break;

        case Kind::AsyncFunction:
            out.print("AsyncFunction");
            break;

        case Kind::AsyncGeneratorFunction:
            out.print("AsyncGeneratorFunction");
            break;

        case Kind::Activation:
            out.print("Activation");
            break;

        case Kind::RegExpObject:
            out.print("RegExpObject");
            break;
        }
        out.print("Allocation(");
        if (!m_structures.isEmpty())
            out.print(inContext(m_structures.toStructureSet(), context));
        if (!m_fields.isEmpty()) {
            if (!m_structures.isEmpty())
                out.print(", ");
            out.print(mapDump(m_fields, " => #", ", "));
        }
        out.print(")");
    }

private:
    Node* m_identifier; // This is the actual node that created the allocation
    Kind m_kind;
    Fields m_fields;
    RegisteredStructureSet m_structures;
};

class LocalHeap {
public:
    Allocation& newAllocation(Node* node, Allocation::Kind kind)
    {
        ASSERT(!m_pointers.contains(node) && !isAllocation(node));
        m_pointers.add(node, node);
        return m_allocations.set(node, Allocation(node, kind)).iterator->value;
    }

    bool isAllocation(Node* identifier) const
    {
        return m_allocations.contains(identifier);
    }

    // Note that this is fundamentally different from
    // onlyLocalAllocation() below. getAllocation() takes as argument
    // a node-as-identifier, that is, an allocation node. This
    // allocation node doesn't have to be alive; it may only be
    // pointed to by other nodes or allocation fields.
    // For instance, in the following graph:
    //
    // Block #0
    //  0: NewObject({})
    //  1: NewObject({})
    //  -: PutByOffset(@0, @1, x)
    //  -: PutStructure(@0, {x:0})
    //  2: GetByOffset(@0, x)
    //  -: Jump(#1)
    //
    // Block #1
    //  -: Return(@2)
    //
    // At the head of block #1, the only reachable allocation is #@1,
    // which can be reached through node @2. Thus, getAllocation(#@1)
    // contains the appropriate metadata for this allocation, but
    // onlyLocalAllocation(@1) is null, as @1 is no longer a pointer
    // to #@1 (since it is dead). Conversely, onlyLocalAllocation(@2)
    // is the same as getAllocation(#@1), while getAllocation(#@2)
    // does not make sense since @2 is not an allocation node.
    //
    // This is meant to be used when the node is already known to be
    // an identifier (i.e. an allocation) - probably because it was
    // found as the value of a field or pointer in the current heap, or
    // was the result of a call to follow(). In any other case (such
    // as when doing anything while traversing the graph), the
    // appropriate function to call is probably onlyLocalAllocation.
    Allocation& getAllocation(Node* identifier)
    {
        auto iter = m_allocations.find(identifier);
        ASSERT(iter != m_allocations.end());
        return iter->value;
    }

    void newPointer(Node* node, Node* identifier)
    {
        ASSERT(!m_allocations.contains(node) && !m_pointers.contains(node));
        ASSERT(isAllocation(identifier));
        m_pointers.add(node, identifier);
    }

    // follow() solves the points-to problem. Given a live node, which
    // may be either an allocation itself or a heap read (e.g. a
    // GetByOffset node), it returns the corresponding allocation
    // node, if there is one. If the argument node is neither an
    // allocation nor a heap read, or may point to different nodes,
    // nullptr will be returned. Note that a node that points to
    // different nodes can never point to an unescaped local
    // allocation.
    Node* follow(Node* node) const
    {
        auto iter = m_pointers.find(node);
        ASSERT(iter == m_pointers.end() || m_allocations.contains(iter->value));
        return iter == m_pointers.end() ? nullptr : iter->value;
    }

    Node* follow(PromotedHeapLocation location) const
    {
        const Allocation& base = m_allocations.find(location.base())->value;
        auto iter = base.fields().find(location.descriptor());

        if (iter == base.fields().end())
            return nullptr;

        return iter->value;
    }
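
    // For instance, with a hypothetical pointer map { @0 => #@0, @2 => #@1 },
    // follow(@2) returns #@1 and follow on a node with no entry returns
    // nullptr; likewise, following the location (#@0, x) returns #@1 when
    // the x field of #@0 holds that allocation.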

    // onlyLocalAllocation() finds the corresponding allocation metadata
    // for any live node. onlyLocalAllocation(node) is essentially
    // getAllocation(follow(node)), with appropriate null handling.
    Allocation* onlyLocalAllocation(Node* node)
    {
        Node* identifier = follow(node);
        if (!identifier)
            return nullptr;

        return &getAllocation(identifier);
    }

    Allocation* onlyLocalAllocation(PromotedHeapLocation location)
    {
        Node* identifier = follow(location);
        if (!identifier)
            return nullptr;

        return &getAllocation(identifier);
    }

    // This allows us to store the escapees only when necessary. If
    // set, the current escapees can be retrieved at any time using
    // takeEscapees(), which will clear the cached set of escapees;
    // otherwise the heap won't remember escaping allocations.
    void setWantEscapees()
    {
        m_wantEscapees = true;
    }

    HashMap<Node*, Allocation> takeEscapees()
    {
        return WTFMove(m_escapees);
    }

    void escape(Node* node)
    {
        Node* identifier = follow(node);
        if (!identifier)
            return;

        escapeAllocation(identifier);
    }

    void merge(const LocalHeap& other)
    {
        assertIsValid();
        other.assertIsValid();
        ASSERT(!m_wantEscapees);

        if (!reached()) {
            ASSERT(other.reached());
            *this = other;
            return;
        }

        NodeSet toEscape;

        for (auto& allocationEntry : other.m_allocations)
            m_allocations.add(allocationEntry.key, allocationEntry.value);
        for (auto& allocationEntry : m_allocations) {
            auto allocationIter = other.m_allocations.find(allocationEntry.key);

            // If we have it and they don't, it died for them but we
            // are keeping it alive from another field somewhere.
            // There is nothing to do - it will be escaped
            // automatically when we handle that other field.
            // Any allocation that we have and they don't, and that is
            // not kept alive that way, will simply get pruned.
            if (allocationIter == other.m_allocations.end())
                continue;

            if (allocationEntry.value.kind() != allocationIter->value.kind()) {
                toEscape.addVoid(allocationEntry.key);
                for (const auto& fieldEntry : allocationIter->value.fields())
                    toEscape.addVoid(fieldEntry.value);
            } else {
                mergePointerSets(allocationEntry.value.fields(), allocationIter->value.fields(), toEscape);
                allocationEntry.value.mergeStructures(allocationIter->value.structures());
            }
        }

        mergePointerSets(m_pointers, other.m_pointers, toEscape);

        for (Node* identifier : toEscape)
            escapeAllocation(identifier);

        if (!ASSERT_DISABLED) {
            for (const auto& entry : m_allocations)
                ASSERT_UNUSED(entry, entry.value.isEscapedAllocation() || other.m_allocations.contains(entry.key));
        }

        // If there is no remaining pointer to an allocation, we can
        // remove it. This should only happen for escaped allocations,
        // because we only merge liveness-pruned heaps in the first
        // place.
        prune();

        assertIsValid();
    }
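
    // As a small worked example (hypothetical heaps): if at the tail of
    // one predecessor #@0 is an Object whose x field holds #@1, while at
    // the tail of the other predecessor #@0's x field holds #@2, the
    // merge sees two different values for the same field, escapes both
    // #@1 and #@2, and drops the field from the merged #@0; prune() then
    // removes whatever allocations have become unreachable.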

    void pruneByLiveness(const NodeSet& live)
    {
        m_pointers.removeIf(
            [&] (const auto& entry) {
                return !live.contains(entry.key);
            });
        prune();
    }

    void assertIsValid() const
    {
        if (ASSERT_DISABLED)
            return;

        // Pointers should point to an actual allocation
        for (const auto& entry : m_pointers) {
            ASSERT_UNUSED(entry, entry.value);
            ASSERT(m_allocations.contains(entry.value));
        }

        for (const auto& allocationEntry : m_allocations) {
            // Fields should point to an actual allocation
            for (const auto& fieldEntry : allocationEntry.value.fields()) {
                ASSERT_UNUSED(fieldEntry, fieldEntry.value);
                ASSERT(m_allocations.contains(fieldEntry.value));
            }
        }
    }

    bool operator==(const LocalHeap& other) const
    {
        assertIsValid();
        other.assertIsValid();
        return m_allocations == other.m_allocations
            && m_pointers == other.m_pointers;
    }

    bool operator!=(const LocalHeap& other) const
    {
        return !(*this == other);
    }

    const HashMap<Node*, Allocation>& allocations() const
    {
        return m_allocations;
    }

    const HashMap<Node*, Node*>& pointers() const
    {
        return m_pointers;
    }

    void dump(PrintStream& out) const
    {
        out.print("  Allocations:\n");
        for (const auto& entry : m_allocations)
            out.print("    #", entry.key, ": ", entry.value, "\n");
        out.print("  Pointers:\n");
        for (const auto& entry : m_pointers)
            out.print("    ", entry.key, " => #", entry.value, "\n");
    }

    bool reached() const
    {
        return m_reached;
    }

    void setReached()
    {
        m_reached = true;
    }

private:
    // When we merge two heaps, we escape all fields of allocations,
    // unless they point to the same thing in both heaps.
    // The reason for this is that it allows us not to do extra work
    // for diamond graphs where we would otherwise have to check
    // whether we have a single definition or not, which would be
    // cumbersome.
    //
    // Note that we should try to unify nodes even when they are not
    // from the same allocation; for instance we should be able to
    // completely eliminate all allocations from the following graph:
    //
    // Block #0
    //  0: NewObject({})
    //  -: Branch(#1, #2)
    //
    // Block #1
    //  1: NewObject({})
    //  -: PutByOffset(@1, "left", val)
    //  -: PutStructure(@1, {val:0})
    //  -: PutByOffset(@0, @1, x)
    //  -: PutStructure(@0, {x:0})
    //  -: Jump(#3)
    //
    // Block #2
    //  2: NewObject({})
    //  -: PutByOffset(@2, "right", val)
    //  -: PutStructure(@2, {val:0})
    //  -: PutByOffset(@0, @2, x)
    //  -: PutStructure(@0, {x:0})
    //  -: Jump(#3)
    //
    // Block #3:
    //  3: GetByOffset(@0, x)
    //  4: GetByOffset(@3, val)
    //  -: Return(@4)
    template<typename Key>
    static void mergePointerSets(HashMap<Key, Node*>& my, const HashMap<Key, Node*>& their, NodeSet& toEscape)
    {
        auto escape = [&] (Node* identifier) {
            toEscape.addVoid(identifier);
        };

        for (const auto& entry : their) {
            if (!my.contains(entry.key))
                escape(entry.value);
        }
        my.removeIf([&] (const auto& entry) {
            auto iter = their.find(entry.key);
            if (iter == their.end()) {
                escape(entry.value);
                return true;
            }
            if (iter->value != entry.value) {
                escape(entry.value);
                escape(iter->value);
                return true;
            }
            return false;
        });
    }
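
    // For example (hypothetical maps): merging my = { @a => #X, @b => #Y }
    // with their = { @a => #X, @b => #Z, @c => #W } keeps @a => #X, removes
    // @b while escaping both #Y and #Z (the two heaps disagree on it), and
    // escapes #W because @c is missing on our side.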

    void escapeAllocation(Node* identifier)
    {
        Allocation& allocation = getAllocation(identifier);
        if (allocation.isEscapedAllocation())
            return;

        Allocation unescaped = WTFMove(allocation);
        allocation = Allocation(unescaped.identifier(), Allocation::Kind::Escaped);

        for (const auto& entry : unescaped.fields())
            escapeAllocation(entry.value);

        if (m_wantEscapees)
            m_escapees.add(unescaped.identifier(), WTFMove(unescaped));
    }
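
    // Note that escaping is transitive through fields: if #@0 holds #@1
    // in a field and #@1 holds #@2, escaping #@0 recursively escapes #@1
    // and #@2 as well, since they become observable through #@0.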

    void prune()
    {
        NodeSet reachable;
        for (const auto& entry : m_pointers)
            reachable.addVoid(entry.value);

        // Repeatedly mark as reachable any allocations stored in the
        // fields of other reachable allocations
        {
            Vector<Node*> worklist;
            worklist.appendRange(reachable.begin(), reachable.end());

            while (!worklist.isEmpty()) {
                Node* identifier = worklist.takeLast();
                Allocation& allocation = m_allocations.find(identifier)->value;
                for (const auto& entry : allocation.fields()) {
                    if (reachable.add(entry.value).isNewEntry)
                        worklist.append(entry.value);
                }
            }
        }

        // Remove unreachable allocations
        m_allocations.removeIf(
            [&] (const auto& entry) {
                return !reachable.contains(entry.key);
            });
    }
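
    // For instance (hypothetical state): with pointers { @2 => #@1 } and
    // allocations { #@0 => Object{x => #@1}, #@1 => Object{} }, #@0 is
    // not reachable from any pointer, so prune() drops it, while #@1
    // survives because @2 still points to it.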

    bool m_reached = false;
    HashMap<Node*, Node*> m_pointers;
    HashMap<Node*, Allocation> m_allocations;

    bool m_wantEscapees = false;
    HashMap<Node*, Allocation> m_escapees;
};

class ObjectAllocationSinkingPhase : public Phase {
public:
    ObjectAllocationSinkingPhase(Graph& graph)
        : Phase(graph, "object allocation elimination")
        , m_pointerSSA(graph)
        , m_allocationSSA(graph)
        , m_insertionSet(graph)
    {
    }

    bool run()
    {
        ASSERT(m_graph.m_form == SSA);
        ASSERT(m_graph.m_fixpointState == FixpointNotConverged);

        if (!performSinking())
            return false;

        if (DFGObjectAllocationSinkingPhaseInternal::verbose) {
            dataLog("Graph after elimination:\n");
            m_graph.dump();
        }

        return true;
    }

private:
    bool performSinking()
    {
        m_graph.computeRefCounts();
        m_graph.initializeNodeOwners();
        m_graph.ensureSSADominators();
        performLivenessAnalysis(m_graph);
        performOSRAvailabilityAnalysis(m_graph);
        m_combinedLiveness = CombinedLiveness(m_graph);

        CString graphBeforeSinking;
        if (Options::verboseValidationFailure() && Options::validateGraphAtEachPhase()) {
            StringPrintStream out;
            m_graph.dump(out);
            graphBeforeSinking = out.toCString();
        }

        if (DFGObjectAllocationSinkingPhaseInternal::verbose) {
            dataLog("Graph before elimination:\n");
            m_graph.dump();
        }

        performAnalysis();

        if (!determineSinkCandidates())
            return false;

        if (DFGObjectAllocationSinkingPhaseInternal::verbose) {
            for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
                dataLog("Heap at head of ", *block, ": \n", m_heapAtHead[block]);
                dataLog("Heap at tail of ", *block, ": \n", m_heapAtTail[block]);
            }
        }

        promoteLocalHeap();
        removeICStatusFilters();

        if (Options::validateGraphAtEachPhase())
            DFG::validate(m_graph, DumpGraph, graphBeforeSinking);
        return true;
    }

    void performAnalysis()
    {
        m_heapAtHead = BlockMap<LocalHeap>(m_graph);
        m_heapAtTail = BlockMap<LocalHeap>(m_graph);

        bool changed;
        do {
            if (DFGObjectAllocationSinkingPhaseInternal::verbose)
                dataLog("Doing iteration of escape analysis.\n");
            changed = false;

            for (BasicBlock* block : m_graph.blocksInPreOrder()) {
                m_heapAtHead[block].setReached();
                m_heap = m_heapAtHead[block];

                for (Node* node : *block) {
                    handleNode(
                        node,
                        [] (PromotedHeapLocation, LazyNode) { },
                        [&] (PromotedHeapLocation) -> Node* {
                            return nullptr;
                        });
                }

                if (m_heap == m_heapAtTail[block])
                    continue;

                m_heapAtTail[block] = m_heap;
                changed = true;

                m_heap.assertIsValid();

                // We keep only pointers that are live, and only
                // allocations that are either live, pointed to by a
                // live pointer, or (recursively) stored in a field of
                // a live allocation.
                //
                // This means we can accidentally leak non-dominating
                // nodes into the successor. However, due to the
                // non-dominance property, we are guaranteed that the
                // successor has at least one predecessor that is not
                // dominated either: this means any reference to a
                // non-dominating allocation in the successor will
                // trigger an escape and get pruned during the merge.
                m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

                for (BasicBlock* successorBlock : block->successors())
                    m_heapAtHead[successorBlock].merge(m_heap);
            }
        } while (changed);
    }

    template<typename WriteFunctor, typename ResolveFunctor>
    void handleNode(
        Node* node,
        const WriteFunctor& heapWrite,
        const ResolveFunctor& heapResolve)
    {
        m_heap.assertIsValid();
        ASSERT(m_heap.takeEscapees().isEmpty());

        Allocation* target = nullptr;
        HashMap<PromotedLocationDescriptor, LazyNode> writes;
        PromotedLocationDescriptor exactRead;

        switch (node->op()) {
        case NewObject:
            target = &m_heap.newAllocation(node, Allocation::Kind::Object);
            target->setStructures(node->structure());
            writes.add(
                StructurePLoc, LazyNode(m_graph.freeze(node->structure().get())));
            break;

        case NewFunction:
        case NewGeneratorFunction:
        case NewAsyncGeneratorFunction:
        case NewAsyncFunction: {
            if (isStillValid(node->castOperand<FunctionExecutable*>())) {
                m_heap.escape(node->child1().node());
                break;
            }

            if (node->op() == NewGeneratorFunction)
                target = &m_heap.newAllocation(node, Allocation::Kind::GeneratorFunction);
            else if (node->op() == NewAsyncFunction)
                target = &m_heap.newAllocation(node, Allocation::Kind::AsyncFunction);
            else if (node->op() == NewAsyncGeneratorFunction)
                target = &m_heap.newAllocation(node, Allocation::Kind::AsyncGeneratorFunction);
            else
                target = &m_heap.newAllocation(node, Allocation::Kind::Function);

            writes.add(FunctionExecutablePLoc, LazyNode(node->cellOperand()));
            writes.add(FunctionActivationPLoc, LazyNode(node->child1().node()));
            break;
        }

        case NewRegexp: {
            target = &m_heap.newAllocation(node, Allocation::Kind::RegExpObject);

            writes.add(RegExpObjectRegExpPLoc, LazyNode(node->cellOperand()));
            writes.add(RegExpObjectLastIndexPLoc, LazyNode(node->child1().node()));
            break;
        }

        case CreateActivation: {
            if (isStillValid(node->castOperand<SymbolTable*>())) {
                m_heap.escape(node->child1().node());
                break;
            }
            target = &m_heap.newAllocation(node, Allocation::Kind::Activation);
            writes.add(ActivationSymbolTablePLoc, LazyNode(node->cellOperand()));
            writes.add(ActivationScopePLoc, LazyNode(node->child1().node()));
            {
                SymbolTable* symbolTable = node->castOperand<SymbolTable*>();
                LazyNode initialValue(m_graph.freeze(node->initializationValueForActivation()));
                for (unsigned offset = 0; offset < symbolTable->scopeSize(); ++offset) {
                    writes.add(
                        PromotedLocationDescriptor(ClosureVarPLoc, offset),
                        initialValue);
                }
            }
            break;
        }

        case PutStructure:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isObjectAllocation()) {
                writes.add(StructurePLoc, LazyNode(m_graph.freeze(JSValue(node->transition()->next.get()))));
                target->setStructures(node->transition()->next);
            } else
                m_heap.escape(node->child1().node());
            break;

        case CheckStructureOrEmpty:
        case CheckStructure: {
            Allocation* allocation = m_heap.onlyLocalAllocation(node->child1().node());
            if (allocation && allocation->isObjectAllocation()) {
                RegisteredStructureSet filteredStructures = allocation->structures();
                filteredStructures.filter(node->structureSet());
                if (filteredStructures.isEmpty()) {
                    // FIXME: Write a test for this:
                    // https://bugs.webkit.org/show_bug.cgi?id=174322
                    m_heap.escape(node->child1().node());
                    break;
                }
                allocation->setStructures(filteredStructures);
                if (Node* value = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc)))
                    node->convertToCheckStructureImmediate(value);
            } else
                m_heap.escape(node->child1().node());
            break;
        }

        case GetByOffset:
        case GetGetterSetterByOffset:
            target = m_heap.onlyLocalAllocation(node->child2().node());
            if (target && target->isObjectAllocation()) {
                unsigned identifierNumber = node->storageAccessData().identifierNumber;
                exactRead = PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber);
            } else {
                m_heap.escape(node->child1().node());
                m_heap.escape(node->child2().node());
            }
            break;

        case MultiGetByOffset: {
            Allocation* allocation = m_heap.onlyLocalAllocation(node->child1().node());
            if (allocation && allocation->isObjectAllocation()) {
                MultiGetByOffsetData& data = node->multiGetByOffsetData();
                RegisteredStructureSet validStructures;
                bool hasInvalidStructures = false;
                for (const auto& multiGetByOffsetCase : data.cases) {
                    if (!allocation->structures().overlaps(multiGetByOffsetCase.set()))
                        continue;

                    switch (multiGetByOffsetCase.method().kind()) {
                    case GetByOffsetMethod::LoadFromPrototype: // We need to escape those
                    case GetByOffsetMethod::Constant: // We don't really have a way of expressing this
                        hasInvalidStructures = true;
                        break;

                    case GetByOffsetMethod::Load: // We're good
                        validStructures.merge(multiGetByOffsetCase.set());
                        break;

                    default:
                        RELEASE_ASSERT_NOT_REACHED();
                    }
                }
                if (hasInvalidStructures || validStructures.isEmpty()) {
                    m_heap.escape(node->child1().node());
                    break;
                }
                unsigned identifierNumber = data.identifierNumber;
                PromotedHeapLocation location(NamedPropertyPLoc, allocation->identifier(), identifierNumber);
                if (Node* value = heapResolve(location)) {
                    if (allocation->structures().isSubsetOf(validStructures))
                        node->replaceWithWithoutChecks(value);
                    else {
                        Node* structure = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc));
                        ASSERT(structure);
                        allocation->filterStructures(validStructures);
                        node->convertToCheckStructure(m_graph.addStructureSet(allocation->structures()));
                        node->convertToCheckStructureImmediate(structure);
                        node->setReplacement(value);
                    }
                } else if (!allocation->structures().isSubsetOf(validStructures)) {
                    // Even though we don't need the result here, we still need
                    // to make the call to tell our caller that we could need
                    // the StructurePLoc.
                    // The reason for this is that when we decide not to sink a
                    // node, we will still lower any read to its fields before
                    // it escapes (which are usually reads across a function
                    // call that DFGClobberize can't handle) - but we only do
                    // this for PromotedHeapLocations that we have seen read
                    // during the analysis!
                    heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc));
                    allocation->filterStructures(validStructures);
                }
                Node* identifier = allocation->get(location.descriptor());
                if (identifier)
                    m_heap.newPointer(node, identifier);
            } else
                m_heap.escape(node->child1().node());
            break;
        }

        case PutByOffset:
            target = m_heap.onlyLocalAllocation(node->child2().node());
            if (target && target->isObjectAllocation()) {
                unsigned identifierNumber = node->storageAccessData().identifierNumber;
                writes.add(
                    PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber),
                    LazyNode(node->child3().node()));
            } else {
                m_heap.escape(node->child1().node());
                m_heap.escape(node->child2().node());
                m_heap.escape(node->child3().node());
            }
            break;

        case GetClosureVar:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isActivationAllocation()) {
                exactRead =
                    PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset());
            } else
                m_heap.escape(node->child1().node());
            break;

        case PutClosureVar:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isActivationAllocation()) {
                writes.add(
                    PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset()),
                    LazyNode(node->child2().node()));
            } else {
                m_heap.escape(node->child1().node());
                m_heap.escape(node->child2().node());
            }
            break;

        case SkipScope:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isActivationAllocation())
                exactRead = ActivationScopePLoc;
            else
                m_heap.escape(node->child1().node());
            break;

        case GetExecutable:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isFunctionAllocation())
                exactRead = FunctionExecutablePLoc;
            else
                m_heap.escape(node->child1().node());
            break;

        case GetScope:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isFunctionAllocation())
                exactRead = FunctionActivationPLoc;
            else
                m_heap.escape(node->child1().node());
            break;

        case GetRegExpObjectLastIndex:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isRegExpObjectAllocation())
                exactRead = RegExpObjectLastIndexPLoc;
            else
                m_heap.escape(node->child1().node());
            break;

        case SetRegExpObjectLastIndex:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isRegExpObjectAllocation()) {
                writes.add(
                    PromotedLocationDescriptor(RegExpObjectLastIndexPLoc),
                    LazyNode(node->child2().node()));
            } else {
                m_heap.escape(node->child1().node());
                m_heap.escape(node->child2().node());
            }
            break;

        case Check:
        case CheckVarargs:
            m_graph.doToChildren(
                node,
                [&] (Edge edge) {
                    if (edge.willNotHaveCheck())
                        return;

                    if (alreadyChecked(edge.useKind(), SpecObject))
                        return;

                    m_heap.escape(edge.node());
                });
            break;

        case MovHint:
        case PutHint:
            // Handled by OSR availability analysis
            break;

        case FilterCallLinkStatus:
        case FilterGetByStatus:
        case FilterPutByIdStatus:
        case FilterInByIdStatus:
            break;

        default:
            m_graph.doToChildren(
                node,
                [&] (Edge edge) {
                    m_heap.escape(edge.node());
                });
            break;
        }

        if (exactRead) {
            ASSERT(target);
            ASSERT(writes.isEmpty());
            if (Node* value = heapResolve(PromotedHeapLocation(target->identifier(), exactRead))) {
                ASSERT(!value->replacement());
                node->replaceWith(m_graph, value);
            }
            Node* identifier = target->get(exactRead);
            if (identifier)
                m_heap.newPointer(node, identifier);
        }

        for (auto entry : writes) {
            ASSERT(target);
            if (entry.value.isNode())
                target->set(entry.key, m_heap.follow(entry.value.asNode()));
            else
                target->remove(entry.key);
            heapWrite(PromotedHeapLocation(target->identifier(), entry.key), entry.value);
        }

        m_heap.assertIsValid();
    }

    bool determineSinkCandidates()
    {
        m_sinkCandidates.clear();
        m_materializationToEscapee.clear();
        m_materializationSiteToMaterializations.clear();
        m_materializationSiteToRecoveries.clear();
        m_materializationSiteToHints.clear();

        // Logically we wish to consider every allocation and sink
        // it. However, it is probably not profitable to sink an
        // allocation that will always escape. So, we only sink an
        // allocation if one of the following is true:
        //
        // 1) There exists a basic block with only backwards outgoing
        //    edges (or no outgoing edges) in which the node wasn't
        //    materialized. This is meant to catch
        //    effectively-infinite loops in which we don't need to
        //    have allocated the object.
        //
        // 2) There exists a basic block at the tail of which the node
        //    is dead and not materialized.
        //
        // 3) The sum of execution counts of the materializations is
        //    less than the sum of execution counts of the original
        //    node.
        //
        // We currently implement only rule #2.
        // FIXME: Implement the two other rules.
        // https://bugs.webkit.org/show_bug.cgi?id=137073 (rule #1)
        // https://bugs.webkit.org/show_bug.cgi?id=137074 (rule #3)
        //
        // However, these rules allow for a sunk object to be put into
        // a non-sunk one, which we don't support. We could solve this
        // by supporting PutHints on local allocations, making these
        // objects only partially correct, and we would need to adapt
        // the OSR availability analysis and OSR exit to handle
        // this. This would be totally doable, but would create a
        // super rare, and thus bug-prone, code path.
        // So, instead, we need to implement one of the following
        // closure rules:
        //
        // 1) If we put a sink candidate into a local allocation that
        //    is not a sink candidate, change our minds and don't
        //    actually sink the sink candidate.
        //
        // 2) If we put a sink candidate into a local allocation, that
        //    allocation becomes a sink candidate as well.
        //
        // We currently choose to implement closure rule #2.
        HashMap<Node*, Vector<Node*>> dependencies;
        bool hasUnescapedReads = false;
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            m_heap = m_heapAtHead[block];

            for (Node* node : *block) {
                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        if (!value.isNode())
                            return;

                        Allocation* allocation = m_heap.onlyLocalAllocation(value.asNode());
                        if (allocation && !allocation->isEscapedAllocation())
                            dependencies.add(allocation->identifier(), Vector<Node*>()).iterator->value.append(location.base());
                    },
                    [&] (PromotedHeapLocation) -> Node* {
                        hasUnescapedReads = true;
                        return nullptr;
                    });
            }

            // The sink candidates are initially the unescaped
            // allocations dying at tail of blocks
            NodeSet allocations;
            for (const auto& entry : m_heap.allocations()) {
                if (!entry.value.isEscapedAllocation())
                    allocations.addVoid(entry.key);
            }

            m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

            for (Node* identifier : allocations) {
                if (!m_heap.isAllocation(identifier))
                    m_sinkCandidates.addVoid(identifier);
            }
        }
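
        // As a concrete illustration of rule #2 (hypothetical block):
        //
        // Block #0
        //  0: NewObject({})
        //  1: JSConstant(undefined)
        //  -: Return(@1)
        //
        // @0 never escapes but is dead at the tail of block #0, so
        // liveness pruning drops #@0 from the heap, and @0 becomes a
        // sink candidate.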

        auto forEachEscapee = [&] (auto callback) {
            for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
                m_heap = m_heapAtHead[block];
                m_heap.setWantEscapees();

                for (Node* node : *block) {
                    handleNode(
                        node,
                        [] (PromotedHeapLocation, LazyNode) { },
                        [] (PromotedHeapLocation) -> Node* {
                            return nullptr;
                        });
                    auto escapees = m_heap.takeEscapees();
                    escapees.removeIf([&] (const auto& entry) { return !m_sinkCandidates.contains(entry.key); });
                    callback(escapees, node);
                }

                m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

                {
                    HashMap<Node*, Allocation> escapingOnEdge;
                    for (const auto& entry : m_heap.allocations()) {
                        if (entry.value.isEscapedAllocation())
                            continue;

                        bool mustEscape = false;
                        for (BasicBlock* successorBlock : block->successors()) {
                            if (!m_heapAtHead[successorBlock].isAllocation(entry.key)
                                || m_heapAtHead[successorBlock].getAllocation(entry.key).isEscapedAllocation())
                                mustEscape = true;
                        }

                        if (mustEscape && m_sinkCandidates.contains(entry.key))
                            escapingOnEdge.add(entry.key, entry.value);
                    }
                    callback(escapingOnEdge, block->terminal());
                }
            }
        };

        if (m_sinkCandidates.size()) {
            // If we're moving an allocation to `where` in the program, we need to ensure
            // we can still walk the stack at that point in the program for the
            // InlineCallFrame of the original allocation. Certain InlineCallFrames rely on
            // data in the stack when taking a stack trace. All allocation sites can do a
            // stack walk (we do a stack walk when we GC). Conservatively, we say we're
            // still OK to move this allocation if we are moving within the same InlineCallFrame.
            // We could be more precise here and do an analysis of stack writes. However,
            // this scenario is so rare that we just take the conservative and straightforward
            // approach of checking that we're in the same InlineCallFrame.

            forEachEscapee([&] (HashMap<Node*, Allocation>& escapees, Node* where) {
                for (Node* allocation : escapees.keys()) {
                    InlineCallFrame* inlineCallFrame = allocation->origin.semantic.inlineCallFrame();
                    if (!inlineCallFrame)
                        continue;
                    if ((inlineCallFrame->isClosureCall || inlineCallFrame->isVarargs()) && inlineCallFrame != where->origin.semantic.inlineCallFrame())
                        m_sinkCandidates.remove(allocation);
                }
            });
        }

        // Ensure that the set of sink candidates is closed for put operations.
        // This is closure rule #2 as described above.
        Vector<Node*> worklist;
        worklist.appendRange(m_sinkCandidates.begin(), m_sinkCandidates.end());

        while (!worklist.isEmpty()) {
            for (Node* identifier : dependencies.get(worklist.takeLast())) {
                if (m_sinkCandidates.add(identifier).isNewEntry)
                    worklist.append(identifier);
            }
        }

        if (m_sinkCandidates.isEmpty())
            return hasUnescapedReads;

        if (DFGObjectAllocationSinkingPhaseInternal::verbose)
            dataLog("Candidates: ", listDump(m_sinkCandidates), "\n");


        // Create the materialization nodes.
        forEachEscapee([&] (HashMap<Node*, Allocation>& escapees, Node* where) {
            placeMaterializations(WTFMove(escapees), where);
        });

        return hasUnescapedReads || !m_sinkCandidates.isEmpty();
    }

    void placeMaterializations(HashMap<Node*, Allocation> escapees, Node* where)
    {
        // First collect the hints that will be needed when the node
        // we materialize is still stored into other unescaped sink candidates.
        // The way to interpret this vector is:
        //
        // PromotedHeapLocation(NotEscapedAllocation, field) = identifierAllocation
        //
        // e.g.:
        // PromotedHeapLocation(@PhantomNewFunction, FunctionActivationPLoc) = IdentifierOf(@MaterializeCreateActivation)
        // or:
        // PromotedHeapLocation(@PhantomCreateActivation, ClosureVarPLoc(x)) = IdentifierOf(@NewFunction)
        //
        // Here the rhs of the `=` is to be materialized at this `where` point in the program,
        // and IdentifierOf(Materialization) is the original sunken allocation of the materialization.
        //
        // The reason we need to collect all the `identifiers` here is that
        // we may materialize multiple versions of the allocation along control
        // flow edges. We will PutHint these values along those edges. However,
        // we also need to PutHint them when we join and have a Phi of the allocations.
        Vector<std::pair<PromotedHeapLocation, Node*>> hints;
        for (const auto& entry : m_heap.allocations()) {
            if (escapees.contains(entry.key))
                continue;

            for (const auto& field : entry.value.fields()) {
                ASSERT(m_sinkCandidates.contains(entry.key) || !escapees.contains(field.value));
                auto iter = escapees.find(field.value);
                if (iter != escapees.end()) {
                    ASSERT(m_sinkCandidates.contains(field.value));
                    hints.append(std::make_pair(PromotedHeapLocation(entry.key, field.key), field.value));
                }
            }
        }

        // Now we need to order the materialization. Any order is
        // valid (as long as we materialize a node first if it is
        // needed for the materialization of another node, e.g. a
        // function's activation must be materialized before the
        // function itself), but we want to try minimizing the number
        // of times we have to place Puts to close cycles after a
        // materialization. In other words, we are trying to find the
        // minimum number of materializations to remove from the
        // materialization graph to make it a DAG, known as the
        // (vertex) feedback set problem. Unfortunately, this is an
        // NP-hard problem, which we don't want to solve exactly.
        //
        // Instead, we use a simple greedy procedure that proceeds as
        // follows:
        //  - While there is at least one node with no outgoing edge
        //    amongst the remaining materializations, materialize it
        //    first.
        //
        //  - Similarly, while there is at least one node with no
        //    incoming edge amongst the remaining materializations,
        //    materialize it last.
        //
        //  - When both previous conditions are false, we have an
        //    actual cycle, and we need to pick a node to
        //    materialize. We try greedily to remove the "pressure" on
        //    the remaining nodes by choosing the node with maximum
        //    |incoming edges| * |outgoing edges| as a measure of how
        //    "central" to the graph it is. We materialize it first,
        //    so that all the recoveries will be Puts of things into
        //    it (rather than Puts of the materialization into other
        //    objects), which means we will have a single
        //    StoreBarrier.

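        // As a hypothetical two-node cycle: if escapee #@0 holds #@1 in a
        // field and #@1 holds #@0, neither of the two easy rules applies,
        // so we pick the more "central" one (say #@0), materialize it
        // first, then materialize #@1, and finally emit a recovery Put of
        // #@1's materialization into the corresponding field of #@0's
        // materialization to close the cycle.
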
1375
1376         // Compute dependencies between materializations
1377         HashMap<Node*, NodeSet> dependencies;
1378         HashMap<Node*, NodeSet> reverseDependencies;
1379         HashMap<Node*, NodeSet> forMaterialization;
1380         for (const auto& entry : escapees) {
1381             auto& myDependencies = dependencies.add(entry.key, NodeSet()).iterator->value;
1382             auto& myDependenciesForMaterialization = forMaterialization.add(entry.key, NodeSet()).iterator->value;
1383             reverseDependencies.add(entry.key, NodeSet());
1384             for (const auto& field : entry.value.fields()) {
1385                 if (escapees.contains(field.value) && field.value != entry.key) {
1386                     myDependencies.addVoid(field.value);
1387                     reverseDependencies.add(field.value, NodeSet()).iterator->value.addVoid(entry.key);
1388                     if (field.key.neededForMaterialization())
1389                         myDependenciesForMaterialization.addVoid(field.value);
1390                 }
1391             }
1392         }
1393
1394         // Helper function to update the materialized set and the
1395         // dependencies
1396         NodeSet materialized;
1397         auto materialize = [&] (Node* identifier) {
1398             materialized.addVoid(identifier);
1399             for (Node* dep : dependencies.get(identifier))
1400                 reverseDependencies.find(dep)->value.remove(identifier);
1401             for (Node* rdep : reverseDependencies.get(identifier)) {
1402                 dependencies.find(rdep)->value.remove(identifier);
1403                 forMaterialization.find(rdep)->value.remove(identifier);
1404             }
1405             dependencies.remove(identifier);
1406             reverseDependencies.remove(identifier);
1407             forMaterialization.remove(identifier);
1408         };
1409
1410         // Nodes without remaining unmaterialized fields will be
1411         // materialized first (amongst the remaining unmaterialized
1412         // nodes).
1413         StdList<Allocation> toMaterialize;
1414         auto firstPos = toMaterialize.begin();
1415         auto materializeFirst = [&] (Allocation&& allocation) {
1416             materialize(allocation.identifier());
1417             // We need to insert *after* the current position
1418             if (firstPos != toMaterialize.end())
1419                 ++firstPos;
1420             firstPos = toMaterialize.insert(firstPos, WTFMove(allocation));
1421         };
1422
1423         // Nodes that no other unmaterialized node points to will be
1424         // materialized last (amongst the remaining unmaterialized
1425         // nodes).
1426         auto lastPos = toMaterialize.end();
1427         auto materializeLast = [&] (Allocation&& allocation) {
1428             materialize(allocation.identifier());
1429             lastPos = toMaterialize.insert(lastPos, WTFMove(allocation));
1430         };
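             //
             // Continuing the hypothetical example above: the calls
             // materializeFirst(D), materializeLast(A), materializeFirst(B),
             // materializeFirst(C) build the list as [D], [D, A], [D, B, A],
             // and finally [D, B, C, A]; each materializeFirst inserts just
             // after the previous "first" element, while materializeLast
             // appends at the end.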
1431
1432         // These are the promoted locations that contain some of the
1433         // allocations we are currently escaping. If they are a location on
1434         // some other allocation we are currently materializing, we will need
1435         // to "recover" their value with a real put once the corresponding
1436         // allocation is materialized; if they are a location on some other
1437         // not-yet-materialized allocation, we will need a PutHint.
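             //
             // For example (hypothetical): if escapees A and B point at each
             // other and A is materialized before B, the location on A that
             // holds B is recovered with a real Put once B is materialized;
             // whereas if A is instead held in a field of a still-sunk
             // allocation C, that field only needs a PutHint of A's
             // materialization.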
1438         Vector<PromotedHeapLocation> toRecover;
1439
1440         // This loop does the actual cycle breaking
1441         while (!escapees.isEmpty()) {
1442             materialized.clear();
1443
1444             // Materialize nodes that won't require recoveries if we can
1445             for (auto& entry : escapees) {
1446                 if (!forMaterialization.find(entry.key)->value.isEmpty())
1447                     continue;
1448
1449                 if (dependencies.find(entry.key)->value.isEmpty()) {
1450                     materializeFirst(WTFMove(entry.value));
1451                     continue;
1452                 }
1453
1454                 if (reverseDependencies.find(entry.key)->value.isEmpty()) {
1455                     materializeLast(WTFMove(entry.value));
1456                     continue;
1457                 }
1458             }
1459
1460             // We reach this only if there is an actual cycle that needs
1461         // breaking. Because we do not want to solve an NP-hard problem
1462             // here, we just heuristically pick a node and materialize it
1463             // first.
1464             if (materialized.isEmpty()) {
1465                 uint64_t maxEvaluation = 0;
1466                 Allocation* bestAllocation = nullptr;
1467                 for (auto& entry : escapees) {
1468                     if (!forMaterialization.find(entry.key)->value.isEmpty())
1469                         continue;
1470
1471                     uint64_t evaluation =
1472                         static_cast<uint64_t>(dependencies.get(entry.key).size()) * reverseDependencies.get(entry.key).size();
1473                     if (evaluation > maxEvaluation) {
1474                         maxEvaluation = evaluation;
1475                         bestAllocation = &entry.value;
1476                     }
1477                 }
1478                 RELEASE_ASSERT(maxEvaluation > 0);
1479
1480                 materializeFirst(WTFMove(*bestAllocation));
1481             }
1482             RELEASE_ASSERT(!materialized.isEmpty());
1483
1484             for (Node* identifier : materialized)
1485                 escapees.remove(identifier);
1486         }
1487
1488         materialized.clear();
1489
1490         NodeSet escaped;
1491         for (const Allocation& allocation : toMaterialize)
1492             escaped.addVoid(allocation.identifier());
1493         for (const Allocation& allocation : toMaterialize) {
1494             for (const auto& field : allocation.fields()) {
1495                 if (escaped.contains(field.value) && !materialized.contains(field.value))
1496                     toRecover.append(PromotedHeapLocation(allocation.identifier(), field.key));
1497             }
1498             materialized.addVoid(allocation.identifier());
1499         }
1500
1501         Vector<Node*>& materializations = m_materializationSiteToMaterializations.add(
1502             where, Vector<Node*>()).iterator->value;
1503
1504         for (const Allocation& allocation : toMaterialize) {
1505             Node* materialization = createMaterialization(allocation, where);
1506             materializations.append(materialization);
1507             m_materializationToEscapee.add(materialization, allocation.identifier());
1508         }
1509
1510         if (!toRecover.isEmpty()) {
1511             m_materializationSiteToRecoveries.add(
1512                 where, Vector<PromotedHeapLocation>()).iterator->value.appendVector(toRecover);
1513         }
1514
1515         // The hints need to be after the "real" recoveries so that we
1516         // don't hint not-yet-complete objects.
1517         m_materializationSiteToHints.add(
1518             where, Vector<std::pair<PromotedHeapLocation, Node*>>()).iterator->value.appendVector(hints);
1519     }
1520
1521     Node* createMaterialization(const Allocation& allocation, Node* where)
1522     {
1523         // FIXME: This is the only place where we actually use the
1524         // fact that an allocation's identifier is indeed the node
1525         // that created the allocation.
1526         switch (allocation.kind()) {
1527         case Allocation::Kind::Object: {
1528             ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
1529
1530             return m_graph.addNode(
1531                 allocation.identifier()->prediction(), Node::VarArg, MaterializeNewObject,
1532                 where->origin.withSemantic(allocation.identifier()->origin.semantic),
1533                 OpInfo(m_graph.addStructureSet(allocation.structures())), OpInfo(data), 0, 0);
1534         }
1535
1536         case Allocation::Kind::AsyncGeneratorFunction:
1537         case Allocation::Kind::AsyncFunction:
1538         case Allocation::Kind::GeneratorFunction:
1539         case Allocation::Kind::Function: {
1540             FrozenValue* executable = allocation.identifier()->cellOperand();
1541             
1542             NodeType nodeType;
1543             switch (allocation.kind()) {
1544             case Allocation::Kind::GeneratorFunction:
1545                 nodeType = NewGeneratorFunction;
1546                 break;
1547             case Allocation::Kind::AsyncGeneratorFunction:
1548                 nodeType = NewAsyncGeneratorFunction;
1549                 break;
1550             case Allocation::Kind::AsyncFunction:
1551                 nodeType = NewAsyncFunction;
1552                 break;
1553             default:
1554                 nodeType = NewFunction;
1555             }
1556
1557             return m_graph.addNode(
1558                 allocation.identifier()->prediction(), nodeType,
1559                 where->origin.withSemantic(
1560                     allocation.identifier()->origin.semantic),
1561                 OpInfo(executable));
1562         }
1563
1564         case Allocation::Kind::Activation: {
1565             ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
1566             FrozenValue* symbolTable = allocation.identifier()->cellOperand();
1567
1568             return m_graph.addNode(
1569                 allocation.identifier()->prediction(), Node::VarArg, MaterializeCreateActivation,
1570                 where->origin.withSemantic(
1571                     allocation.identifier()->origin.semantic),
1572                 OpInfo(symbolTable), OpInfo(data), 0, 0);
1573         }
1574
1575         case Allocation::Kind::RegExpObject: {
1576             FrozenValue* regExp = allocation.identifier()->cellOperand();
1577             return m_graph.addNode(
1578                 allocation.identifier()->prediction(), NewRegexp,
1579                 where->origin.withSemantic(
1580                     allocation.identifier()->origin.semantic),
1581                 OpInfo(regExp));
1582         }
1583
1584         default:
1585             DFG_CRASH(m_graph, allocation.identifier(), "Bad allocation kind");
1586         }
1587     }
1588
1589     void promoteLocalHeap()
1590     {
1591         // Collect the set of heap locations that we will be operating
1592         // over.
1593         HashSet<PromotedHeapLocation> locations;
1594         for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
1595             m_heap = m_heapAtHead[block];
1596
1597             for (Node* node : *block) {
1598                 handleNode(
1599                     node,
1600                     [&] (PromotedHeapLocation location, LazyNode) {
1601                         // If the location is not on a sink candidate,
1602                         // we only track it if it is read.
1603                         if (m_sinkCandidates.contains(location.base()))
1604                             locations.addVoid(location);
1605                     },
1606                     [&] (PromotedHeapLocation location) -> Node* {
1607                         locations.addVoid(location);
1608                         return nullptr;
1609                     });
1610             }
1611         }
1612
1613         // Figure out which locations belong to which allocations.
1614         m_locationsForAllocation.clear();
1615         for (PromotedHeapLocation location : locations) {
1616             auto result = m_locationsForAllocation.add(
1617                 location.base(),
1618                 Vector<PromotedHeapLocation>());
1619             ASSERT(!result.iterator->value.contains(location));
1620             result.iterator->value.append(location);
1621         }
1622
1623         m_pointerSSA.reset();
1624         m_allocationSSA.reset();
1625
1626         // Collect the set of "variables" that we will be sinking.
1627         m_locationToVariable.clear();
1628         m_nodeToVariable.clear();
1629         Vector<Node*> indexToNode;
1630         Vector<PromotedHeapLocation> indexToLocation;
1631
1632         for (Node* index : m_sinkCandidates) {
1633             SSACalculator::Variable* variable = m_allocationSSA.newVariable();
1634             m_nodeToVariable.add(index, variable);
1635             ASSERT(indexToNode.size() == variable->index());
1636             indexToNode.append(index);
1637         }
1638
1639         for (PromotedHeapLocation location : locations) {
1640             SSACalculator::Variable* variable = m_pointerSSA.newVariable();
1641             m_locationToVariable.add(location, variable);
1642             ASSERT(indexToLocation.size() == variable->index());
1643             indexToLocation.append(location);
1644         }
1645
1646         // We insert all required constants at the top of block 0 so that
1647         // they are inserted only once and we don't clutter the graph
1648         // with useless constants everywhere.
1649         HashMap<FrozenValue*, Node*> lazyMapping;
1650         if (!m_bottom)
1651             m_bottom = m_insertionSet.insertConstant(0, m_graph.block(0)->at(0)->origin, jsNumber(1927));
1652
1653         Vector<HashSet<PromotedHeapLocation>> hintsForPhi(m_sinkCandidates.size());
1654
1655         for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
1656             m_heap = m_heapAtHead[block];
1657
1658             for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
1659                 Node* node = block->at(nodeIndex);
1660
1661                 // Some named properties can be added conditionally,
1662                 // and that would necessitate bottoms
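                 //
                 // A sketch of the situation (invented IR): if block #1 does
                 // PutByOffset(@0, @x, f) but the other path to the join does
                 // not, NamedPropertyPLoc(f) still needs a definition on both
                 // paths, so we pre-define it here to the bottom constant.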
1663                 for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
1664                     if (location.kind() != NamedPropertyPLoc)
1665                         continue;
1666
1667                     SSACalculator::Variable* variable = m_locationToVariable.get(location);
1668                     m_pointerSSA.newDef(variable, block, m_bottom);
1669                 }
1670
1671                 for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
1672                     Node* escapee = m_materializationToEscapee.get(materialization);
1673                     m_allocationSSA.newDef(m_nodeToVariable.get(escapee), block, materialization);
1674                 }
1675
1676                 for (std::pair<PromotedHeapLocation, Node*> pair : m_materializationSiteToHints.get(node)) {
1677                     PromotedHeapLocation location = pair.first;
1678                     Node* identifier = pair.second;
1679                     // We're materializing `identifier` at this point, and the unmaterialized
1680                     // version is inside `location`. We track which SSA variable this belongs
1681                     // to in case we also need a PutHint for the Phi.
1682                     if (UNLIKELY(validationEnabled())) {
1683                         RELEASE_ASSERT(m_sinkCandidates.contains(location.base()));
1684                         RELEASE_ASSERT(m_sinkCandidates.contains(identifier));
1685
1686                         bool found = false;
1687                         for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
1688                             // We're materializing `identifier` here. This asserts that this is indeed the case.
1689                             if (m_materializationToEscapee.get(materialization) == identifier) {
1690                                 found = true;
1691                                 break;
1692                             }
1693                         }
1694                         RELEASE_ASSERT(found);
1695                     }
1696
1697                     SSACalculator::Variable* variable = m_nodeToVariable.get(identifier);
1698                     hintsForPhi[variable->index()].addVoid(location);
1699                 }
1700
1701                 if (m_sinkCandidates.contains(node))
1702                     m_allocationSSA.newDef(m_nodeToVariable.get(node), block, node);
1703
1704                 handleNode(
1705                     node,
1706                     [&] (PromotedHeapLocation location, LazyNode value) {
1707                         if (!locations.contains(location))
1708                             return;
1709
1710                         Node* nodeValue;
1711                         if (value.isNode())
1712                             nodeValue = value.asNode();
1713                         else {
1714                             auto iter = lazyMapping.find(value.asValue());
1715                             if (iter != lazyMapping.end())
1716                                 nodeValue = iter->value;
1717                             else {
1718                                 nodeValue = value.ensureIsNode(
1719                                     m_insertionSet, m_graph.block(0), 0);
1720                                 lazyMapping.add(value.asValue(), nodeValue);
1721                             }
1722                         }
1723
1724                         SSACalculator::Variable* variable = m_locationToVariable.get(location);
1725                         m_pointerSSA.newDef(variable, block, nodeValue);
1726                     },
1727                     [] (PromotedHeapLocation) -> Node* {
1728                         return nullptr;
1729                     });
1730             }
1731         }
1732         m_insertionSet.execute(m_graph.block(0));
1733
1734         // Run the SSA calculators to create Phis
1735         m_pointerSSA.computePhis(
1736             [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
1737                 PromotedHeapLocation location = indexToLocation[variable->index()];
1738
1739                 // Don't create Phi nodes for fields of dead allocations
1740                 if (!m_heapAtHead[block].isAllocation(location.base()))
1741                     return nullptr;
1742
1743                 // Don't create Phi nodes once we are escaped
1744                 if (m_heapAtHead[block].getAllocation(location.base()).isEscapedAllocation())
1745                     return nullptr;
1746
1747                 // If we point to a single allocation, we will
1748                 // directly use its materialization
1749                 if (m_heapAtHead[block].follow(location))
1750                     return nullptr;
1751
1752                 Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, block->at(0)->origin.withInvalidExit());
1753                 phiNode->mergeFlags(NodeResultJS);
1754                 return phiNode;
1755             });
1756
1757         m_allocationSSA.computePhis(
1758             [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
1759                 Node* identifier = indexToNode[variable->index()];
1760
1761                 // Don't create Phi nodes for dead allocations
1762                 if (!m_heapAtHead[block].isAllocation(identifier))
1763                     return nullptr;
1764
1765                 // Don't create Phi nodes until we are escaped
1766                 if (!m_heapAtHead[block].getAllocation(identifier).isEscapedAllocation())
1767                     return nullptr;
1768
1769                 Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, block->at(0)->origin.withInvalidExit());
1770                 phiNode->mergeFlags(NodeResultJS);
1771                 return phiNode;
1772             });
1773
1774         // Place Phis in the right places, replace all uses of any load with the appropriate
1775         // value, and create the materialization nodes.
1776         LocalOSRAvailabilityCalculator availabilityCalculator(m_graph);
1777         m_graph.clearReplacements();
1778         for (BasicBlock* block : m_graph.blocksInPreOrder()) {
1779             m_heap = m_heapAtHead[block];
1780             availabilityCalculator.beginBlock(block);
1781
1782             // These mapping tables are intended to be lazy. If
1783             // something is omitted from the table, it means that
1784             // there haven't been any local stores to the promoted
1785             // heap location (or any local materialization).
1786             m_localMapping.clear();
1787             m_escapeeToMaterialization.clear();
1788
1789             // Insert the Phi functions that we had previously
1790             // created.
1791             for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(block)) {
1792                 SSACalculator::Variable* variable = phiDef->variable();
1793                 m_insertionSet.insert(0, phiDef->value());
1794
1795                 PromotedHeapLocation location = indexToLocation[variable->index()];
1796                 m_localMapping.set(location, phiDef->value());
1797
1798                 if (m_sinkCandidates.contains(location.base())) {
1799                     m_insertionSet.insert(
1800                         0,
1801                         location.createHint(
1802                             m_graph, block->at(0)->origin.withInvalidExit(), phiDef->value()));
1803                 }
1804             }
1805
1806             for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(block)) {
1807                 SSACalculator::Variable* variable = phiDef->variable();
1808                 m_insertionSet.insert(0, phiDef->value());
1809
1810                 Node* identifier = indexToNode[variable->index()];
1811                 m_escapeeToMaterialization.add(identifier, phiDef->value());
1812                 bool canExit = false;
1813                 insertOSRHintsForUpdate(
1814                     0, block->at(0)->origin, canExit,
1815                     availabilityCalculator.m_availability, identifier, phiDef->value());
1816
1817                 for (PromotedHeapLocation location : hintsForPhi[variable->index()]) {
1818                     if (m_heap.onlyLocalAllocation(location.base())) {
1819                         m_insertionSet.insert(0,
1820                             location.createHint(m_graph, block->at(0)->origin.withInvalidExit(), phiDef->value()));
1821                         m_localMapping.set(location, phiDef->value());
1822                     }
1823                 }
1824             }
1825
1826             if (DFGObjectAllocationSinkingPhaseInternal::verbose) {
1827                 dataLog("Local mapping at ", pointerDump(block), ": ", mapDump(m_localMapping), "\n");
1828                 dataLog("Local materializations at ", pointerDump(block), ": ", mapDump(m_escapeeToMaterialization), "\n");
1829             }
1830
1831             for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
1832                 Node* node = block->at(nodeIndex);
1833                 bool canExit = true;
1834                 bool nextCanExit = node->origin.exitOK;
1835                 for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
1836                     if (location.kind() != NamedPropertyPLoc)
1837                         continue;
1838
1839                     m_localMapping.set(location, m_bottom);
1840
1841                     if (m_sinkCandidates.contains(node)) {
1842                         if (DFGObjectAllocationSinkingPhaseInternal::verbose)
1843                             dataLog("For sink candidate ", node, " found location ", location, "\n");
1844                         m_insertionSet.insert(
1845                             nodeIndex + 1,
1846                             location.createHint(
1847                                 m_graph, node->origin.takeValidExit(nextCanExit), m_bottom));
1848                     }
1849                 }
1850
1851                 for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
1852                     materialization->origin.exitOK &= canExit;
1853                     Node* escapee = m_materializationToEscapee.get(materialization);
1854                     populateMaterialization(block, materialization, escapee);
1855                     m_escapeeToMaterialization.set(escapee, materialization);
1856                     m_insertionSet.insert(nodeIndex, materialization);
1857                     if (DFGObjectAllocationSinkingPhaseInternal::verbose)
1858                         dataLog("Materializing ", escapee, " => ", materialization, " at ", node, "\n");
1859                 }
1860
1861                 for (PromotedHeapLocation location : m_materializationSiteToRecoveries.get(node))
1862                     m_insertionSet.insert(nodeIndex, createRecovery(block, location, node, canExit));
1863                 for (std::pair<PromotedHeapLocation, Node*> pair : m_materializationSiteToHints.get(node))
1864                     m_insertionSet.insert(nodeIndex, createRecovery(block, pair.first, node, canExit));
1865
1866                 // We need to put the OSR hints after the recoveries,
1867                 // because we only want the hints once the object is
1868                 // complete
1869                 for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
1870                     Node* escapee = m_materializationToEscapee.get(materialization);
1871                     insertOSRHintsForUpdate(
1872                         nodeIndex, node->origin, canExit,
1873                         availabilityCalculator.m_availability, escapee, materialization);
1874                 }
1875
1876                 if (node->origin.exitOK && !canExit) {
1877                     // We indicate that the exit state is fine now. It is OK because we updated the
1878                     // state above. We need to indicate this manually because the validation doesn't
1879                     // have enough information to infer that the exit state is fine.
1880                     m_insertionSet.insertNode(nodeIndex, SpecNone, ExitOK, node->origin);
1881                 }
1882
1883                 if (m_sinkCandidates.contains(node))
1884                     m_escapeeToMaterialization.set(node, node);
1885
1886                 availabilityCalculator.executeNode(node);
1887
1888                 bool desiredNextExitOK = node->origin.exitOK && !clobbersExitState(m_graph, node);
1889
1890                 bool doLower = false;
1891                 handleNode(
1892                     node,
1893                     [&] (PromotedHeapLocation location, LazyNode value) {
1894                         if (!locations.contains(location))
1895                             return;
1896
1897                         Node* nodeValue;
1898                         if (value.isNode())
1899                             nodeValue = value.asNode();
1900                         else
1901                             nodeValue = lazyMapping.get(value.asValue());
1902
1903                         nodeValue = resolve(block, nodeValue);
1904
1905                         m_localMapping.set(location, nodeValue);
1906
1907                         if (!m_sinkCandidates.contains(location.base()))
1908                             return;
1909
1910                         doLower = true;
1911
1912                         if (DFGObjectAllocationSinkingPhaseInternal::verbose)
1913                             dataLog("Creating hint with value ", nodeValue, " before ", node, "\n");
1914                         m_insertionSet.insert(
1915                             nodeIndex + 1,
1916                             location.createHint(
1917                                 m_graph, node->origin.takeValidExit(nextCanExit), nodeValue));
1918                     },
1919                     [&] (PromotedHeapLocation location) -> Node* {
1920                         return resolve(block, location);
1921                     });
1922
1923                 if (!nextCanExit && desiredNextExitOK) {
1924                     // We indicate that the exit state is fine now. We need to do this because we
1925                     // emitted hints that appear to invalidate the exit state.
1926                     m_insertionSet.insertNode(nodeIndex + 1, SpecNone, ExitOK, node->origin);
1927                 }
1928
1929                 if (m_sinkCandidates.contains(node) || doLower) {
1930                     switch (node->op()) {
1931                     case NewObject:
1932                         node->convertToPhantomNewObject();
1933                         break;
1934
1935                     case NewFunction:
1936                         node->convertToPhantomNewFunction();
1937                         break;
1938
1939                     case NewGeneratorFunction:
1940                         node->convertToPhantomNewGeneratorFunction();
1941                         break;
1942
1943                     case NewAsyncGeneratorFunction:
1944                         node->convertToPhantomNewAsyncGeneratorFunction();
1945                         break;
1946
1947                     case NewAsyncFunction:
1948                         node->convertToPhantomNewAsyncFunction();
1949                         break;
1950
1951                     case CreateActivation:
1952                         node->convertToPhantomCreateActivation();
1953                         break;
1954
1955                     case NewRegexp:
1956                         node->convertToPhantomNewRegexp();
1957                         break;
1958
1959                     default:
1960                         node->remove(m_graph);
1961                         break;
1962                     }
1963                 }
1964
1965                 m_graph.doToChildren(
1966                     node,
1967                     [&] (Edge& edge) {
1968                         edge.setNode(resolve(block, edge.node()));
1969                     });
1970             }
1971
1972             // Gotta drop some Upsilons.
1973             NodeAndIndex terminal = block->findTerminal();
1974             size_t upsilonInsertionPoint = terminal.index;
1975             NodeOrigin upsilonOrigin = terminal.node->origin;
1976             for (BasicBlock* successorBlock : block->successors()) {
1977                 for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(successorBlock)) {
1978                     Node* phiNode = phiDef->value();
1979                     SSACalculator::Variable* variable = phiDef->variable();
1980                     PromotedHeapLocation location = indexToLocation[variable->index()];
1981                     Node* incoming = resolve(block, location);
1982
1983                     m_insertionSet.insertNode(
1984                         upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
1985                         OpInfo(phiNode), incoming->defaultEdge());
1986                 }
1987
1988                 for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(successorBlock)) {
1989                     Node* phiNode = phiDef->value();
1990                     SSACalculator::Variable* variable = phiDef->variable();
1991                     Node* incoming = getMaterialization(block, indexToNode[variable->index()]);
1992
1993                     m_insertionSet.insertNode(
1994                         upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
1995                         OpInfo(phiNode), incoming->defaultEdge());
1996                 }
1997             }
1998
1999             m_insertionSet.execute(block);
2000         }
2001     }
2002
2003     NEVER_INLINE Node* resolve(BasicBlock* block, PromotedHeapLocation location)
2004     {
2005         // If we are currently pointing to a single local allocation,
2006         // simply return the associated materialization.
2007         if (Node* identifier = m_heap.follow(location))
2008             return getMaterialization(block, identifier);
2009
2010         if (Node* result = m_localMapping.get(location))
2011             return result;
2012
2013         // This implies that there is no local mapping. Find a non-local mapping.
2014         SSACalculator::Def* def = m_pointerSSA.nonLocalReachingDef(
2015             block, m_locationToVariable.get(location));
2016         ASSERT(def);
2017         ASSERT(def->value());
2018
2019         Node* result = def->value();
2020         if (result->replacement())
2021             result = result->replacement();
2022         ASSERT(!result->replacement());
2023
2024         m_localMapping.add(location, result);
2025         return result;
2026     }
2027
2028     NEVER_INLINE Node* resolve(BasicBlock* block, Node* node)
2029     {
2030         // If we are currently pointing to a single local allocation,
2031         // simply return the associated materialization.
2032         if (Node* identifier = m_heap.follow(node))
2033             return getMaterialization(block, identifier);
2034
2035         if (node->replacement())
2036             node = node->replacement();
2037         ASSERT(!node->replacement());
2038
2039         return node;
2040     }
2041
2042     NEVER_INLINE Node* getMaterialization(BasicBlock* block, Node* identifier)
2043     {
2044         ASSERT(m_heap.isAllocation(identifier));
2045         if (!m_sinkCandidates.contains(identifier))
2046             return identifier;
2047
2048         if (Node* materialization = m_escapeeToMaterialization.get(identifier))
2049             return materialization;
2050
2051         SSACalculator::Def* def = m_allocationSSA.nonLocalReachingDef(
2052             block, m_nodeToVariable.get(identifier));
2053         ASSERT(def && def->value());
2054         m_escapeeToMaterialization.add(identifier, def->value());
2055         ASSERT(!def->value()->replacement());
2056         return def->value();
2057     }
2058
2059     void insertOSRHintsForUpdate(unsigned nodeIndex, NodeOrigin origin, bool& canExit, AvailabilityMap& availability, Node* escapee, Node* materialization)
2060     {
2061         if (DFGObjectAllocationSinkingPhaseInternal::verbose) {
2062             dataLog("Inserting OSR hints at ", origin, ":\n");
2063             dataLog("    Escapee: ", escapee, "\n");
2064             dataLog("    Materialization: ", materialization, "\n");
2065             dataLog("    Availability: ", availability, "\n");
2066         }
2067         
2068         // We need to follow() the value in the heap.
2069         // Consider the following graph:
2070         //
2071         // Block #0
2072         //   0: NewObject({})
2073         //   1: NewObject({})
2074         //   -: PutByOffset(@0, @1, x:0)
2075         //   -: PutStructure(@0, {x:0})
2076         //   2: GetByOffset(@0, x:0)
2077         //   -: MovHint(@2, loc1)
2078         //   -: Branch(#1, #2)
2079         //
2080         // Block #1
2081         //   3: Call(f, @1)
2082         //   4: Return(@0)
2083         //
2084         // Block #2
2085         //   -: Return(undefined)
2086         //
2087         // We need to materialize @1 at @3, and when doing so we need
2088         // to insert a MovHint for the materialization into loc1 as
2089         // well.
2090         // In order to do this, we say that we need to insert an
2091         // update hint for any availability whose node resolve()s to
2092         // the materialization.
2093         for (auto entry : availability.m_heap) {
2094             if (!entry.value.hasNode())
2095                 continue;
2096             if (m_heap.follow(entry.value.node()) != escapee)
2097                 continue;
2098
2099             m_insertionSet.insert(
2100                 nodeIndex,
2101                 entry.key.createHint(m_graph, origin.takeValidExit(canExit), materialization));
2102         }
2103
2104         for (unsigned i = availability.m_locals.size(); i--;) {
2105             if (!availability.m_locals[i].hasNode())
2106                 continue;
2107             if (m_heap.follow(availability.m_locals[i].node()) != escapee)
2108                 continue;
2109
2110             int operand = availability.m_locals.operandForIndex(i);
2111             m_insertionSet.insertNode(
2112                 nodeIndex, SpecNone, MovHint, origin.takeValidExit(canExit), OpInfo(operand),
2113                 materialization->defaultEdge());
2114         }
2115     }
2116
2117     void populateMaterialization(BasicBlock* block, Node* node, Node* escapee)
2118     {
2119         Allocation& allocation = m_heap.getAllocation(escapee);
2120         switch (node->op()) {
2121         case MaterializeNewObject: {
2122             ObjectMaterializationData& data = node->objectMaterializationData();
2123             unsigned firstChild = m_graph.m_varArgChildren.size();
2124
2125             Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
2126
2127             PromotedHeapLocation structure(StructurePLoc, allocation.identifier());
2128             ASSERT(locations.contains(structure));
2129
2130             m_graph.m_varArgChildren.append(Edge(resolve(block, structure), KnownCellUse));
2131
2132             for (PromotedHeapLocation location : locations) {
2133                 switch (location.kind()) {
2134                 case StructurePLoc:
2135                     ASSERT(location == structure);
2136                     break;
2137
2138                 case NamedPropertyPLoc: {
2139                     ASSERT(location.base() == allocation.identifier());
2140                     data.m_properties.append(location.descriptor());
2141                     Node* value = resolve(block, location);
2142                     if (m_sinkCandidates.contains(value))
2143                         m_graph.m_varArgChildren.append(m_bottom);
2144                     else
2145                         m_graph.m_varArgChildren.append(value);
2146                     break;
2147                 }
2148
2149                 default:
2150                     DFG_CRASH(m_graph, node, "Bad location kind");
2151                 }
2152             }
2153
2154             node->children = AdjacencyList(
2155                 AdjacencyList::Variable,
2156                 firstChild, m_graph.m_varArgChildren.size() - firstChild);
2157             break;
2158         }
2159
2160         case MaterializeCreateActivation: {
2161             ObjectMaterializationData& data = node->objectMaterializationData();
2162
2163             unsigned firstChild = m_graph.m_varArgChildren.size();
2164
2165             Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
2166
2167             PromotedHeapLocation symbolTable(ActivationSymbolTablePLoc, allocation.identifier());
2168             ASSERT(locations.contains(symbolTable));
2169             ASSERT(node->cellOperand() == resolve(block, symbolTable)->constant());
2170             m_graph.m_varArgChildren.append(Edge(resolve(block, symbolTable), KnownCellUse));
2171
2172             PromotedHeapLocation scope(ActivationScopePLoc, allocation.identifier());
2173             ASSERT(locations.contains(scope));
2174             m_graph.m_varArgChildren.append(Edge(resolve(block, scope), KnownCellUse));
2175
2176             for (PromotedHeapLocation location : locations) {
2177                 switch (location.kind()) {
2178                 case ActivationScopePLoc: {
2179                     ASSERT(location == scope);
2180                     break;
2181                 }
2182
2183                 case ActivationSymbolTablePLoc: {
2184                     ASSERT(location == symbolTable);
2185                     break;
2186                 }
2187
2188                 case ClosureVarPLoc: {
2189                     ASSERT(location.base() == allocation.identifier());
2190                     data.m_properties.append(location.descriptor());
2191                     Node* value = resolve(block, location);
2192                     if (m_sinkCandidates.contains(value))
2193                         m_graph.m_varArgChildren.append(m_bottom);
2194                     else
2195                         m_graph.m_varArgChildren.append(value);
2196                     break;
2197                 }
2198
2199                 default:
2200                     DFG_CRASH(m_graph, node, "Bad location kind");
2201                 }
2202             }
2203
2204             node->children = AdjacencyList(
2205                 AdjacencyList::Variable,
2206                 firstChild, m_graph.m_varArgChildren.size() - firstChild);
2207             break;
2208         }
2209         
2210         case NewFunction:
2211         case NewGeneratorFunction:
2212         case NewAsyncGeneratorFunction:
2213         case NewAsyncFunction: {
2214             Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
2215             ASSERT(locations.size() == 2);
2216                 
2217             PromotedHeapLocation executable(FunctionExecutablePLoc, allocation.identifier());
2218             ASSERT_UNUSED(executable, locations.contains(executable));
2219                 
2220             PromotedHeapLocation activation(FunctionActivationPLoc, allocation.identifier());
2221             ASSERT(locations.contains(activation));
2222
2223             node->child1() = Edge(resolve(block, activation), KnownCellUse);
2224             break;
2225         }
2226
2227         case NewRegexp: {
2228             Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
2229             ASSERT(locations.size() == 2);
2230
2231             PromotedHeapLocation regExp(RegExpObjectRegExpPLoc, allocation.identifier());
2232             ASSERT_UNUSED(regExp, locations.contains(regExp));
2233
2234             PromotedHeapLocation lastIndex(RegExpObjectLastIndexPLoc, allocation.identifier());
2235             ASSERT(locations.contains(lastIndex));
2236             Node* value = resolve(block, lastIndex);
2237             if (m_sinkCandidates.contains(value))
2238                 node->child1() = Edge(m_bottom);
2239             else
2240                 node->child1() = Edge(value);
2241             break;
2242         }
2243
2244         default:
2245             DFG_CRASH(m_graph, node, "Bad materialize op");
2246         }
2247     }
2248
2249     Node* createRecovery(BasicBlock* block, PromotedHeapLocation location, Node* where, bool& canExit)
2250     {
2251         if (DFGObjectAllocationSinkingPhaseInternal::verbose)
2252             dataLog("Recovering ", location, " at ", where, "\n");
2253         ASSERT(location.base()->isPhantomAllocation());
2254         Node* base = getMaterialization(block, location.base());
2255         Node* value = resolve(block, location);
2256
2257         NodeOrigin origin = where->origin.withSemantic(base->origin.semantic);
2258
2259         if (DFGObjectAllocationSinkingPhaseInternal::verbose)
2260             dataLog("Base is ", base, " and value is ", value, "\n");
2261
2262         if (base->isPhantomAllocation()) {
2263             return PromotedHeapLocation(base, location.descriptor()).createHint(
2264                 m_graph, origin.takeValidExit(canExit), value);
2265         }
2266
2267         switch (location.kind()) {
2268         case NamedPropertyPLoc: {
2269             Allocation& allocation = m_heap.getAllocation(location.base());
2270
2271             unsigned identifierNumber = location.info();
2272             UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
2273
2274             Vector<RegisteredStructure> structures;
2275             for (RegisteredStructure structure : allocation.structures()) {
2276                 // This structure set is conservative: it can include a Structure that does not actually have the property.
2277                 // We filter out such inappropriate structures here, since MultiPutByOffset assumes that all the structures
2278                 // have a valid offset for the given property.
2279                 if (structure->getConcurrently(uid) != invalidOffset)
2280                     structures.append(structure);
2281             }
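             //
             // For instance (hypothetical structures): if allocation.structures()
             // is {S1, S2} and only S1 has an offset for uid, we keep just {S1}.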
2282
2283             // Since we filter structures, it is possible that we no longer have any structures here. In this case, we emit ForceOSRExit.
2284             if (structures.isEmpty())
2285                 return m_graph.addNode(ForceOSRExit, origin.takeValidExit(canExit));
2286
2287             std::sort(
2288                 structures.begin(),
2289                 structures.end(),
2290                 [uid] (RegisteredStructure a, RegisteredStructure b) -> bool {
2291                     return a->getConcurrently(uid) < b->getConcurrently(uid);
2292                 });
2293
2294             RELEASE_ASSERT(structures.size());
2295             PropertyOffset firstOffset = structures[0]->getConcurrently(uid);
2296
2297             if (firstOffset == structures.last()->getConcurrently(uid)) {
2298                 Node* storage = base;
2299                 // FIXME: When we decide to sink objects with
2300                 // property storage, we should handle non-inline offsets.
2301                 RELEASE_ASSERT(isInlineOffset(firstOffset));
2302
2303                 StorageAccessData* data = m_graph.m_storageAccessData.add();
2304                 data->offset = firstOffset;
2305                 data->identifierNumber = identifierNumber;
2306
2307                 return m_graph.addNode(
2308                     PutByOffset,
2309                     origin.takeValidExit(canExit),
2310                     OpInfo(data),
2311                     Edge(storage, KnownCellUse),
2312                     Edge(base, KnownCellUse),
2313                     value->defaultEdge());
2314             }
2315
2316             MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
2317             data->identifierNumber = identifierNumber;
2318
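             // The structures were sorted by offset above, so runs of equal
             // offsets are contiguous and we can emit one PutByIdVariant per
             // run. For example (hypothetical offsets): structures S1, S2, S3
             // with offsets 0, 0, 1 become replace({S1, S2}, 0) and
             // replace({S3}, 1).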
2319             {
2320                 PropertyOffset currentOffset = firstOffset;
2321                 StructureSet currentSet;
2322                 for (RegisteredStructure structure : structures) {
2323                     PropertyOffset offset = structure->getConcurrently(uid);
2324                     if (offset != currentOffset) {
2325                         // Because our analysis treats MultiPutByOffset like an escape, we only have to
2326                         // deal with storing results that would have been previously stored by PutByOffset
2327                         // nodes. Those nodes were guarded by the appropriate type checks. This means that
2328                         // at this point, we can simply trust that the incoming value has the right type
2329                         // for whatever structure we are using.
2330                         data->variants.append(
2331                             PutByIdVariant::replace(currentSet, currentOffset));
2332                         currentOffset = offset;
2333                         currentSet.clear();
2334                     }
2335                     currentSet.add(structure.get());
2336                 }
2337                 data->variants.append(
2338                     PutByIdVariant::replace(currentSet, currentOffset));
2339             }
2340
2341             return m_graph.addNode(
2342                 MultiPutByOffset,
2343                 origin.takeValidExit(canExit),
2344                 OpInfo(data),
2345                 Edge(base, KnownCellUse),
2346                 value->defaultEdge());
2347         }
2348
2349         case ClosureVarPLoc: {
2350             return m_graph.addNode(
2351                 PutClosureVar,
2352                 origin.takeValidExit(canExit),
2353                 OpInfo(location.info()),
2354                 Edge(base, KnownCellUse),
2355                 value->defaultEdge());
2356         }
2357
2358         case RegExpObjectLastIndexPLoc: {
2359             return m_graph.addNode(
2360                 SetRegExpObjectLastIndex,
2361                 origin.takeValidExit(canExit),
2362                 OpInfo(true),
2363                 Edge(base, KnownCellUse),
2364                 value->defaultEdge());
2365         }
2366
2367         default:
2368             DFG_CRASH(m_graph, base, "Bad location kind");
2369             break;
2370         }
2371
2372         RELEASE_ASSERT_NOT_REACHED();
2373     }
2374     
2375     void removeICStatusFilters()
2376     {
2377         for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
2378             for (Node* node : *block) {
2379                 switch (node->op()) {
2380                 case FilterCallLinkStatus:
2381                 case FilterGetByStatus:
2382                 case FilterPutByIdStatus:
2383                 case FilterInByIdStatus:
2384                     if (node->child1()->isPhantomAllocation())
2385                         node->removeWithoutChecks();
2386                     break;
2387                 default:
2388                     break;
2389                 }
2390             }
2391         }
2392     }
2393
2394     // This is a great way of asking value->isStillValid() without having to worry about getting
2395     // different answers. It turns out that this analysis works OK regardless of what this
2396     // returns, but breaks badly if this changes its mind for any particular InferredValue. This
2397     // method protects us from that.
2398     bool isStillValid(SymbolTable* value)
2399     {
2400         return m_validInferredValues.add(value, value->singleton().isStillValid()).iterator->value;
2401     }
2402
2403     bool isStillValid(FunctionExecutable* value)
2404     {
2405         return m_validInferredValues.add(value, value->singleton().isStillValid()).iterator->value;
2406     }
2407
2408
2409     SSACalculator m_pointerSSA;
2410     SSACalculator m_allocationSSA;
2411     NodeSet m_sinkCandidates;
2412     HashMap<PromotedHeapLocation, SSACalculator::Variable*> m_locationToVariable;
2413     HashMap<Node*, SSACalculator::Variable*> m_nodeToVariable;
2414     HashMap<PromotedHeapLocation, Node*> m_localMapping;
2415     HashMap<Node*, Node*> m_escapeeToMaterialization;
2416     InsertionSet m_insertionSet;
2417     CombinedLiveness m_combinedLiveness;
2418
2419     HashMap<JSCell*, bool> m_validInferredValues;
2420
2421     HashMap<Node*, Node*> m_materializationToEscapee;
2422     HashMap<Node*, Vector<Node*>> m_materializationSiteToMaterializations;
2423     HashMap<Node*, Vector<PromotedHeapLocation>> m_materializationSiteToRecoveries;
2424     HashMap<Node*, Vector<std::pair<PromotedHeapLocation, Node*>>> m_materializationSiteToHints;
2425
2426     HashMap<Node*, Vector<PromotedHeapLocation>> m_locationsForAllocation;
2427
2428     BlockMap<LocalHeap> m_heapAtHead;
2429     BlockMap<LocalHeap> m_heapAtTail;
2430     LocalHeap m_heap;
2431
2432     Node* m_bottom = nullptr;
2433 };
2434
2435 }
2436
2437 bool performObjectAllocationSinking(Graph& graph)
2438 {
2439     return runPhase<ObjectAllocationSinkingPhase>(graph);
2440 }
2441
2442 } } // namespace JSC::DFG
2443
2444 #endif // ENABLE(DFG_JIT)