// FIXME: We should support CreateThis in the FTL.
// Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp
1 /*
2  * Copyright (C) 2015-2018 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24  */
25
26 #include "config.h"
27 #include "DFGObjectAllocationSinkingPhase.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "DFGBlockMapInlines.h"
32 #include "DFGClobbersExitState.h"
33 #include "DFGCombinedLiveness.h"
34 #include "DFGGraph.h"
35 #include "DFGInsertionSet.h"
36 #include "DFGLazyNode.h"
37 #include "DFGLivenessAnalysisPhase.h"
38 #include "DFGOSRAvailabilityAnalysisPhase.h"
39 #include "DFGPhase.h"
40 #include "DFGPromotedHeapLocation.h"
41 #include "DFGSSACalculator.h"
42 #include "DFGValidate.h"
43 #include "JSCInlines.h"
44 #include <wtf/StdList.h>
45
46 namespace JSC { namespace DFG {
47
48 namespace {
49
namespace DFGObjectAllocationSinkingPhaseInternal {
// Set to true to dump the graph and the abstract heaps while the
// phase runs. Compile-time constant; the dumps are verbose.
static const bool verbose = false;
}
53
54 // In order to sink object cycles, we use a points-to analysis coupled
55 // with an escape analysis. This analysis is actually similar to an
56 // abstract interpreter focused on local allocations and ignoring
57 // everything else.
58 //
59 // We represent the local heap using two mappings:
60 //
61 // - A set of the local allocations present in the function, where
62 //   each of those have a further mapping from
63 //   PromotedLocationDescriptor to local allocations they must point
64 //   to.
65 //
66 // - A "pointer" mapping from nodes to local allocations, if they must
67 //   be equal to said local allocation and are currently live. This
68 //   can be because the node is the actual node that created the
69 //   allocation, or any other node that must currently point to it -
70 //   we don't make a difference.
71 //
72 // The following graph is a motivation for why we separate allocations
73 // from pointers:
74 //
75 // Block #0
76 //  0: NewObject({})
77 //  1: NewObject({})
78 //  -: PutByOffset(@0, @1, x)
79 //  -: PutStructure(@0, {x:0})
80 //  2: GetByOffset(@0, x)
81 //  -: Jump(#1)
82 //
83 // Block #1
84 //  -: Return(@2)
85 //
86 // Here, we need to remember in block #1 that @2 points to a local
87 // allocation with appropriate fields and structures information
88 // (because we should be able to place a materialization on top of
89 // block #1 here), even though @1 is dead. We *could* just keep @1
90 // artificially alive here, but there is no real reason to do it:
91 // after all, by the end of block #0, @1 and @2 should be completely
92 // interchangeable, and there is no reason for us to artificially make
93 // @1 more important.
94 //
95 // An important point to consider to understand this separation is
96 // that we should think of the local heap as follow: we have a
97 // bunch of nodes that are pointers to "allocations" that live
98 // someplace on the heap, and those allocations can have pointers in
99 // between themselves as well. We shouldn't care about whatever
100 // names we give to the allocations ; what matters when
101 // comparing/merging two heaps is the isomorphism/comparison between
102 // the allocation graphs as seen by the nodes.
103 //
104 // For instance, in the following graph:
105 //
106 // Block #0
107 //  0: NewObject({})
108 //  -: Branch(#1, #2)
109 //
110 // Block #1
111 //  1: NewObject({})
112 //  -: PutByOffset(@0, @1, x)
113 //  -: PutStructure(@0, {x:0})
114 //  -: Jump(#3)
115 //
116 // Block #2
117 //  2: NewObject({})
118 //  -: PutByOffset(@2, undefined, x)
119 //  -: PutStructure(@2, {x:0})
120 //  -: PutByOffset(@0, @2, x)
121 //  -: PutStructure(@0, {x:0})
122 //  -: Jump(#3)
123 //
124 // Block #3
125 //  -: Return(@0)
126 //
127 // we should think of the heaps at tail of blocks #1 and #2 as being
128 // exactly the same, even though one has @0.x pointing to @1 and the
129 // other has @0.x pointing to @2, because in essence this should not
130 // be different from the graph where we hoisted @1 and @2 into a
131 // single allocation in block #0. We currently will not handle this
132 // case, because we merge allocations based on the node they are
133 // coming from, but this is only a technicality for the sake of
134 // simplicity that shouldn't hide the deeper idea outlined here.
135
136 class Allocation {
137 public:
138     // We use Escaped as a special allocation kind because when we
139     // decide to sink an allocation, we still need to keep track of it
140     // once it is escaped if it still has pointers to it in order to
141     // replace any use of those pointers by the corresponding
142     // materialization
143     enum class Kind { Escaped, Object, Activation, Function, GeneratorFunction, AsyncFunction, AsyncGeneratorFunction, RegExpObject };
144
145     using Fields = HashMap<PromotedLocationDescriptor, Node*>;
146
147     explicit Allocation(Node* identifier = nullptr, Kind kind = Kind::Escaped)
148         : m_identifier(identifier)
149         , m_kind(kind)
150     {
151     }
152
153
154     const Fields& fields() const
155     {
156         return m_fields;
157     }
158
159     Fields& fields()
160     {
161         return m_fields;
162     }
163
164     Node* get(PromotedLocationDescriptor descriptor)
165     {
166         return m_fields.get(descriptor);
167     }
168
169     Allocation& set(PromotedLocationDescriptor descriptor, Node* value)
170     {
171         // Pointing to anything else than an unescaped local
172         // allocation is represented by simply not having the
173         // field
174         if (value)
175             m_fields.set(descriptor, value);
176         else
177             m_fields.remove(descriptor);
178         return *this;
179     }
180
181     void remove(PromotedLocationDescriptor descriptor)
182     {
183         set(descriptor, nullptr);
184     }
185
186     bool hasStructures() const
187     {
188         switch (kind()) {
189         case Kind::Object:
190             return true;
191
192         default:
193             return false;
194         }
195     }
196
197     Allocation& setStructures(const RegisteredStructureSet& structures)
198     {
199         ASSERT(hasStructures() && !structures.isEmpty());
200         m_structures = structures;
201         return *this;
202     }
203
204     Allocation& mergeStructures(const RegisteredStructureSet& structures)
205     {
206         ASSERT(hasStructures() || structures.isEmpty());
207         m_structures.merge(structures);
208         return *this;
209     }
210
211     Allocation& filterStructures(const RegisteredStructureSet& structures)
212     {
213         ASSERT(hasStructures());
214         m_structures.filter(structures);
215         RELEASE_ASSERT(!m_structures.isEmpty());
216         return *this;
217     }
218
219     const RegisteredStructureSet& structures() const
220     {
221         return m_structures;
222     }
223
224     Node* identifier() const { return m_identifier; }
225
226     Kind kind() const { return m_kind; }
227
228     bool isEscapedAllocation() const
229     {
230         return kind() == Kind::Escaped;
231     }
232
233     bool isObjectAllocation() const
234     {
235         return m_kind == Kind::Object;
236     }
237
238     bool isActivationAllocation() const
239     {
240         return m_kind == Kind::Activation;
241     }
242
243     bool isFunctionAllocation() const
244     {
245         return m_kind == Kind::Function || m_kind == Kind::GeneratorFunction || m_kind == Kind::AsyncFunction;
246     }
247
248     bool isRegExpObjectAllocation() const
249     {
250         return m_kind == Kind::RegExpObject;
251     }
252
253     bool operator==(const Allocation& other) const
254     {
255         return m_identifier == other.m_identifier
256             && m_kind == other.m_kind
257             && m_fields == other.m_fields
258             && m_structures == other.m_structures;
259     }
260
261     bool operator!=(const Allocation& other) const
262     {
263         return !(*this == other);
264     }
265
266     void dump(PrintStream& out) const
267     {
268         dumpInContext(out, nullptr);
269     }
270
271     void dumpInContext(PrintStream& out, DumpContext* context) const
272     {
273         switch (m_kind) {
274         case Kind::Escaped:
275             out.print("Escaped");
276             break;
277
278         case Kind::Object:
279             out.print("Object");
280             break;
281
282         case Kind::Function:
283             out.print("Function");
284             break;
285
286         case Kind::GeneratorFunction:
287             out.print("GeneratorFunction");
288             break;
289
290         case Kind::AsyncFunction:
291             out.print("AsyncFunction");
292             break;
293
294         case Kind::AsyncGeneratorFunction:
295             out.print("AsyncGeneratorFunction");
296             break;
297
298         case Kind::Activation:
299             out.print("Activation");
300             break;
301
302         case Kind::RegExpObject:
303             out.print("RegExpObject");
304             break;
305         }
306         out.print("Allocation(");
307         if (!m_structures.isEmpty())
308             out.print(inContext(m_structures.toStructureSet(), context));
309         if (!m_fields.isEmpty()) {
310             if (!m_structures.isEmpty())
311                 out.print(", ");
312             out.print(mapDump(m_fields, " => #", ", "));
313         }
314         out.print(")");
315     }
316
317 private:
318     Node* m_identifier; // This is the actual node that created the allocation
319     Kind m_kind;
320     Fields m_fields;
321     RegisteredStructureSet m_structures;
322 };
323
// A LocalHeap is the abstract heap tracked by the analysis at one
// program point: the set of local allocations (keyed by the node that
// created them) plus a "pointer" map from currently-live nodes to the
// allocation they must point to. See the long comment above class
// Allocation for why pointers and allocations are kept separate.
class LocalHeap {
public:
    // Registers a fresh allocation created by `node`. The creating node
    // starts out pointing at itself (it is the allocation's identifier).
    Allocation& newAllocation(Node* node, Allocation::Kind kind)
    {
        ASSERT(!m_pointers.contains(node) && !isAllocation(node));
        m_pointers.add(node, node);
        return m_allocations.set(node, Allocation(node, kind)).iterator->value;
    }

    bool isAllocation(Node* identifier) const
    {
        return m_allocations.contains(identifier);
    }

    // Note that this is fundamentally different from
    // onlyLocalAllocation() below. getAllocation() takes as argument
    // a node-as-identifier, that is, an allocation node. This
    // allocation node doesn't have to be alive; it may only be
    // pointed to by other nodes or allocation fields.
    // For instance, in the following graph:
    //
    // Block #0
    //  0: NewObject({})
    //  1: NewObject({})
    //  -: PutByOffset(@0, @1, x)
    //  -: PutStructure(@0, {x:0})
    //  2: GetByOffset(@0, x)
    //  -: Jump(#1)
    //
    // Block #1
    //  -: Return(@2)
    //
    // At head of block #1, the only reachable allocation is #@1,
    // which can be reached through node @2. Thus, getAllocation(#@1)
    // contains the appropriate metadata for this allocation, but
    // onlyLocalAllocation(@1) is null, as @1 is no longer a pointer
    // to #@1 (since it is dead). Conversely, onlyLocalAllocation(@2)
    // is the same as getAllocation(#@1), while getAllocation(#@2)
    // does not make sense since @2 is not an allocation node.
    //
    // This is meant to be used when the node is already known to be
    // an identifier (i.e. an allocation) - probably because it was
    // found as value of a field or pointer in the current heap, or
    // was the result of a call to follow(). In any other cases (such
    // as when doing anything while traversing the graph), the
    // appropriate function to call is probably onlyLocalAllocation.
    Allocation& getAllocation(Node* identifier)
    {
        auto iter = m_allocations.find(identifier);
        ASSERT(iter != m_allocations.end());
        return iter->value;
    }

    // Records that `node` must point to the existing allocation named
    // by `identifier`.
    void newPointer(Node* node, Node* identifier)
    {
        ASSERT(!m_allocations.contains(node) && !m_pointers.contains(node));
        ASSERT(isAllocation(identifier));
        m_pointers.add(node, identifier);
    }

    // follow solves the points-to problem. Given a live node, which
    // may be either an allocation itself or a heap read (e.g. a
    // GetByOffset node), it returns the corresponding allocation
    // node, if there is one. If the argument node is neither an
    // allocation or a heap read, or may point to different nodes,
    // nullptr will be returned. Note that a node that points to
    // different nodes can never point to an unescaped local
    // allocation.
    Node* follow(Node* node) const
    {
        auto iter = m_pointers.find(node);
        ASSERT(iter == m_pointers.end() || m_allocations.contains(iter->value));
        return iter == m_pointers.end() ? nullptr : iter->value;
    }

    // Same as above, but resolves a promoted field (base allocation +
    // descriptor) instead of a node. The base must be a known allocation.
    Node* follow(PromotedHeapLocation location) const
    {
        const Allocation& base = m_allocations.find(location.base())->value;
        auto iter = base.fields().find(location.descriptor());

        if (iter == base.fields().end())
            return nullptr;

        return iter->value;
    }

    // onlyLocalAllocation find the corresponding allocation metadata
    // for any live node. onlyLocalAllocation(node) is essentially
    // getAllocation(follow(node)), with appropriate null handling.
    Allocation* onlyLocalAllocation(Node* node)
    {
        Node* identifier = follow(node);
        if (!identifier)
            return nullptr;

        return &getAllocation(identifier);
    }

    Allocation* onlyLocalAllocation(PromotedHeapLocation location)
    {
        Node* identifier = follow(location);
        if (!identifier)
            return nullptr;

        return &getAllocation(identifier);
    }

    // This allows us to store the escapees only when necessary. If
    // set, the current escapees can be retrieved at any time using
    // takeEscapees(), which will clear the cached set of escapees;
    // otherwise the heap won't remember escaping allocations.
    void setWantEscapees()
    {
        m_wantEscapees = true;
    }

    HashMap<Node*, Allocation> takeEscapees()
    {
        return WTFMove(m_escapees);
    }

    // Escapes whatever allocation `node` points to, if any. Escaping is
    // transitive through fields (see escapeAllocation below).
    void escape(Node* node)
    {
        Node* identifier = follow(node);
        if (!identifier)
            return;

        escapeAllocation(identifier);
    }

    // Merges `other` (the heap at the tail of a predecessor) into this
    // heap at a control-flow join. Allocations that disagree between the
    // two heaps (different kind, or pointed to differently) are escaped;
    // agreeing ones have their fields and structure sets merged.
    void merge(const LocalHeap& other)
    {
        assertIsValid();
        other.assertIsValid();
        ASSERT(!m_wantEscapees);

        if (!reached()) {
            ASSERT(other.reached());
            *this = other;
            return;
        }

        NodeSet toEscape;

        for (auto& allocationEntry : other.m_allocations)
            m_allocations.add(allocationEntry.key, allocationEntry.value);
        for (auto& allocationEntry : m_allocations) {
            auto allocationIter = other.m_allocations.find(allocationEntry.key);

            // If we have it and they don't, it died for them but we
            // are keeping it alive from another field somewhere.
            // There is nothing to do - we will be escaped
            // automatically when we handle that other field.
            // This will also happen for allocation that we have and
            // they don't, and all of those will get pruned.
            if (allocationIter == other.m_allocations.end())
                continue;

            if (allocationEntry.value.kind() != allocationIter->value.kind()) {
                toEscape.addVoid(allocationEntry.key);
                for (const auto& fieldEntry : allocationIter->value.fields())
                    toEscape.addVoid(fieldEntry.value);
            } else {
                mergePointerSets(allocationEntry.value.fields(), allocationIter->value.fields(), toEscape);
                allocationEntry.value.mergeStructures(allocationIter->value.structures());
            }
        }

        mergePointerSets(m_pointers, other.m_pointers, toEscape);

        for (Node* identifier : toEscape)
            escapeAllocation(identifier);

        if (!ASSERT_DISABLED) {
            for (const auto& entry : m_allocations)
                ASSERT_UNUSED(entry, entry.value.isEscapedAllocation() || other.m_allocations.contains(entry.key));
        }

        // If there is no remaining pointer to an allocation, we can
        // remove it. This should only happen for escaped allocations,
        // because we only merge liveness-pruned heaps in the first
        // place.
        prune();

        assertIsValid();
    }

    // Drops pointers whose node is not in `live`, then prunes any
    // allocation that became unreachable as a result.
    void pruneByLiveness(const NodeSet& live)
    {
        m_pointers.removeIf(
            [&] (const auto& entry) {
                return !live.contains(entry.key);
            });
        prune();
    }

    void assertIsValid() const
    {
        if (ASSERT_DISABLED)
            return;

        // Pointers should point to an actual allocation
        for (const auto& entry : m_pointers) {
            ASSERT_UNUSED(entry, entry.value);
            ASSERT(m_allocations.contains(entry.value));
        }

        for (const auto& allocationEntry : m_allocations) {
            // Fields should point to an actual allocation
            for (const auto& fieldEntry : allocationEntry.value.fields()) {
                ASSERT_UNUSED(fieldEntry, fieldEntry.value);
                ASSERT(m_allocations.contains(fieldEntry.value));
            }
        }
    }

    bool operator==(const LocalHeap& other) const
    {
        assertIsValid();
        other.assertIsValid();
        return m_allocations == other.m_allocations
            && m_pointers == other.m_pointers;
    }

    bool operator!=(const LocalHeap& other) const
    {
        return !(*this == other);
    }

    const HashMap<Node*, Allocation>& allocations() const
    {
        return m_allocations;
    }

    const HashMap<Node*, Node*>& pointers() const
    {
        return m_pointers;
    }

    void dump(PrintStream& out) const
    {
        out.print("  Allocations:\n");
        for (const auto& entry : m_allocations)
            out.print("    #", entry.key, ": ", entry.value, "\n");
        out.print("  Pointers:\n");
        for (const auto& entry : m_pointers)
            out.print("    ", entry.key, " => #", entry.value, "\n");
    }

    // Whether the fixpoint analysis has visited this heap at least once.
    // merge() replaces an unreached heap wholesale instead of joining.
    bool reached() const
    {
        return m_reached;
    }

    void setReached()
    {
        m_reached = true;
    }

private:
    // When we merge two heaps, we escape all fields of allocations,
    // unless they point to the same thing in both heaps.
    // The reason for this is that it allows us not to do extra work
    // for diamond graphs where we would otherwise have to check
    // whether we have a single definition or not, which would be
    // cumbersome.
    //
    // Note that we should try to unify nodes even when they are not
    // from the same allocation; for instance we should be able to
    // completely eliminate all allocations from the following graph:
    //
    // Block #0
    //  0: NewObject({})
    //  -: Branch(#1, #2)
    //
    // Block #1
    //  1: NewObject({})
    //  -: PutByOffset(@1, "left", val)
    //  -: PutStructure(@1, {val:0})
    //  -: PutByOffset(@0, @1, x)
    //  -: PutStructure(@0, {x:0})
    //  -: Jump(#3)
    //
    // Block #2
    //  2: NewObject({})
    //  -: PutByOffset(@2, "right", val)
    //  -: PutStructure(@2, {val:0})
    //  -: PutByOffset(@0, @2, x)
    //  -: PutStructure(@0, {x:0})
    //  -: Jump(#3)
    //
    // Block #3:
    //  3: GetByOffset(@0, x)
    //  4: GetByOffset(@3, val)
    //  -: Return(@4)
    template<typename Key>
    static void mergePointerSets(HashMap<Key, Node*>& my, const HashMap<Key, Node*>& their, NodeSet& toEscape)
    {
        auto escape = [&] (Node* identifier) {
            toEscape.addVoid(identifier);
        };

        for (const auto& entry : their) {
            if (!my.contains(entry.key))
                escape(entry.value);
        }
        my.removeIf([&] (const auto& entry) {
            auto iter = their.find(entry.key);
            if (iter == their.end()) {
                escape(entry.value);
                return true;
            }
            if (iter->value != entry.value) {
                escape(entry.value);
                escape(iter->value);
                return true;
            }
            return false;
        });
    }

    // Marks an allocation as escaped, recursively escaping everything
    // its fields point to. The unescaped state is saved in m_escapees
    // when setWantEscapees() was called.
    void escapeAllocation(Node* identifier)
    {
        Allocation& allocation = getAllocation(identifier);
        if (allocation.isEscapedAllocation())
            return;

        Allocation unescaped = WTFMove(allocation);
        allocation = Allocation(unescaped.identifier(), Allocation::Kind::Escaped);

        for (const auto& entry : unescaped.fields())
            escapeAllocation(entry.value);

        if (m_wantEscapees)
            m_escapees.add(unescaped.identifier(), WTFMove(unescaped));
    }

    // Garbage-collects allocations not reachable (directly or through
    // fields) from any pointer.
    void prune()
    {
        NodeSet reachable;
        for (const auto& entry : m_pointers)
            reachable.addVoid(entry.value);

        // Repeatedly mark as reachable allocations in fields of other
        // reachable allocations
        {
            Vector<Node*> worklist;
            worklist.appendRange(reachable.begin(), reachable.end());

            while (!worklist.isEmpty()) {
                Node* identifier = worklist.takeLast();
                Allocation& allocation = m_allocations.find(identifier)->value;
                for (const auto& entry : allocation.fields()) {
                    if (reachable.add(entry.value).isNewEntry)
                        worklist.append(entry.value);
                }
            }
        }

        // Remove unreachable allocations
        m_allocations.removeIf(
            [&] (const auto& entry) {
                return !reachable.contains(entry.key);
            });
    }

    bool m_reached = false;
    HashMap<Node*, Node*> m_pointers;
    HashMap<Node*, Allocation> m_allocations;

    bool m_wantEscapees = false;
    HashMap<Node*, Allocation> m_escapees;
};
697
698 class ObjectAllocationSinkingPhase : public Phase {
699 public:
700     ObjectAllocationSinkingPhase(Graph& graph)
701         : Phase(graph, "object allocation elimination")
702         , m_pointerSSA(graph)
703         , m_allocationSSA(graph)
704         , m_insertionSet(graph)
705     {
706     }
707
708     bool run()
709     {
710         ASSERT(m_graph.m_form == SSA);
711         ASSERT(m_graph.m_fixpointState == FixpointNotConverged);
712
713         if (!performSinking())
714             return false;
715
716         if (DFGObjectAllocationSinkingPhaseInternal::verbose) {
717             dataLog("Graph after elimination:\n");
718             m_graph.dump();
719         }
720
721         return true;
722     }
723
724 private:
    // Drives the whole phase: refreshes the analyses the escape analysis
    // depends on, runs the abstract interpretation, picks sink candidates,
    // and rewrites the graph. Returns false (graph untouched) when nothing
    // is sinkable.
    bool performSinking()
    {
        // These analyses must be recomputed up front; handleNode and
        // promoteLocalHeap rely on them being current.
        m_graph.computeRefCounts();
        m_graph.initializeNodeOwners();
        m_graph.ensureSSADominators();
        performLivenessAnalysis(m_graph);
        performOSRAvailabilityAnalysis(m_graph);
        m_combinedLiveness = CombinedLiveness(m_graph);

        // Snapshot the graph now so a validation failure after rewriting
        // can show a meaningful before/after diff.
        CString graphBeforeSinking;
        if (Options::verboseValidationFailure() && Options::validateGraphAtEachPhase()) {
            StringPrintStream out;
            m_graph.dump(out);
            graphBeforeSinking = out.toCString();
        }

        if (DFGObjectAllocationSinkingPhaseInternal::verbose) {
            dataLog("Graph before elimination:\n");
            m_graph.dump();
        }

        performAnalysis();

        if (!determineSinkCandidates())
            return false;

        if (DFGObjectAllocationSinkingPhaseInternal::verbose) {
            for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
                dataLog("Heap at head of ", *block, ": \n", m_heapAtHead[block]);
                dataLog("Heap at tail of ", *block, ": \n", m_heapAtTail[block]);
            }
        }

        promoteLocalHeap();
        removeICStatusFilters();

        if (Options::validateGraphAtEachPhase())
            DFG::validate(m_graph, DumpGraph, graphBeforeSinking);
        return true;
    }
765
    // Runs the escape analysis to a fixpoint: interprets every block
    // forward from its head heap, and propagates the liveness-pruned
    // tail heap into each successor's head heap until nothing changes.
    void performAnalysis()
    {
        m_heapAtHead = BlockMap<LocalHeap>(m_graph);
        m_heapAtTail = BlockMap<LocalHeap>(m_graph);

        bool changed;
        do {
            if (DFGObjectAllocationSinkingPhaseInternal::verbose)
                dataLog("Doing iteration of escape analysis.\n");
            changed = false;

            for (BasicBlock* block : m_graph.blocksInPreOrder()) {
                m_heapAtHead[block].setReached();
                m_heap = m_heapAtHead[block];

                for (Node* node : *block) {
                    // During the analysis pass we only track effects; the
                    // write/resolve functors are no-ops (actual rewriting
                    // happens later, in promoteLocalHeap).
                    handleNode(
                        node,
                        [] (PromotedHeapLocation, LazyNode) { },
                        [&] (PromotedHeapLocation) -> Node* {
                            return nullptr;
                        });
                }

                // Fixpoint check: if this block's tail heap is unchanged,
                // its successors need no re-merge for this iteration.
                if (m_heap == m_heapAtTail[block])
                    continue;

                m_heapAtTail[block] = m_heap;
                changed = true;

                m_heap.assertIsValid();

                // We keep only pointers that are live, and only
                // allocations that are either live, pointed to by a
                // live pointer, or (recursively) stored in a field of
                // a live allocation.
                //
                // This means we can accidentaly leak non-dominating
                // nodes into the successor. However, due to the
                // non-dominance property, we are guaranteed that the
                // successor has at least one predecessor that is not
                // dominated either: this means any reference to a
                // non-dominating allocation in the successor will
                // trigger an escape and get pruned during the merge.
                m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

                for (BasicBlock* successorBlock : block->successors())
                    m_heapAtHead[successorBlock].merge(m_heap);
            }
        } while (changed);
    }
817
818     template<typename WriteFunctor, typename ResolveFunctor>
819     void handleNode(
820         Node* node,
821         const WriteFunctor& heapWrite,
822         const ResolveFunctor& heapResolve)
823     {
824         m_heap.assertIsValid();
825         ASSERT(m_heap.takeEscapees().isEmpty());
826
827         Allocation* target = nullptr;
828         HashMap<PromotedLocationDescriptor, LazyNode> writes;
829         PromotedLocationDescriptor exactRead;
830
831         switch (node->op()) {
832         case NewObject:
833             target = &m_heap.newAllocation(node, Allocation::Kind::Object);
834             target->setStructures(node->structure());
835             writes.add(
836                 StructurePLoc, LazyNode(m_graph.freeze(node->structure().get())));
837             break;
838
839         case NewFunction:
840         case NewGeneratorFunction:
841         case NewAsyncGeneratorFunction:
842         case NewAsyncFunction: {
843             if (isStillValid(node->castOperand<FunctionExecutable*>()->singletonFunction())) {
844                 m_heap.escape(node->child1().node());
845                 break;
846             }
847
848             if (node->op() == NewGeneratorFunction)
849                 target = &m_heap.newAllocation(node, Allocation::Kind::GeneratorFunction);
850             else if (node->op() == NewAsyncFunction)
851                 target = &m_heap.newAllocation(node, Allocation::Kind::AsyncFunction);
852             else if (node->op() == NewAsyncGeneratorFunction)
853                 target = &m_heap.newAllocation(node, Allocation::Kind::AsyncGeneratorFunction);
854             else
855                 target = &m_heap.newAllocation(node, Allocation::Kind::Function);
856
857             writes.add(FunctionExecutablePLoc, LazyNode(node->cellOperand()));
858             writes.add(FunctionActivationPLoc, LazyNode(node->child1().node()));
859             break;
860         }
861
862         case NewRegexp: {
863             target = &m_heap.newAllocation(node, Allocation::Kind::RegExpObject);
864
865             writes.add(RegExpObjectRegExpPLoc, LazyNode(node->cellOperand()));
866             writes.add(RegExpObjectLastIndexPLoc, LazyNode(node->child1().node()));
867             break;
868         }
869
870         case CreateActivation: {
871             if (isStillValid(node->castOperand<SymbolTable*>()->singletonScope())) {
872                 m_heap.escape(node->child1().node());
873                 break;
874             }
875             target = &m_heap.newAllocation(node, Allocation::Kind::Activation);
876             writes.add(ActivationSymbolTablePLoc, LazyNode(node->cellOperand()));
877             writes.add(ActivationScopePLoc, LazyNode(node->child1().node()));
878             {
879                 SymbolTable* symbolTable = node->castOperand<SymbolTable*>();
880                 ConcurrentJSLocker locker(symbolTable->m_lock);
881                 LazyNode initialValue(m_graph.freeze(node->initializationValueForActivation()));
882                 for (auto iter = symbolTable->begin(locker), end = symbolTable->end(locker); iter != end; ++iter) {
883                     writes.add(
884                         PromotedLocationDescriptor(ClosureVarPLoc, iter->value.scopeOffset().offset()),
885                         initialValue);
886                 }
887             }
888             break;
889         }
890
891         case PutStructure:
892             target = m_heap.onlyLocalAllocation(node->child1().node());
893             if (target && target->isObjectAllocation()) {
894                 writes.add(StructurePLoc, LazyNode(m_graph.freeze(JSValue(node->transition()->next.get()))));
895                 target->setStructures(node->transition()->next);
896             } else
897                 m_heap.escape(node->child1().node());
898             break;
899
900         case CheckStructureOrEmpty:
901         case CheckStructure: {
902             Allocation* allocation = m_heap.onlyLocalAllocation(node->child1().node());
903             if (allocation && allocation->isObjectAllocation()) {
904                 RegisteredStructureSet filteredStructures = allocation->structures();
905                 filteredStructures.filter(node->structureSet());
906                 if (filteredStructures.isEmpty()) {
907                     // FIXME: Write a test for this:
908                     // https://bugs.webkit.org/show_bug.cgi?id=174322
909                     m_heap.escape(node->child1().node());
910                     break;
911                 }
912                 allocation->setStructures(filteredStructures);
913                 if (Node* value = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc)))
914                     node->convertToCheckStructureImmediate(value);
915             } else
916                 m_heap.escape(node->child1().node());
917             break;
918         }
919
920         case GetByOffset:
921         case GetGetterSetterByOffset:
922             target = m_heap.onlyLocalAllocation(node->child2().node());
923             if (target && target->isObjectAllocation()) {
924                 unsigned identifierNumber = node->storageAccessData().identifierNumber;
925                 exactRead = PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber);
926             } else {
927                 m_heap.escape(node->child1().node());
928                 m_heap.escape(node->child2().node());
929             }
930             break;
931
932         case MultiGetByOffset: {
933             Allocation* allocation = m_heap.onlyLocalAllocation(node->child1().node());
934             if (allocation && allocation->isObjectAllocation()) {
935                 MultiGetByOffsetData& data = node->multiGetByOffsetData();
936                 RegisteredStructureSet validStructures;
937                 bool hasInvalidStructures = false;
938                 for (const auto& multiGetByOffsetCase : data.cases) {
939                     if (!allocation->structures().overlaps(multiGetByOffsetCase.set()))
940                         continue;
941
942                     switch (multiGetByOffsetCase.method().kind()) {
943                     case GetByOffsetMethod::LoadFromPrototype: // We need to escape those
944                     case GetByOffsetMethod::Constant: // We don't really have a way of expressing this
945                         hasInvalidStructures = true;
946                         break;
947
948                     case GetByOffsetMethod::Load: // We're good
949                         validStructures.merge(multiGetByOffsetCase.set());
950                         break;
951
952                     default:
953                         RELEASE_ASSERT_NOT_REACHED();
954                     }
955                 }
956                 if (hasInvalidStructures || validStructures.isEmpty()) {
957                     m_heap.escape(node->child1().node());
958                     break;
959                 }
960                 unsigned identifierNumber = data.identifierNumber;
961                 PromotedHeapLocation location(NamedPropertyPLoc, allocation->identifier(), identifierNumber);
962                 if (Node* value = heapResolve(location)) {
963                     if (allocation->structures().isSubsetOf(validStructures))
964                         node->replaceWithWithoutChecks(value);
965                     else {
966                         Node* structure = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc));
967                         ASSERT(structure);
968                         allocation->filterStructures(validStructures);
969                         node->convertToCheckStructure(m_graph.addStructureSet(allocation->structures()));
970                         node->convertToCheckStructureImmediate(structure);
971                         node->setReplacement(value);
972                     }
973                 } else if (!allocation->structures().isSubsetOf(validStructures)) {
974                     // Even though we don't need the result here, we still need
975                     // to make the call to tell our caller that we could need
976                     // the StructurePLoc.
977                     // The reason for this is that when we decide not to sink a
978                     // node, we will still lower any read to its fields before
979                     // it escapes (which are usually reads across a function
980                     // call that DFGClobberize can't handle) - but we only do
981                     // this for PromotedHeapLocations that we have seen read
982                     // during the analysis!
983                     heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc));
984                     allocation->filterStructures(validStructures);
985                 }
986                 Node* identifier = allocation->get(location.descriptor());
987                 if (identifier)
988                     m_heap.newPointer(node, identifier);
989             } else
990                 m_heap.escape(node->child1().node());
991             break;
992         }
993
994         case PutByOffset:
995             target = m_heap.onlyLocalAllocation(node->child2().node());
996             if (target && target->isObjectAllocation()) {
997                 unsigned identifierNumber = node->storageAccessData().identifierNumber;
998                 writes.add(
999                     PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber),
1000                     LazyNode(node->child3().node()));
1001             } else {
1002                 m_heap.escape(node->child1().node());
1003                 m_heap.escape(node->child2().node());
1004                 m_heap.escape(node->child3().node());
1005             }
1006             break;
1007
1008         case GetClosureVar:
1009             target = m_heap.onlyLocalAllocation(node->child1().node());
1010             if (target && target->isActivationAllocation()) {
1011                 exactRead =
1012                     PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset());
1013             } else
1014                 m_heap.escape(node->child1().node());
1015             break;
1016
1017         case PutClosureVar:
1018             target = m_heap.onlyLocalAllocation(node->child1().node());
1019             if (target && target->isActivationAllocation()) {
1020                 writes.add(
1021                     PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset()),
1022                     LazyNode(node->child2().node()));
1023             } else {
1024                 m_heap.escape(node->child1().node());
1025                 m_heap.escape(node->child2().node());
1026             }
1027             break;
1028
1029         case SkipScope:
1030             target = m_heap.onlyLocalAllocation(node->child1().node());
1031             if (target && target->isActivationAllocation())
1032                 exactRead = ActivationScopePLoc;
1033             else
1034                 m_heap.escape(node->child1().node());
1035             break;
1036
1037         case GetExecutable:
1038             target = m_heap.onlyLocalAllocation(node->child1().node());
1039             if (target && target->isFunctionAllocation())
1040                 exactRead = FunctionExecutablePLoc;
1041             else
1042                 m_heap.escape(node->child1().node());
1043             break;
1044
1045         case GetScope:
1046             target = m_heap.onlyLocalAllocation(node->child1().node());
1047             if (target && target->isFunctionAllocation())
1048                 exactRead = FunctionActivationPLoc;
1049             else
1050                 m_heap.escape(node->child1().node());
1051             break;
1052
1053         case GetRegExpObjectLastIndex:
1054             target = m_heap.onlyLocalAllocation(node->child1().node());
1055             if (target && target->isRegExpObjectAllocation())
1056                 exactRead = RegExpObjectLastIndexPLoc;
1057             else
1058                 m_heap.escape(node->child1().node());
1059             break;
1060
1061         case SetRegExpObjectLastIndex:
1062             target = m_heap.onlyLocalAllocation(node->child1().node());
1063             if (target && target->isRegExpObjectAllocation()) {
1064                 writes.add(
1065                     PromotedLocationDescriptor(RegExpObjectLastIndexPLoc),
1066                     LazyNode(node->child2().node()));
1067             } else {
1068                 m_heap.escape(node->child1().node());
1069                 m_heap.escape(node->child2().node());
1070             }
1071             break;
1072
1073         case Check:
1074         case CheckVarargs:
1075             m_graph.doToChildren(
1076                 node,
1077                 [&] (Edge edge) {
1078                     if (edge.willNotHaveCheck())
1079                         return;
1080
1081                     if (alreadyChecked(edge.useKind(), SpecObject))
1082                         return;
1083
1084                     m_heap.escape(edge.node());
1085                 });
1086             break;
1087
1088         case MovHint:
1089         case PutHint:
1090             // Handled by OSR availability analysis
1091             break;
1092             
1093         case FilterCallLinkStatus:
1094         case FilterGetByIdStatus:
1095         case FilterPutByIdStatus:
1096         case FilterInByIdStatus:
1097             break;
1098
1099         default:
1100             m_graph.doToChildren(
1101                 node,
1102                 [&] (Edge edge) {
1103                     m_heap.escape(edge.node());
1104                 });
1105             break;
1106         }
1107
1108         if (exactRead) {
1109             ASSERT(target);
1110             ASSERT(writes.isEmpty());
1111             if (Node* value = heapResolve(PromotedHeapLocation(target->identifier(), exactRead))) {
1112                 ASSERT(!value->replacement());
1113                 node->replaceWith(m_graph, value);
1114             }
1115             Node* identifier = target->get(exactRead);
1116             if (identifier)
1117                 m_heap.newPointer(node, identifier);
1118         }
1119
1120         for (auto entry : writes) {
1121             ASSERT(target);
1122             if (entry.value.isNode())
1123                 target->set(entry.key, m_heap.follow(entry.value.asNode()));
1124             else
1125                 target->remove(entry.key);
1126             heapWrite(PromotedHeapLocation(target->identifier(), entry.key), entry.value);
1127         }
1128
1129         m_heap.assertIsValid();
1130     }
1131
    // Decides which unescaped local allocations are worth sinking, and places
    // materialization nodes at every point where a sunken allocation escapes.
    // Returns true if any promotion work was found: either a read from an
    // unescaped allocation was observed, or at least one sink candidate exists.
    bool determineSinkCandidates()
    {
        m_sinkCandidates.clear();
        m_materializationToEscapee.clear();
        m_materializationSiteToMaterializations.clear();
        m_materializationSiteToRecoveries.clear();
        m_materializationSiteToHints.clear();

        // Logically we wish to consider every allocation and sink
        // it. However, it is probably not profitable to sink an
        // allocation that will always escape. So, we only sink an
        // allocation if one of the following is true:
        //
        // 1) There exists a basic block with only backwards outgoing
        //    edges (or no outgoing edges) in which the node wasn't
        //    materialized. This is meant to catch
        //    effectively-infinite loops in which we don't need to
        //    have allocated the object.
        //
        // 2) There exists a basic block at the tail of which the node
        //    is dead and not materialized.
        //
        // 3) The sum of execution counts of the materializations is
        //    less than the sum of execution counts of the original
        //    node.
        //
        // We currently implement only rule #2.
        // FIXME: Implement the two other rules.
        // https://bugs.webkit.org/show_bug.cgi?id=137073 (rule #1)
        // https://bugs.webkit.org/show_bug.cgi?id=137074 (rule #3)
        //
        // However, these rules allow for a sunk object to be put into
        // a non-sunk one, which we don't support. We could solve this
        // by supporting PutHints on local allocations, making these
        // objects only partially correct, and we would need to adapt
        // the OSR availability analysis and OSR exit to handle
        // this. This would be totally doable, but would create a
        // super rare, and thus bug-prone, code path.
        // So, instead, we need to implement one of the following
        // closure rules:
        //
        // 1) If we put a sink candidate into a local allocation that
        //    is not a sink candidate, change our minds and don't
        //    actually sink the sink candidate.
        //
        // 2) If we put a sink candidate into a local allocation, that
        //    allocation becomes a sink candidate as well.
        //
        // We currently choose to implement closure rule #2.
        //
        // `dependencies` maps an unescaped allocation's identifier to the
        // identifiers of the allocations it was stored into, so that closure
        // rule #2 can be applied transitively by the worklist below.
        HashMap<Node*, Vector<Node*>> dependencies;
        bool hasUnescapedReads = false;
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            // Re-simulate the block starting from the abstract heap state
            // computed at its head.
            m_heap = m_heapAtHead[block];

            for (Node* node : *block) {
                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        // Write callback: record when an unescaped local
                        // allocation is stored into some other allocation.
                        if (!value.isNode())
                            return;

                        Allocation* allocation = m_heap.onlyLocalAllocation(value.asNode());
                        if (allocation && !allocation->isEscapedAllocation())
                            dependencies.add(allocation->identifier(), Vector<Node*>()).iterator->value.append(location.base());
                    },
                    [&] (PromotedHeapLocation) -> Node* {
                        // Read callback: note that some promoted location was
                        // read while the allocation was still unescaped.
                        hasUnescapedReads = true;
                        return nullptr;
                    });
            }

            // The sink candidates are initially the unescaped
            // allocations dying at tail of blocks
            NodeSet allocations;
            for (const auto& entry : m_heap.allocations()) {
                if (!entry.value.isEscapedAllocation())
                    allocations.addVoid(entry.key);
            }

            m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

            // Anything that was unescaped before pruning but is no longer
            // tracked as an allocation afterwards is dead at the tail of this
            // block: a sink candidate under rule #2 above.
            for (Node* identifier : allocations) {
                if (!m_heap.isAllocation(identifier))
                    m_sinkCandidates.addVoid(identifier);
            }
        }

        // Ensure that the set of sink candidates is closed for put operations
        // (closure rule #2): if a candidate was stored into another local
        // allocation, that allocation becomes a candidate too, transitively.
        Vector<Node*> worklist;
        worklist.appendRange(m_sinkCandidates.begin(), m_sinkCandidates.end());

        while (!worklist.isEmpty()) {
            for (Node* identifier : dependencies.get(worklist.takeLast())) {
                if (m_sinkCandidates.add(identifier).isNewEntry)
                    worklist.append(identifier);
            }
        }

        if (m_sinkCandidates.isEmpty())
            return hasUnescapedReads;

        if (DFGObjectAllocationSinkingPhaseInternal::verbose)
            dataLog("Candidates: ", listDump(m_sinkCandidates), "\n");

        // Create the materialization nodes.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            m_heap = m_heapAtHead[block];
            // Ask the heap to hand us the escaping allocations (via
            // takeEscapees) instead of just marking them escaped, so that we
            // can materialize them at the point of escape.
            m_heap.setWantEscapees();

            for (Node* node : *block) {
                handleNode(
                    node,
                    [] (PromotedHeapLocation, LazyNode) { },
                    [] (PromotedHeapLocation) -> Node* {
                        return nullptr;
                    });
                // Allocations escaping at this node get materialized here.
                auto escapees = m_heap.takeEscapees();
                if (!escapees.isEmpty())
                    placeMaterializations(escapees, node);
            }

            m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

            {
                // An allocation still unescaped at the tail must nevertheless
                // be materialized if any successor does not track it as an
                // unescaped allocation at its head. Those materializations are
                // placed at the block's terminal.
                HashMap<Node*, Allocation> escapingOnEdge;
                for (const auto& entry : m_heap.allocations()) {
                    if (entry.value.isEscapedAllocation())
                        continue;

                    bool mustEscape = false;
                    for (BasicBlock* successorBlock : block->successors()) {
                        if (!m_heapAtHead[successorBlock].isAllocation(entry.key)
                            || m_heapAtHead[successorBlock].getAllocation(entry.key).isEscapedAllocation())
                            mustEscape = true;
                    }

                    if (mustEscape)
                        escapingOnEdge.add(entry.key, entry.value);
                }
                placeMaterializations(WTFMove(escapingOnEdge), block->terminal());
            }
        }

        return hasUnescapedReads || !m_sinkCandidates.isEmpty();
    }
1277
    // Given the set of allocations escaping at program point `where`, chooses
    // an order in which to materialize them, and records (per site) the
    // materialization nodes, the "recovery" puts needed to close cycles, and
    // the PutHints needed for escapees stored in still-sunken allocations.
    void placeMaterializations(HashMap<Node*, Allocation> escapees, Node* where)
    {
        // We don't create materializations if the escapee is not a
        // sink candidate
        escapees.removeIf(
            [&] (const auto& entry) {
                return !m_sinkCandidates.contains(entry.key);
            });
        if (escapees.isEmpty())
            return;

        // First collect the hints that will be needed when the node
        // we materialize is still stored into other unescaped sink candidates.
        // The way to interpret this vector is:
        //
        // PromotedHeapLocation(NotEscapedAllocation, field) = identifierAllocation
        //
        // e.g:
        // PromotedHeapLocation(@PhantomNewFunction, FunctionActivationPLoc) = IdentifierOf(@MaterializeCreateActivation)
        // or:
        // PromotedHeapLocation(@PhantomCreateActivation, ClosureVarPLoc(x)) = IdentifierOf(@NewFunction)
        //
        // When the rhs of the `=` is to be materialized at this `where` point in the program
        // and IdentifierOf(Materialization) is the original sunken allocation of the materialization.
        //
        // The reason we need to collect all the `identifiers` here is that
        // we may materialize multiple versions of the allocation along control
        // flow edges. We will PutHint these values along those edges. However,
        // we also need to PutHint them when we join and have a Phi of the allocations.
        Vector<std::pair<PromotedHeapLocation, Node*>> hints;
        for (const auto& entry : m_heap.allocations()) {
            // Only non-escaping allocations can hold a hint; escapees get a
            // real recovery put instead (computed further down).
            if (escapees.contains(entry.key))
                continue;

            for (const auto& field : entry.value.fields()) {
                ASSERT(m_sinkCandidates.contains(entry.key) || !escapees.contains(field.value));
                auto iter = escapees.find(field.value);
                if (iter != escapees.end()) {
                    ASSERT(m_sinkCandidates.contains(field.value));
                    hints.append(std::make_pair(PromotedHeapLocation(entry.key, field.key), field.value));
                }
            }
        }

        // Now we need to order the materialization. Any order is
        // valid (as long as we materialize a node first if it is
        // needed for the materialization of another node, e.g. a
        // function's activation must be materialized before the
        // function itself), but we want to try minimizing the number
        // of times we have to place Puts to close cycles after a
        // materialization. In other words, we are trying to find the
        // minimum number of materializations to remove from the
        // materialization graph to make it a DAG, known as the
        // (vertex) feedback set problem. Unfortunately, this is a
        // NP-hard problem, which we don't want to solve exactly.
        //
        // Instead, we use a simple greedy procedure, that proceeds as
        // follows:
        //  - While there is at least one node with no outgoing edge
        //    amongst the remaining materializations, materialize it
        //    first
        //
        //  - Similarly, while there is at least one node with no
        //    incoming edge amongst the remaining materializations,
        //    materialize it last.
        //
        //  - When both previous conditions are false, we have an
        //    actual cycle, and we need to pick a node to
        //    materialize. We try greedily to remove the "pressure" on
        //    the remaining nodes by choosing the node with maximum
        //    |incoming edges| * |outgoing edges| as a measure of how
        //    "central" to the graph it is. We materialize it first,
        //    so that all the recoveries will be Puts of things into
        //    it (rather than Puts of the materialization into other
        //    objects), which means we will have a single
        //    StoreBarrier.


        // Compute dependencies between materializations:
        // - dependencies[a]: the escapees stored in a's fields;
        // - reverseDependencies[a]: the escapees holding a in a field;
        // - forMaterialization[a]: the subset of dependencies[a] that must
        //   exist before a's materialization node can even be created
        //   (fields whose descriptor is neededForMaterialization()).
        HashMap<Node*, NodeSet> dependencies;
        HashMap<Node*, NodeSet> reverseDependencies;
        HashMap<Node*, NodeSet> forMaterialization;
        for (const auto& entry : escapees) {
            auto& myDependencies = dependencies.add(entry.key, NodeSet()).iterator->value;
            auto& myDependenciesForMaterialization = forMaterialization.add(entry.key, NodeSet()).iterator->value;
            reverseDependencies.add(entry.key, NodeSet());
            for (const auto& field : entry.value.fields()) {
                if (escapees.contains(field.value) && field.value != entry.key) {
                    myDependencies.addVoid(field.value);
                    reverseDependencies.add(field.value, NodeSet()).iterator->value.addVoid(entry.key);
                    if (field.key.neededForMaterialization())
                        myDependenciesForMaterialization.addVoid(field.value);
                }
            }
        }

        // Helper function to update the materialized set and the
        // dependencies: removes `identifier` from all three edge maps so the
        // greedy loop below sees an up-to-date graph.
        NodeSet materialized;
        auto materialize = [&] (Node* identifier) {
            materialized.addVoid(identifier);
            for (Node* dep : dependencies.get(identifier))
                reverseDependencies.find(dep)->value.remove(identifier);
            for (Node* rdep : reverseDependencies.get(identifier)) {
                dependencies.find(rdep)->value.remove(identifier);
                forMaterialization.find(rdep)->value.remove(identifier);
            }
            dependencies.remove(identifier);
            reverseDependencies.remove(identifier);
            forMaterialization.remove(identifier);
        };

        // Nodes without remaining unmaterialized fields will be
        // materialized first - amongst the remaining unmaterialized
        // nodes
        StdList<Allocation> toMaterialize;
        auto firstPos = toMaterialize.begin();
        auto materializeFirst = [&] (Allocation&& allocation) {
            materialize(allocation.identifier());
            // We need to insert *after* the current position
            if (firstPos != toMaterialize.end())
                ++firstPos;
            firstPos = toMaterialize.insert(firstPos, WTFMove(allocation));
        };

        // Nodes that no other unmaterialized node points to will be
        // materialized last - amongst the remaining unmaterialized
        // nodes
        auto lastPos = toMaterialize.end();
        auto materializeLast = [&] (Allocation&& allocation) {
            materialize(allocation.identifier());
            lastPos = toMaterialize.insert(lastPos, WTFMove(allocation));
        };

        // These are the promoted locations that contains some of the
        // allocations we are currently escaping. If they are a location on
        // some other allocation we are currently materializing, we will need
        // to "recover" their value with a real put once the corresponding
        // allocation is materialized; if they are a location on some other
        // not-yet-materialized allocation, we will need a PutHint.
        Vector<PromotedHeapLocation> toRecover;

        // This loop does the actual cycle breaking. Entries moved into
        // toMaterialize are left moved-from in `escapees` and removed at the
        // bottom of each iteration.
        while (!escapees.isEmpty()) {
            materialized.clear();

            // Materialize nodes that won't require recoveries if we can
            for (auto& entry : escapees) {
                if (!forMaterialization.find(entry.key)->value.isEmpty())
                    continue;

                if (dependencies.find(entry.key)->value.isEmpty()) {
                    materializeFirst(WTFMove(entry.value));
                    continue;
                }

                if (reverseDependencies.find(entry.key)->value.isEmpty()) {
                    materializeLast(WTFMove(entry.value));
                    continue;
                }
            }

            // We reach this only if there is an actual cycle that needs
            // breaking. Because we do not want to solve a NP-hard problem
            // here, we just heuristically pick a node and materialize it
            // first.
            if (materialized.isEmpty()) {
                uint64_t maxEvaluation = 0;
                Allocation* bestAllocation = nullptr;
                for (auto& entry : escapees) {
                    if (!forMaterialization.find(entry.key)->value.isEmpty())
                        continue;

                    uint64_t evaluation =
                        static_cast<uint64_t>(dependencies.get(entry.key).size()) * reverseDependencies.get(entry.key).size();
                    if (evaluation > maxEvaluation) {
                        maxEvaluation = evaluation;
                        bestAllocation = &entry.value;
                    }
                }
                // A genuine cycle guarantees some node has both incoming and
                // outgoing edges, so the product above must be non-zero.
                RELEASE_ASSERT(maxEvaluation > 0);

                materializeFirst(WTFMove(*bestAllocation));
            }
            RELEASE_ASSERT(!materialized.isEmpty());

            for (Node* identifier : materialized)
                escapees.remove(identifier);
        }

        materialized.clear();

        // Walk toMaterialize in materialization order; any field referring to
        // an escapee that is materialized *later* in the order is a forward
        // reference that must be fixed up with a real put (a "recovery").
        NodeSet escaped;
        for (const Allocation& allocation : toMaterialize)
            escaped.addVoid(allocation.identifier());
        for (const Allocation& allocation : toMaterialize) {
            for (const auto& field : allocation.fields()) {
                if (escaped.contains(field.value) && !materialized.contains(field.value))
                    toRecover.append(PromotedHeapLocation(allocation.identifier(), field.key));
            }
            materialized.addVoid(allocation.identifier());
        }

        // Record, per site, the materialization nodes (in order) and the
        // mapping back to the original sunken allocation.
        Vector<Node*>& materializations = m_materializationSiteToMaterializations.add(
            where, Vector<Node*>()).iterator->value;

        for (const Allocation& allocation : toMaterialize) {
            Node* materialization = createMaterialization(allocation, where);
            materializations.append(materialization);
            m_materializationToEscapee.add(materialization, allocation.identifier());
        }

        if (!toRecover.isEmpty()) {
            m_materializationSiteToRecoveries.add(
                where, Vector<PromotedHeapLocation>()).iterator->value.appendVector(toRecover);
        }

        // The hints need to be after the "real" recoveries so that we
        // don't hint not-yet-complete objects
        m_materializationSiteToHints.add(
            where, Vector<std::pair<PromotedHeapLocation, Node*>>()).iterator->value.appendVector(hints);
    }
1500
1501     Node* createMaterialization(const Allocation& allocation, Node* where)
1502     {
1503         // FIXME: This is the only place where we actually use the
1504         // fact that an allocation's identifier is indeed the node
1505         // that created the allocation.
1506         switch (allocation.kind()) {
1507         case Allocation::Kind::Object: {
1508             ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
1509
1510             return m_graph.addNode(
1511                 allocation.identifier()->prediction(), Node::VarArg, MaterializeNewObject,
1512                 where->origin.withSemantic(allocation.identifier()->origin.semantic),
1513                 OpInfo(m_graph.addStructureSet(allocation.structures())), OpInfo(data), 0, 0);
1514         }
1515
1516         case Allocation::Kind::AsyncGeneratorFunction:
1517         case Allocation::Kind::AsyncFunction:
1518         case Allocation::Kind::GeneratorFunction:
1519         case Allocation::Kind::Function: {
1520             FrozenValue* executable = allocation.identifier()->cellOperand();
1521             
1522             NodeType nodeType;
1523             switch (allocation.kind()) {
1524             case Allocation::Kind::GeneratorFunction:
1525                 nodeType = NewGeneratorFunction;
1526                 break;
1527             case Allocation::Kind::AsyncGeneratorFunction:
1528                 nodeType = NewAsyncGeneratorFunction;
1529                 break;
1530             case Allocation::Kind::AsyncFunction:
1531                 nodeType = NewAsyncFunction;
1532                 break;
1533             default:
1534                 nodeType = NewFunction;
1535             }
1536
1537             return m_graph.addNode(
1538                 allocation.identifier()->prediction(), nodeType,
1539                 where->origin.withSemantic(
1540                     allocation.identifier()->origin.semantic),
1541                 OpInfo(executable));
1542         }
1543
1544         case Allocation::Kind::Activation: {
1545             ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
1546             FrozenValue* symbolTable = allocation.identifier()->cellOperand();
1547
1548             return m_graph.addNode(
1549                 allocation.identifier()->prediction(), Node::VarArg, MaterializeCreateActivation,
1550                 where->origin.withSemantic(
1551                     allocation.identifier()->origin.semantic),
1552                 OpInfo(symbolTable), OpInfo(data), 0, 0);
1553         }
1554
1555         case Allocation::Kind::RegExpObject: {
1556             FrozenValue* regExp = allocation.identifier()->cellOperand();
1557             return m_graph.addNode(
1558                 allocation.identifier()->prediction(), NewRegexp,
1559                 where->origin.withSemantic(
1560                     allocation.identifier()->origin.semantic),
1561                 OpInfo(regExp));
1562         }
1563
1564         default:
1565             DFG_CRASH(m_graph, allocation.identifier(), "Bad allocation kind");
1566         }
1567     }
1568
    // Rewrites the graph around the chosen sink candidates: builds SSA over
    // both promoted heap locations (m_pointerSSA) and allocation
    // materializations (m_allocationSSA), inserts Phi/Upsilon nodes, PutHints,
    // recoveries and OSR hints, converts sunk allocation nodes to their
    // Phantom forms, and redirects all uses via resolve().
    void promoteLocalHeap()
    {
        // Collect the set of heap locations that we will be operating
        // over.
        HashSet<PromotedHeapLocation> locations;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            m_heap = m_heapAtHead[block];

            for (Node* node : *block) {
                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode) {
                        // If the location is not on a sink candidate,
                        // we only sink it if it is read
                        if (m_sinkCandidates.contains(location.base()))
                            locations.addVoid(location);
                    },
                    [&] (PromotedHeapLocation location) -> Node* {
                        locations.addVoid(location);
                        return nullptr;
                    });
            }
        }

        // Figure out which locations belong to which allocations.
        m_locationsForAllocation.clear();
        for (PromotedHeapLocation location : locations) {
            auto result = m_locationsForAllocation.add(
                location.base(),
                Vector<PromotedHeapLocation>());
            ASSERT(!result.iterator->value.contains(location));
            result.iterator->value.append(location);
        }

        m_pointerSSA.reset();
        m_allocationSSA.reset();

        // Collect the set of "variables" that we will be sinking.
        // One allocation-SSA variable per sink candidate, and one
        // pointer-SSA variable per promoted heap location; the index
        // vectors map variable indices back to their node/location.
        m_locationToVariable.clear();
        m_nodeToVariable.clear();
        Vector<Node*> indexToNode;
        Vector<PromotedHeapLocation> indexToLocation;

        for (Node* index : m_sinkCandidates) {
            SSACalculator::Variable* variable = m_allocationSSA.newVariable();
            m_nodeToVariable.add(index, variable);
            ASSERT(indexToNode.size() == variable->index());
            indexToNode.append(index);
        }

        for (PromotedHeapLocation location : locations) {
            SSACalculator::Variable* variable = m_pointerSSA.newVariable();
            m_locationToVariable.add(location, variable);
            ASSERT(indexToLocation.size() == variable->index());
            indexToLocation.append(location);
        }

        // We insert all required constants at top of block 0 so that
        // they are inserted only once and we don't clutter the graph
        // with useless constants everywhere
        HashMap<FrozenValue*, Node*> lazyMapping;
        // m_bottom is the shared placeholder constant used as the default
        // SSA definition for named-property locations below.
        if (!m_bottom)
            m_bottom = m_insertionSet.insertConstant(0, m_graph.block(0)->at(0)->origin, jsNumber(1927));

        Vector<HashSet<PromotedHeapLocation>> hintsForPhi(m_sinkCandidates.size());

        // First pass: record SSA defs for stores to promoted locations,
        // for materializations, and for the sink candidates themselves.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            m_heap = m_heapAtHead[block];

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                // Some named properties can be added conditionally,
                // and that would necessitate bottoms
                for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
                    if (location.kind() != NamedPropertyPLoc)
                        continue;

                    SSACalculator::Variable* variable = m_locationToVariable.get(location);
                    m_pointerSSA.newDef(variable, block, m_bottom);
                }

                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialization);
                    m_allocationSSA.newDef(m_nodeToVariable.get(escapee), block, materialization);
                }

                for (std::pair<PromotedHeapLocation, Node*> pair : m_materializationSiteToHints.get(node)) {
                    PromotedHeapLocation location = pair.first;
                    Node* identifier = pair.second;
                    // We're materializing `identifier` at this point, and the unmaterialized
                    // version is inside `location`. We track which SSA variable this belongs
                    // to in case we also need a PutHint for the Phi.
                    if (UNLIKELY(validationEnabled())) {
                        RELEASE_ASSERT(m_sinkCandidates.contains(location.base()));
                        RELEASE_ASSERT(m_sinkCandidates.contains(identifier));

                        bool found = false;
                        for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                            // We're materializing `identifier` here. This asserts that this is indeed the case.
                            if (m_materializationToEscapee.get(materialization) == identifier) {
                                found = true;
                                break;
                            }
                        }
                        RELEASE_ASSERT(found);
                    }

                    SSACalculator::Variable* variable = m_nodeToVariable.get(identifier);
                    hintsForPhi[variable->index()].addVoid(location);
                }

                if (m_sinkCandidates.contains(node))
                    m_allocationSSA.newDef(m_nodeToVariable.get(node), block, node);

                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        if (!locations.contains(location))
                            return;

                        // Lazy (constant) values are materialized at most once,
                        // at the top of block 0, via lazyMapping.
                        Node* nodeValue;
                        if (value.isNode())
                            nodeValue = value.asNode();
                        else {
                            auto iter = lazyMapping.find(value.asValue());
                            if (iter != lazyMapping.end())
                                nodeValue = iter->value;
                            else {
                                nodeValue = value.ensureIsNode(
                                    m_insertionSet, m_graph.block(0), 0);
                                lazyMapping.add(value.asValue(), nodeValue);
                            }
                        }

                        SSACalculator::Variable* variable = m_locationToVariable.get(location);
                        m_pointerSSA.newDef(variable, block, nodeValue);
                    },
                    [] (PromotedHeapLocation) -> Node* {
                        return nullptr;
                    });
            }
        }
        m_insertionSet.execute(m_graph.block(0));

        // Run the SSA calculators to create Phis
        m_pointerSSA.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                PromotedHeapLocation location = indexToLocation[variable->index()];

                // Don't create Phi nodes for fields of dead allocations
                if (!m_heapAtHead[block].isAllocation(location.base()))
                    return nullptr;

                // Don't create Phi nodes once we are escaped
                if (m_heapAtHead[block].getAllocation(location.base()).isEscapedAllocation())
                    return nullptr;

                // If we point to a single allocation, we will
                // directly use its materialization
                if (m_heapAtHead[block].follow(location))
                    return nullptr;

                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, block->at(0)->origin.withInvalidExit());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });

        m_allocationSSA.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                Node* identifier = indexToNode[variable->index()];

                // Don't create Phi nodes for dead allocations
                if (!m_heapAtHead[block].isAllocation(identifier))
                    return nullptr;

                // Don't create Phi nodes until we are escaped
                if (!m_heapAtHead[block].getAllocation(identifier).isEscapedAllocation())
                    return nullptr;

                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, block->at(0)->origin.withInvalidExit());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });

        // Place Phis in the right places, replace all uses of any load with the appropriate
        // value, and create the materialization nodes.
        LocalOSRAvailabilityCalculator availabilityCalculator(m_graph);
        m_graph.clearReplacements();
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            m_heap = m_heapAtHead[block];
            availabilityCalculator.beginBlock(block);

            // These mapping tables are intended to be lazy. If
            // something is omitted from the table, it means that
            // there haven't been any local stores to the promoted
            // heap location (or any local materialization).
            m_localMapping.clear();
            m_escapeeToMaterialization.clear();

            // Insert the Phi functions that we had previously
            // created.
            for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(block)) {
                SSACalculator::Variable* variable = phiDef->variable();
                m_insertionSet.insert(0, phiDef->value());

                PromotedHeapLocation location = indexToLocation[variable->index()];
                m_localMapping.set(location, phiDef->value());

                if (m_sinkCandidates.contains(location.base())) {
                    m_insertionSet.insert(
                        0,
                        location.createHint(
                            m_graph, block->at(0)->origin.withInvalidExit(), phiDef->value()));
                }
            }

            for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(block)) {
                SSACalculator::Variable* variable = phiDef->variable();
                m_insertionSet.insert(0, phiDef->value());

                Node* identifier = indexToNode[variable->index()];
                m_escapeeToMaterialization.add(identifier, phiDef->value());
                bool canExit = false;
                insertOSRHintsForUpdate(
                    0, block->at(0)->origin, canExit,
                    availabilityCalculator.m_availability, identifier, phiDef->value());

                for (PromotedHeapLocation location : hintsForPhi[variable->index()]) {
                    m_insertionSet.insert(0,
                        location.createHint(m_graph, block->at(0)->origin.withInvalidExit(), phiDef->value()));
                    m_localMapping.set(location, phiDef->value());
                }
            }

            if (DFGObjectAllocationSinkingPhaseInternal::verbose) {
                dataLog("Local mapping at ", pointerDump(block), ": ", mapDump(m_localMapping), "\n");
                dataLog("Local materializations at ", pointerDump(block), ": ", mapDump(m_escapeeToMaterialization), "\n");
            }

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                bool canExit = true;
                bool nextCanExit = node->origin.exitOK;
                // Named-property locations default to m_bottom, mirroring
                // the newDef(..., m_bottom) recorded in the first pass.
                for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
                    if (location.kind() != NamedPropertyPLoc)
                        continue;

                    m_localMapping.set(location, m_bottom);

                    if (m_sinkCandidates.contains(node)) {
                        if (DFGObjectAllocationSinkingPhaseInternal::verbose)
                            dataLog("For sink candidate ", node, " found location ", location, "\n");
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            location.createHint(
                                m_graph, node->origin.takeValidExit(nextCanExit), m_bottom));
                    }
                }

                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                    materialization->origin.exitOK &= canExit;
                    Node* escapee = m_materializationToEscapee.get(materialization);
                    populateMaterialization(block, materialization, escapee);
                    m_escapeeToMaterialization.set(escapee, materialization);
                    m_insertionSet.insert(nodeIndex, materialization);
                    if (DFGObjectAllocationSinkingPhaseInternal::verbose)
                        dataLog("Materializing ", escapee, " => ", materialization, " at ", node, "\n");
                }

                for (PromotedHeapLocation location : m_materializationSiteToRecoveries.get(node))
                    m_insertionSet.insert(nodeIndex, createRecovery(block, location, node, canExit));
                for (std::pair<PromotedHeapLocation, Node*> pair : m_materializationSiteToHints.get(node))
                    m_insertionSet.insert(nodeIndex, createRecovery(block, pair.first, node, canExit));

                // We need to put the OSR hints after the recoveries,
                // because we only want the hints once the object is
                // complete
                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialization);
                    insertOSRHintsForUpdate(
                        nodeIndex, node->origin, canExit,
                        availabilityCalculator.m_availability, escapee, materialization);
                }

                if (node->origin.exitOK && !canExit) {
                    // We indicate that the exit state is fine now. It is OK because we updated the
                    // state above. We need to indicate this manually because the validation doesn't
                    // have enough information to infer that the exit state is fine.
                    m_insertionSet.insertNode(nodeIndex, SpecNone, ExitOK, node->origin);
                }

                if (m_sinkCandidates.contains(node))
                    m_escapeeToMaterialization.set(node, node);

                availabilityCalculator.executeNode(node);

                bool desiredNextExitOK = node->origin.exitOK && !clobbersExitState(m_graph, node);

                bool doLower = false;
                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        if (!locations.contains(location))
                            return;

                        Node* nodeValue;
                        if (value.isNode())
                            nodeValue = value.asNode();
                        else
                            nodeValue = lazyMapping.get(value.asValue());

                        nodeValue = resolve(block, nodeValue);

                        m_localMapping.set(location, nodeValue);

                        if (!m_sinkCandidates.contains(location.base()))
                            return;

                        doLower = true;

                        if (DFGObjectAllocationSinkingPhaseInternal::verbose)
                            dataLog("Creating hint with value ", nodeValue, " before ", node, "\n");
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            location.createHint(
                                m_graph, node->origin.takeValidExit(nextCanExit), nodeValue));
                    },
                    [&] (PromotedHeapLocation location) -> Node* {
                        return resolve(block, location);
                    });

                if (!nextCanExit && desiredNextExitOK) {
                    // We indicate that the exit state is fine now. We need to do this because we
                    // emitted hints that appear to invalidate the exit state.
                    m_insertionSet.insertNode(nodeIndex + 1, SpecNone, ExitOK, node->origin);
                }

                // Turn sunk allocations into their Phantom forms; other
                // lowered writes to sunk objects are removed outright.
                if (m_sinkCandidates.contains(node) || doLower) {
                    switch (node->op()) {
                    case NewObject:
                        node->convertToPhantomNewObject();
                        break;

                    case NewFunction:
                        node->convertToPhantomNewFunction();
                        break;

                    case NewGeneratorFunction:
                        node->convertToPhantomNewGeneratorFunction();
                        break;

                    case NewAsyncGeneratorFunction:
                        node->convertToPhantomNewAsyncGeneratorFunction();
                        break;

                    case NewAsyncFunction:
                        node->convertToPhantomNewAsyncFunction();
                        break;

                    case CreateActivation:
                        node->convertToPhantomCreateActivation();
                        break;

                    case NewRegexp:
                        node->convertToPhantomNewRegexp();
                        break;

                    default:
                        node->remove(m_graph);
                        break;
                    }
                }

                m_graph.doToChildren(
                    node,
                    [&] (Edge& edge) {
                        edge.setNode(resolve(block, edge.node()));
                    });
            }

            // Gotta drop some Upsilons.
            NodeAndIndex terminal = block->findTerminal();
            size_t upsilonInsertionPoint = terminal.index;
            NodeOrigin upsilonOrigin = terminal.node->origin;
            for (BasicBlock* successorBlock : block->successors()) {
                for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    PromotedHeapLocation location = indexToLocation[variable->index()];
                    Node* incoming = resolve(block, location);

                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }

                for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    Node* incoming = getMaterialization(block, indexToNode[variable->index()]);

                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }
            }

            m_insertionSet.execute(block);
        }
    }
1980
1981     NEVER_INLINE Node* resolve(BasicBlock* block, PromotedHeapLocation location)
1982     {
1983         // If we are currently pointing to a single local allocation,
1984         // simply return the associated materialization.
1985         if (Node* identifier = m_heap.follow(location))
1986             return getMaterialization(block, identifier);
1987
1988         if (Node* result = m_localMapping.get(location))
1989             return result;
1990
1991         // This implies that there is no local mapping. Find a non-local mapping.
1992         SSACalculator::Def* def = m_pointerSSA.nonLocalReachingDef(
1993             block, m_locationToVariable.get(location));
1994         ASSERT(def);
1995         ASSERT(def->value());
1996
1997         Node* result = def->value();
1998         if (result->replacement())
1999             result = result->replacement();
2000         ASSERT(!result->replacement());
2001
2002         m_localMapping.add(location, result);
2003         return result;
2004     }
2005
2006     NEVER_INLINE Node* resolve(BasicBlock* block, Node* node)
2007     {
2008         // If we are currently pointing to a single local allocation,
2009         // simply return the associated materialization.
2010         if (Node* identifier = m_heap.follow(node))
2011             return getMaterialization(block, identifier);
2012
2013         if (node->replacement())
2014             node = node->replacement();
2015         ASSERT(!node->replacement());
2016
2017         return node;
2018     }
2019
2020     NEVER_INLINE Node* getMaterialization(BasicBlock* block, Node* identifier)
2021     {
2022         ASSERT(m_heap.isAllocation(identifier));
2023         if (!m_sinkCandidates.contains(identifier))
2024             return identifier;
2025
2026         if (Node* materialization = m_escapeeToMaterialization.get(identifier))
2027             return materialization;
2028
2029         SSACalculator::Def* def = m_allocationSSA.nonLocalReachingDef(
2030             block, m_nodeToVariable.get(identifier));
2031         ASSERT(def && def->value());
2032         m_escapeeToMaterialization.add(identifier, def->value());
2033         ASSERT(!def->value()->replacement());
2034         return def->value();
2035     }
2036
    // Inserts OSR exit hints so that every availability entry (heap location
    // or bytecode local) whose value follows to `escapee` now maps to
    // `materialization`. `canExit` is consumed via takeValidExit so that only
    // the first inserted hint keeps a valid exit state.
    void insertOSRHintsForUpdate(unsigned nodeIndex, NodeOrigin origin, bool& canExit, AvailabilityMap& availability, Node* escapee, Node* materialization)
    {
        if (DFGObjectAllocationSinkingPhaseInternal::verbose) {
            dataLog("Inserting OSR hints at ", origin, ":\n");
            dataLog("    Escapee: ", escapee, "\n");
            dataLog("    Materialization: ", materialization, "\n");
            dataLog("    Availability: ", availability, "\n");
        }
        
        // We need to follow() the value in the heap.
        // Consider the following graph:
        //
        // Block #0
        //   0: NewObject({})
        //   1: NewObject({})
        //   -: PutByOffset(@0, @1, x:0)
        //   -: PutStructure(@0, {x:0})
        //   2: GetByOffset(@0, x:0)
        //   -: MovHint(@2, loc1)
        //   -: Branch(#1, #2)
        //
        // Block #1
        //   3: Call(f, @1)
        //   4: Return(@0)
        //
        // Block #2
        //   -: Return(undefined)
        //
        // We need to materialize @1 at @3, and when doing so we need
        // to insert a MovHint for the materialization into loc1 as
        // well.
        // In order to do this, we say that we need to insert an
        // update hint for any availability whose node resolve()s to
        // the materialization.
        // Heap availabilities: emit a PutHint for each promoted heap
        // location whose available node follows to the escapee.
        for (auto entry : availability.m_heap) {
            if (!entry.value.hasNode())
                continue;
            if (m_heap.follow(entry.value.node()) != escapee)
                continue;

            m_insertionSet.insert(
                nodeIndex,
                entry.key.createHint(m_graph, origin.takeValidExit(canExit), materialization));
        }

        // Local availabilities: emit a MovHint into each bytecode operand
        // whose available node follows to the escapee.
        for (unsigned i = availability.m_locals.size(); i--;) {
            if (!availability.m_locals[i].hasNode())
                continue;
            if (m_heap.follow(availability.m_locals[i].node()) != escapee)
                continue;

            int operand = availability.m_locals.operandForIndex(i);
            m_insertionSet.insertNode(
                nodeIndex, SpecNone, MovHint, origin.takeValidExit(canExit), OpInfo(operand),
                materialization->defaultEdge());
        }
    }
2094
    // Fills in the children of a materialization node from the resolved
    // current values of the escapee's promoted heap locations. Any field
    // whose value is itself a sink candidate gets m_bottom as a placeholder
    // instead of the (not-yet-materialized) node.
    void populateMaterialization(BasicBlock* block, Node* node, Node* escapee)
    {
        Allocation& allocation = m_heap.getAllocation(escapee);
        switch (node->op()) {
        case MaterializeNewObject: {
            ObjectMaterializationData& data = node->objectMaterializationData();
            unsigned firstChild = m_graph.m_varArgChildren.size();

            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);

            // The structure is always the first var-arg child.
            PromotedHeapLocation structure(StructurePLoc, allocation.identifier());
            ASSERT(locations.contains(structure));

            m_graph.m_varArgChildren.append(Edge(resolve(block, structure), KnownCellUse));

            for (PromotedHeapLocation location : locations) {
                switch (location.kind()) {
                case StructurePLoc:
                    ASSERT(location == structure);
                    break;

                case NamedPropertyPLoc: {
                    ASSERT(location.base() == allocation.identifier());
                    // Property descriptors and var-arg children are appended
                    // in lockstep, so their indices correspond.
                    data.m_properties.append(location.descriptor());
                    Node* value = resolve(block, location);
                    if (m_sinkCandidates.contains(value))
                        m_graph.m_varArgChildren.append(m_bottom);
                    else
                        m_graph.m_varArgChildren.append(value);
                    break;
                }

                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }

            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }

        case MaterializeCreateActivation: {
            ObjectMaterializationData& data = node->objectMaterializationData();

            unsigned firstChild = m_graph.m_varArgChildren.size();

            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);

            // The symbol table and scope are always the first two var-arg
            // children, in that order.
            PromotedHeapLocation symbolTable(ActivationSymbolTablePLoc, allocation.identifier());
            ASSERT(locations.contains(symbolTable));
            ASSERT(node->cellOperand() == resolve(block, symbolTable)->constant());
            m_graph.m_varArgChildren.append(Edge(resolve(block, symbolTable), KnownCellUse));

            PromotedHeapLocation scope(ActivationScopePLoc, allocation.identifier());
            ASSERT(locations.contains(scope));
            m_graph.m_varArgChildren.append(Edge(resolve(block, scope), KnownCellUse));

            for (PromotedHeapLocation location : locations) {
                switch (location.kind()) {
                case ActivationScopePLoc: {
                    ASSERT(location == scope);
                    break;
                }

                case ActivationSymbolTablePLoc: {
                    ASSERT(location == symbolTable);
                    break;
                }

                case ClosureVarPLoc: {
                    ASSERT(location.base() == allocation.identifier());
                    data.m_properties.append(location.descriptor());
                    Node* value = resolve(block, location);
                    if (m_sinkCandidates.contains(value))
                        m_graph.m_varArgChildren.append(m_bottom);
                    else
                        m_graph.m_varArgChildren.append(value);
                    break;
                }

                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }

            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }
        
        case NewFunction:
        case NewGeneratorFunction:
        case NewAsyncGeneratorFunction:
        case NewAsyncFunction: {
            // Functions carry exactly two promoted locations: the executable
            // (already baked into the node) and the activation (child1).
            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
            ASSERT(locations.size() == 2);
                
            PromotedHeapLocation executable(FunctionExecutablePLoc, allocation.identifier());
            ASSERT_UNUSED(executable, locations.contains(executable));
                
            PromotedHeapLocation activation(FunctionActivationPLoc, allocation.identifier());
            ASSERT(locations.contains(activation));

            node->child1() = Edge(resolve(block, activation), KnownCellUse);
            break;
        }

        case NewRegexp: {
            // RegExp objects carry exactly two promoted locations: the regexp
            // (already baked into the node) and lastIndex (child1).
            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
            ASSERT(locations.size() == 2);

            PromotedHeapLocation regExp(RegExpObjectRegExpPLoc, allocation.identifier());
            ASSERT_UNUSED(regExp, locations.contains(regExp));

            PromotedHeapLocation lastIndex(RegExpObjectLastIndexPLoc, allocation.identifier());
            ASSERT(locations.contains(lastIndex));
            Node* value = resolve(block, lastIndex);
            if (m_sinkCandidates.contains(value))
                node->child1() = Edge(m_bottom);
            else
                node->child1() = Edge(value);
            break;
        }

        default:
            DFG_CRASH(m_graph, node, "Bad materialize op");
        }
    }
2226
    // Emits the IR node that recovers (stores back) the value of one promoted
    // heap location of a phantom allocation, at the program point "where".
    // Returns the newly created node. "canExit" is threaded through
    // origin.takeValidExit() for each emitted node — presumably so only the
    // first recovery at a site may exit; confirm against NodeOrigin.
    Node* createRecovery(BasicBlock* block, PromotedHeapLocation location, Node* where, bool& canExit)
    {
        if (DFGObjectAllocationSinkingPhaseInternal::verbose)
            dataLog("Recovering ", location, " at ", where, "\n");
        ASSERT(location.base()->isPhantomAllocation());
        Node* base = getMaterialization(block, location.base());
        Node* value = resolve(block, location);

        NodeOrigin origin = where->origin.withSemantic(base->origin.semantic);

        if (DFGObjectAllocationSinkingPhaseInternal::verbose)
            dataLog("Base is ", base, " and value is ", value, "\n");

        // If the base object is itself still sunk (its materialization is a
        // phantom allocation), we cannot emit a real store; record a hint for
        // the location instead.
        if (base->isPhantomAllocation()) {
            return PromotedHeapLocation(base, location.descriptor()).createHint(
                m_graph, origin.takeValidExit(canExit), value);
        }

        switch (location.kind()) {
        case NamedPropertyPLoc: {
            Allocation& allocation = m_heap.getAllocation(location.base());

            // Collect every structure the allocation may have at this point,
            // and sort them by the offset of the property we are storing so
            // structures sharing an offset end up adjacent.
            Vector<RegisteredStructure> structures;
            structures.appendRange(allocation.structures().begin(), allocation.structures().end());
            unsigned identifierNumber = location.info();
            UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];

            std::sort(
                structures.begin(),
                structures.end(),
                [uid] (RegisteredStructure a, RegisteredStructure b) -> bool {
                    return a->getConcurrently(uid) < b->getConcurrently(uid);
                });

            RELEASE_ASSERT(structures.size());
            PropertyOffset firstOffset = structures[0]->getConcurrently(uid);

            // Fast case: every possible structure stores the property at the
            // same offset, so a single PutByOffset suffices.
            if (firstOffset == structures.last()->getConcurrently(uid)) {
                Node* storage = base;
                // FIXME: When we decide to sink objects with a
                // property storage, we should handle non-inline offsets.
                RELEASE_ASSERT(isInlineOffset(firstOffset));

                StorageAccessData* data = m_graph.m_storageAccessData.add();
                data->offset = firstOffset;
                data->identifierNumber = identifierNumber;

                return m_graph.addNode(
                    PutByOffset,
                    origin.takeValidExit(canExit),
                    OpInfo(data),
                    Edge(storage, KnownCellUse),
                    Edge(base, KnownCellUse),
                    value->defaultEdge());
            }

            // Slow case: offsets differ by structure. Build a MultiPutByOffset
            // whose variants group the (sorted) structures by offset.
            MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
            data->identifierNumber = identifierNumber;

            {
                PropertyOffset currentOffset = firstOffset;
                StructureSet currentSet;
                for (RegisteredStructure structure : structures) {
                    PropertyOffset offset = structure->getConcurrently(uid);
                    if (offset != currentOffset) {
                        // Because our analysis treats MultiPutByOffset like an escape, we only have to
                        // deal with storing results that would have been previously stored by PutByOffset
                        // nodes. Those nodes were guarded by the appropriate type checks. This means that
                        // at this point, we can simply trust that the incoming value has the right type
                        // for whatever structure we are using.
                        data->variants.append(
                            PutByIdVariant::replace(currentSet, currentOffset, InferredType::Top));
                        currentOffset = offset;
                        currentSet.clear();
                    }
                    currentSet.add(structure.get());
                }
                // Flush the final group.
                data->variants.append(
                    PutByIdVariant::replace(currentSet, currentOffset, InferredType::Top));
            }

            return m_graph.addNode(
                MultiPutByOffset,
                origin.takeValidExit(canExit),
                OpInfo(data),
                Edge(base, KnownCellUse),
                value->defaultEdge());
        }

        case ClosureVarPLoc: {
            // Store back into the activation; location.info() carries the
            // scope offset operand.
            return m_graph.addNode(
                PutClosureVar,
                origin.takeValidExit(canExit),
                OpInfo(location.info()),
                Edge(base, KnownCellUse),
                value->defaultEdge());
        }

        case RegExpObjectLastIndexPLoc: {
            // NOTE(review): OpInfo(true) — presumably the "lastIndex is
            // writable" flag; confirm against SetRegExpObjectLastIndex's
            // handler.
            return m_graph.addNode(
                SetRegExpObjectLastIndex,
                origin.takeValidExit(canExit),
                OpInfo(true),
                Edge(base, KnownCellUse),
                value->defaultEdge());
        }

        default:
            DFG_CRASH(m_graph, base, "Bad location kind");
            break;
        }

        RELEASE_ASSERT_NOT_REACHED();
    }
2341     
2342     void removeICStatusFilters()
2343     {
2344         for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
2345             for (Node* node : *block) {
2346                 switch (node->op()) {
2347                 case FilterCallLinkStatus:
2348                 case FilterGetByIdStatus:
2349                 case FilterPutByIdStatus:
2350                 case FilterInByIdStatus:
2351                     if (node->child1()->isPhantomAllocation())
2352                         node->removeWithoutChecks();
2353                     break;
2354                 default:
2355                     break;
2356                 }
2357             }
2358         }
2359     }
2360
2361     // This is a great way of asking value->isStillValid() without having to worry about getting
2362     // different answers. It turns out that this analysis works OK regardless of what this
2363     // returns but breaks badly if this changes its mind for any particular InferredValue. This
2364     // method protects us from that.
2365     bool isStillValid(InferredValue* value)
2366     {
2367         return m_validInferredValues.add(value, value->isStillValid()).iterator->value;
2368     }
2369
    // SSA calculators: one for promoted heap locations, one for the
    // allocations/materializations themselves.
    SSACalculator m_pointerSSA;
    SSACalculator m_allocationSSA;
    // Allocation nodes we decided to sink; values stored into them are
    // replaced with m_bottom when building materialization children.
    NodeSet m_sinkCandidates;
    // SSA variable lookup tables for the two calculators above.
    HashMap<PromotedHeapLocation, SSACalculator::Variable*> m_locationToVariable;
    HashMap<Node*, SSACalculator::Variable*> m_nodeToVariable;
    // Current block-local value for each promoted location (used by resolve;
    // see uses above).
    HashMap<PromotedHeapLocation, Node*> m_localMapping;
    // Current materialization node for each escapee (used by
    // getMaterialization; see uses above).
    HashMap<Node*, Node*> m_escapeeToMaterialization;
    InsertionSet m_insertionSet;
    CombinedLiveness m_combinedLiveness;

    // Cache backing isStillValid(): pins the first isStillValid() answer per
    // InferredValue so the analysis never sees it change.
    HashMap<InferredValue*, bool> m_validInferredValues;

    // Bookkeeping for materialization sites: which escapee a materialization
    // stands for, and the materializations/recoveries/hints emitted per site.
    HashMap<Node*, Node*> m_materializationToEscapee;
    HashMap<Node*, Vector<Node*>> m_materializationSiteToMaterializations;
    HashMap<Node*, Vector<PromotedHeapLocation>> m_materializationSiteToRecoveries;
    HashMap<Node*, Vector<std::pair<PromotedHeapLocation, Node*>>> m_materializationSiteToHints;

    // All promoted locations discovered for each allocation.
    HashMap<Node*, Vector<PromotedHeapLocation>> m_locationsForAllocation;

    // Abstract heap state at block boundaries, plus the running in-block heap.
    BlockMap<LocalHeap> m_heapAtHead;
    BlockMap<LocalHeap> m_heapAtTail;
    LocalHeap m_heap;

    // Placeholder node used as the child edge for values that are themselves
    // sink candidates (see the m_sinkCandidates checks above).
    Node* m_bottom = nullptr;
2394 };
2395
2396 }
2397
// Entry point: runs the object allocation sinking phase over the graph.
// Returns whatever runPhase reports (presumably whether the graph changed —
// confirm against runPhase's contract).
bool performObjectAllocationSinking(Graph& graph)
{
    return runPhase<ObjectAllocationSinkingPhase>(graph);
}
2402
2403 } } // namespace JSC::DFG
2404
2405 #endif // ENABLE(DFG_JIT)