Record the HashSet/HashMap operations in DFG/FTL/B3 and replay them in a benchmark
[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGObjectAllocationSinkingPhase.cpp
1 /*
2  * Copyright (C) 2015 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24  */
25
26 #include "config.h"
27 #include "DFGObjectAllocationSinkingPhase.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "DFGBlockMapInlines.h"
32 #include "DFGClobbersExitState.h"
33 #include "DFGCombinedLiveness.h"
34 #include "DFGGraph.h"
35 #include "DFGInsertionSet.h"
36 #include "DFGLazyNode.h"
37 #include "DFGLivenessAnalysisPhase.h"
38 #include "DFGOSRAvailabilityAnalysisPhase.h"
39 #include "DFGPhase.h"
40 #include "DFGPromotedHeapLocation.h"
41 #include "DFGSSACalculator.h"
42 #include "DFGValidate.h"
43 #include "JSCInlines.h"
44
45 #include <list>
46
47 namespace JSC { namespace DFG {
48
49 namespace {
50
// Debug-logging switch for this phase. constexpr (rather than a mutable
// global) so the `if (verbose)` logging paths are compiled out entirely
// in normal builds; flip the literal to true when debugging the phase.
static constexpr bool verbose = false;
52
53 // In order to sink object cycles, we use a points-to analysis coupled
54 // with an escape analysis. This analysis is actually similar to an
55 // abstract interpreter focused on local allocations and ignoring
56 // everything else.
57 //
58 // We represent the local heap using two mappings:
59 //
60 // - A set of the local allocations present in the function, where
61 //   each of those have a further mapping from
62 //   PromotedLocationDescriptor to local allocations they must point
63 //   to.
64 //
65 // - A "pointer" mapping from nodes to local allocations, if they must
66 //   be equal to said local allocation and are currently live. This
67 //   can be because the node is the actual node that created the
68 //   allocation, or any other node that must currently point to it -
69 //   we don't make a difference.
70 //
71 // The following graph is a motivation for why we separate allocations
72 // from pointers:
73 //
74 // Block #0
75 //  0: NewObject({})
76 //  1: NewObject({})
77 //  -: PutByOffset(@0, @1, x)
78 //  -: PutStructure(@0, {x:0})
79 //  2: GetByOffset(@0, x)
80 //  -: Jump(#1)
81 //
82 // Block #1
83 //  -: Return(@2)
84 //
85 // Here, we need to remember in block #1 that @2 points to a local
86 // allocation with appropriate fields and structures information
87 // (because we should be able to place a materialization on top of
88 // block #1 here), even though @1 is dead. We *could* just keep @1
89 // artificially alive here, but there is no real reason to do it:
90 // after all, by the end of block #0, @1 and @2 should be completely
91 // interchangeable, and there is no reason for us to artificially make
92 // @1 more important.
93 //
94 // An important point to consider to understand this separation is
95 // that we should think of the local heap as follows: we have a
96 // bunch of nodes that are pointers to "allocations" that live
97 // someplace on the heap, and those allocations can have pointers in
98 // between themselves as well. We shouldn't care about whatever
99 // names we give to the allocations; what matters when
100 // comparing/merging two heaps is the isomorphism/comparison between
101 // the allocation graphs as seen by the nodes.
102 //
103 // For instance, in the following graph:
104 //
105 // Block #0
106 //  0: NewObject({})
107 //  -: Branch(#1, #2)
108 //
109 // Block #1
110 //  1: NewObject({})
111 //  -: PutByOffset(@0, @1, x)
112 //  -: PutStructure(@0, {x:0})
113 //  -: Jump(#3)
114 //
115 // Block #2
116 //  2: NewObject({})
117 //  -: PutByOffset(@2, undefined, x)
118 //  -: PutStructure(@2, {x:0})
119 //  -: PutByOffset(@0, @2, x)
120 //  -: PutStructure(@0, {x:0})
121 //  -: Jump(#3)
122 //
123 // Block #3
124 //  -: Return(@0)
125 //
126 // we should think of the heaps at tail of blocks #1 and #2 as being
127 // exactly the same, even though one has @0.x pointing to @1 and the
128 // other has @0.x pointing to @2, because in essence this should not
129 // be different from the graph where we hoisted @1 and @2 into a
130 // single allocation in block #0. We currently will not handle this
131 // case, because we merge allocations based on the node they are
132 // coming from, but this is only a technicality for the sake of
133 // simplicity that shouldn't hide the deeper idea outlined here.
134
135 class Allocation {
136 public:
137     // We use Escaped as a special allocation kind because when we
138     // decide to sink an allocation, we still need to keep track of it
139     // once it is escaped if it still has pointers to it in order to
140     // replace any use of those pointers by the corresponding
141     // materialization
142     enum class Kind { Escaped, Object, Activation, Function, GeneratorFunction, AsyncFunction };
143
144     explicit Allocation(Node* identifier = nullptr, Kind kind = Kind::Escaped)
145         : m_identifier(identifier)
146         , m_kind(kind)
147     {
148     }
149
150
151     const HashMap<PromotedLocationDescriptor, Node*>& fields() const
152     {
153         return m_fields;
154     }
155
156     Node* get(PromotedLocationDescriptor descriptor)
157     {
158         return m_fields.get(descriptor);
159     }
160
161     Allocation& set(PromotedLocationDescriptor descriptor, Node* value)
162     {
163         // Pointing to anything else than an unescaped local
164         // allocation is represented by simply not having the
165         // field
166         if (value)
167             m_fields.set(descriptor, value);
168         else
169             m_fields.remove(descriptor);
170         return *this;
171     }
172
173     void remove(PromotedLocationDescriptor descriptor)
174     {
175         set(descriptor, nullptr);
176     }
177
178     bool hasStructures() const
179     {
180         switch (kind()) {
181         case Kind::Object:
182             return true;
183
184         default:
185             return false;
186         }
187     }
188
189     Allocation& setStructures(const RegisteredStructureSet& structures)
190     {
191         ASSERT(hasStructures() && !structures.isEmpty());
192         m_structures = structures;
193         return *this;
194     }
195
196     Allocation& mergeStructures(const RegisteredStructureSet& structures)
197     {
198         ASSERT(hasStructures() || structures.isEmpty());
199         m_structures.merge(structures);
200         return *this;
201     }
202
203     Allocation& filterStructures(const RegisteredStructureSet& structures)
204     {
205         ASSERT(hasStructures());
206         m_structures.filter(structures);
207         return *this;
208     }
209
210     const RegisteredStructureSet& structures() const
211     {
212         return m_structures;
213     }
214
215     Node* identifier() const { return m_identifier; }
216
217     Kind kind() const { return m_kind; }
218
219     bool isEscapedAllocation() const
220     {
221         return kind() == Kind::Escaped;
222     }
223
224     bool isObjectAllocation() const
225     {
226         return m_kind == Kind::Object;
227     }
228
229     bool isActivationAllocation() const
230     {
231         return m_kind == Kind::Activation;
232     }
233
234     bool isFunctionAllocation() const
235     {
236         return m_kind == Kind::Function || m_kind == Kind::GeneratorFunction || m_kind == Kind::AsyncFunction;
237     }
238
239     bool operator==(const Allocation& other) const
240     {
241         return m_identifier == other.m_identifier
242             && m_kind == other.m_kind
243             && m_fields == other.m_fields
244             && m_structures == other.m_structures;
245     }
246
247     bool operator!=(const Allocation& other) const
248     {
249         return !(*this == other);
250     }
251
252     void dump(PrintStream& out) const
253     {
254         dumpInContext(out, nullptr);
255     }
256
257     void dumpInContext(PrintStream& out, DumpContext* context) const
258     {
259         switch (m_kind) {
260         case Kind::Escaped:
261             out.print("Escaped");
262             break;
263
264         case Kind::Object:
265             out.print("Object");
266             break;
267
268         case Kind::Function:
269             out.print("Function");
270             break;
271
272         case Kind::GeneratorFunction:
273             out.print("GeneratorFunction");
274             break;
275
276         case Kind::AsyncFunction:
277             out.print("AsyncFunction");
278             break;
279
280         case Kind::Activation:
281             out.print("Activation");
282             break;
283         }
284         out.print("Allocation(");
285         if (!m_structures.isEmpty())
286             out.print(inContext(m_structures.toStructureSet(), context));
287         if (!m_fields.isEmpty()) {
288             if (!m_structures.isEmpty())
289                 out.print(", ");
290             out.print(mapDump(m_fields, " => #", ", "));
291         }
292         out.print(")");
293     }
294
295 private:
296     Node* m_identifier; // This is the actual node that created the allocation
297     Kind m_kind;
298     HashMap<PromotedLocationDescriptor, Node*> m_fields;
299     RegisteredStructureSet m_structures;
300 };
301
// The abstract heap state tracked by the escape analysis for one program
// point: the set of tracked local allocations (with their fields and
// structures), plus a map from live nodes to the allocation they must
// point to. See the discussion at the top of this file for why pointers
// and allocations are kept separate.
class LocalHeap {
public:
    // Registers a brand-new allocation for `node`, and makes `node` a
    // pointer to itself (the creating node always points to its own
    // allocation initially).
    Allocation& newAllocation(Node* node, Allocation::Kind kind)
    {
        ASSERT(!m_pointers.contains(node) && !isAllocation(node));
        m_pointers.add(node, node);
        return m_allocations.set(node, Allocation(node, kind)).iterator->value;
    }

    bool isAllocation(Node* identifier) const
    {
        return m_allocations.contains(identifier);
    }

    // Note that this is fundamentally different from
    // onlyLocalAllocation() below. getAllocation() takes as argument
    // a node-as-identifier, that is, an allocation node. This
    // allocation node doesn't have to be alive; it may only be
    // pointed to by other nodes or allocation fields.
    // For instance, in the following graph:
    //
    // Block #0
    //  0: NewObject({})
    //  1: NewObject({})
    //  -: PutByOffset(@0, @1, x)
    //  -: PutStructure(@0, {x:0})
    //  2: GetByOffset(@0, x)
    //  -: Jump(#1)
    //
    // Block #1
    //  -: Return(@2)
    //
    // At head of block #1, the only reachable allocation is #@1,
    // which can be reached through node @2. Thus, getAllocation(#@1)
    // contains the appropriate metadata for this allocation, but
    // onlyLocalAllocation(@1) is null, as @1 is no longer a pointer
    // to #@1 (since it is dead). Conversely, onlyLocalAllocation(@2)
    // is the same as getAllocation(#@1), while getAllocation(#@2)
    // does not make sense since @2 is not an allocation node.
    //
    // This is meant to be used when the node is already known to be
    // an identifier (i.e. an allocation) - probably because it was
    // found as value of a field or pointer in the current heap, or
    // was the result of a call to follow(). In any other cases (such
    // as when doing anything while traversing the graph), the
    // appropriate function to call is probably onlyLocalAllocation.
    Allocation& getAllocation(Node* identifier)
    {
        auto iter = m_allocations.find(identifier);
        ASSERT(iter != m_allocations.end());
        return iter->value;
    }

    // Records that `node` must point to the (already-tracked)
    // allocation identified by `identifier`.
    void newPointer(Node* node, Node* identifier)
    {
        ASSERT(!m_allocations.contains(node) && !m_pointers.contains(node));
        ASSERT(isAllocation(identifier));
        m_pointers.add(node, identifier);
    }

    // follow solves the points-to problem. Given a live node, which
    // may be either an allocation itself or a heap read (e.g. a
    // GetByOffset node), it returns the corresponding allocation
    // node, if there is one. If the argument node is neither an
    // allocation or a heap read, or may point to different nodes,
    // nullptr will be returned. Note that a node that points to
    // different nodes can never point to an unescaped local
    // allocation.
    Node* follow(Node* node) const
    {
        auto iter = m_pointers.find(node);
        ASSERT(iter == m_pointers.end() || m_allocations.contains(iter->value));
        return iter == m_pointers.end() ? nullptr : iter->value;
    }

    // Precondition: location.base() must be a tracked allocation
    // identifier (the find() below is assumed to succeed).
    Node* follow(PromotedHeapLocation location) const
    {
        const Allocation& base = m_allocations.find(location.base())->value;
        auto iter = base.fields().find(location.descriptor());

        if (iter == base.fields().end())
            return nullptr;

        return iter->value;
    }

    // onlyLocalAllocation find the corresponding allocation metadata
    // for any live node. onlyLocalAllocation(node) is essentially
    // getAllocation(follow(node)), with appropriate null handling.
    Allocation* onlyLocalAllocation(Node* node)
    {
        Node* identifier = follow(node);
        if (!identifier)
            return nullptr;

        return &getAllocation(identifier);
    }

    Allocation* onlyLocalAllocation(PromotedHeapLocation location)
    {
        Node* identifier = follow(location);
        if (!identifier)
            return nullptr;

        return &getAllocation(identifier);
    }

    // This allows us to store the escapees only when necessary. If
    // set, the current escapees can be retrieved at any time using
    // takeEscapees(), which will clear the cached set of escapees;
    // otherwise the heap won't remember escaping allocations.
    void setWantEscapees()
    {
        m_wantEscapees = true;
    }

    HashMap<Node*, Allocation> takeEscapees()
    {
        return WTFMove(m_escapees);
    }

    // Escapes whatever allocation `node` points to, if any. Harmless
    // on nodes that don't point to a tracked allocation.
    void escape(Node* node)
    {
        Node* identifier = follow(node);
        if (!identifier)
            return;

        escapeAllocation(identifier);
    }

    // Merges another control-flow predecessor's heap into this one.
    // The join escapes anything the two heaps disagree about (see the
    // comment above mergePointerSets below), and intersects what they
    // agree about.
    void merge(const LocalHeap& other)
    {
        assertIsValid();
        other.assertIsValid();
        ASSERT(!m_wantEscapees);

        // First merge into an unreached heap simply adopts the other
        // heap wholesale.
        if (!reached()) {
            ASSERT(other.reached());
            *this = other;
            return;
        }

        NodeSet toEscape;

        // Adopt allocations only the other heap knows about; add() is
        // a no-op for keys we already have.
        for (auto& allocationEntry : other.m_allocations)
            m_allocations.add(allocationEntry.key, allocationEntry.value);
        for (auto& allocationEntry : m_allocations) {
            auto allocationIter = other.m_allocations.find(allocationEntry.key);

            // If we have it and they don't, it died for them but we
            // are keeping it alive from another field somewhere.
            // There is nothing to do - it will be escaped
            // automatically when we handle that other field.
            // This will also happen for allocations that we have and
            // they don't, and all of those will get pruned.
            if (allocationIter == other.m_allocations.end())
                continue;

            if (allocationEntry.value.kind() != allocationIter->value.kind()) {
                // Conflicting kinds for the same identifier: escape
                // the allocation and everything its other-side fields
                // reach.
                toEscape.addVoid(allocationEntry.key);
                for (const auto& fieldEntry : allocationIter->value.fields())
                    toEscape.addVoid(fieldEntry.value);
            } else {
                mergePointerSets(
                    allocationEntry.value.fields(), allocationIter->value.fields(),
                    [&] (Node* identifier) {
                        toEscape.addVoid(identifier);
                    },
                    [&] (PromotedLocationDescriptor field) {
                        allocationEntry.value.remove(field);
                    });
                allocationEntry.value.mergeStructures(allocationIter->value.structures());
            }
        }

        // Same treatment for the node -> allocation pointer map.
        mergePointerSets(m_pointers, other.m_pointers,
            [&] (Node* identifier) {
                toEscape.addVoid(identifier);
            },
            [&] (Node* field) {
                m_pointers.remove(field);
            });

        for (Node* identifier : toEscape)
            escapeAllocation(identifier);

        if (!ASSERT_DISABLED) {
            for (const auto& entry : m_allocations)
                ASSERT_UNUSED(entry, entry.value.isEscapedAllocation() || other.m_allocations.contains(entry.key));
        }

        // If there is no remaining pointer to an allocation, we can
        // remove it. This should only happen for escaped allocations,
        // because we only merge liveness-pruned heaps in the first
        // place.
        prune();

        assertIsValid();
    }

    // Drops pointers whose node is not in the live set, then prunes
    // allocations no longer reachable from any remaining pointer.
    void pruneByLiveness(const NodeSet& live)
    {
        Vector<Node*> toRemove;
        for (const auto& entry : m_pointers) {
            if (!live.contains(entry.key))
                toRemove.append(entry.key);
        }
        for (Node* node : toRemove)
            m_pointers.remove(node);

        prune();
    }

    void assertIsValid() const
    {
        if (ASSERT_DISABLED)
            return;

        // Pointers should point to an actual allocation
        for (const auto& entry : m_pointers) {
            ASSERT_UNUSED(entry, entry.value);
            ASSERT(m_allocations.contains(entry.value));
        }

        for (const auto& allocationEntry : m_allocations) {
            // Fields should point to an actual allocation
            for (const auto& fieldEntry : allocationEntry.value.fields()) {
                ASSERT_UNUSED(fieldEntry, fieldEntry.value);
                ASSERT(m_allocations.contains(fieldEntry.value));
            }
        }
    }

    bool operator==(const LocalHeap& other) const
    {
        assertIsValid();
        other.assertIsValid();
        return m_allocations == other.m_allocations
            && m_pointers == other.m_pointers;
    }

    bool operator!=(const LocalHeap& other) const
    {
        return !(*this == other);
    }

    const HashMap<Node*, Allocation>& allocations() const
    {
        return m_allocations;
    }

    const HashMap<Node*, Node*>& pointers() const
    {
        return m_pointers;
    }

    void dump(PrintStream& out) const
    {
        out.print("  Allocations:\n");
        for (const auto& entry : m_allocations)
            out.print("    #", entry.key, ": ", entry.value, "\n");
        out.print("  Pointers:\n");
        for (const auto& entry : m_pointers)
            out.print("    ", entry.key, " => #", entry.value, "\n");
    }

    // Whether the dataflow analysis has visited this heap yet; an
    // unreached heap is the identity element for merge().
    bool reached() const
    {
        return m_reached;
    }

    void setReached()
    {
        m_reached = true;
    }

private:
    // When we merge two heaps, we escape all fields of allocations,
    // unless they point to the same thing in both heaps.
    // The reason for this is that it allows us not to do extra work
    // for diamond graphs where we would otherwise have to check
    // whether we have a single definition or not, which would be
    // cumbersome.
    //
    // Note that we should try to unify nodes even when they are not
    // from the same allocation; for instance we should be able to
    // completely eliminate all allocations from the following graph:
    //
    // Block #0
    //  0: NewObject({})
    //  -: Branch(#1, #2)
    //
    // Block #1
    //  1: NewObject({})
    //  -: PutByOffset(@1, "left", val)
    //  -: PutStructure(@1, {val:0})
    //  -: PutByOffset(@0, @1, x)
    //  -: PutStructure(@0, {x:0})
    //  -: Jump(#3)
    //
    // Block #2
    //  2: NewObject({})
    //  -: PutByOffset(@2, "right", val)
    //  -: PutStructure(@2, {val:0})
    //  -: PutByOffset(@0, @2, x)
    //  -: PutStructure(@0, {x:0})
    //  -: Jump(#3)
    //
    // Block #3:
    //  3: GetByOffset(@0, x)
    //  4: GetByOffset(@3, val)
    //  -: Return(@4)
    template<typename Key, typename EscapeFunctor, typename RemoveFunctor>
    void mergePointerSets(
        const HashMap<Key, Node*>& my, const HashMap<Key, Node*>& their,
        const EscapeFunctor& escape, const RemoveFunctor& remove)
    {
        Vector<Key> toRemove;
        for (const auto& entry : my) {
            auto iter = their.find(entry.key);
            if (iter == their.end()) {
                // Present on our side only: drop it and escape its target.
                toRemove.append(entry.key);
                escape(entry.value);
            } else if (iter->value != entry.value) {
                // Present on both sides but disagreeing: drop it and
                // escape both targets.
                toRemove.append(entry.key);
                escape(entry.value);
                escape(iter->value);
            }
        }
        // Present on their side only: escape the target (we never had
        // the entry, so there is nothing to remove on our side).
        for (const auto& entry : their) {
            if (my.contains(entry.key))
                continue;
            escape(entry.value);
        }
        for (Key key : toRemove)
            remove(key);
    }

    // Marks an allocation as escaped, transitively escaping everything
    // its fields point to. If escapee recording is enabled, the
    // pre-escape state of the allocation is saved in m_escapees.
    void escapeAllocation(Node* identifier)
    {
        Allocation& allocation = getAllocation(identifier);
        if (allocation.isEscapedAllocation())
            return;

        Allocation unescaped = WTFMove(allocation);
        allocation = Allocation(unescaped.identifier(), Allocation::Kind::Escaped);

        for (const auto& entry : unescaped.fields())
            escapeAllocation(entry.value);

        if (m_wantEscapees)
            m_escapees.add(unescaped.identifier(), WTFMove(unescaped));
    }

    // Removes every allocation not reachable from m_pointers, directly
    // or through fields of reachable allocations.
    void prune()
    {
        NodeSet reachable;
        for (const auto& entry : m_pointers)
            reachable.addVoid(entry.value);

        // Repeatedly mark as reachable allocations in fields of other
        // reachable allocations
        {
            Vector<Node*> worklist;
            worklist.appendRange(reachable.begin(), reachable.end());

            while (!worklist.isEmpty()) {
                Node* identifier = worklist.takeLast();
                Allocation& allocation = m_allocations.find(identifier)->value;
                for (const auto& entry : allocation.fields()) {
                    if (reachable.add(entry.value).isNewEntry)
                        worklist.append(entry.value);
                }
            }
        }

        // Remove unreachable allocations
        {
            Vector<Node*> toRemove;
            for (const auto& entry : m_allocations) {
                if (!reachable.contains(entry.key))
                    toRemove.append(entry.key);
            }
            for (Node* identifier : toRemove)
                m_allocations.remove(identifier);
        }
    }

    bool m_reached = false;
    // Live node -> allocation identifier it must point to.
    HashMap<Node*, Node*> m_pointers;
    // Allocation identifier -> allocation metadata.
    HashMap<Node*, Allocation> m_allocations;

    bool m_wantEscapees = false;
    HashMap<Node*, Allocation> m_escapees;
};
697
698 class ObjectAllocationSinkingPhase : public Phase {
699 public:
    // Sets up the phase's SSA calculators and insertion set against the
    // graph being compiled; no analysis work happens until run().
    ObjectAllocationSinkingPhase(Graph& graph)
        : Phase(graph, "object allocation elimination")
        , m_pointerSSA(graph)
        , m_allocationSSA(graph)
        , m_insertionSet(graph)
    {
    }
707
708     bool run()
709     {
710         ASSERT(m_graph.m_form == SSA);
711         ASSERT(m_graph.m_fixpointState == FixpointNotConverged);
712
713         if (!performSinking())
714             return false;
715
716         if (verbose) {
717             dataLog("Graph after elimination:\n");
718             m_graph.dump();
719         }
720
721         return true;
722     }
723
724 private:
    // Drives the whole phase: computes the prerequisite analyses, runs
    // the escape analysis, picks sink candidates, and rewrites the graph.
    // Returns false (graph untouched) if nothing can be sunk.
    bool performSinking()
    {
        // Prerequisite graph analyses; ordering matters since later
        // passes consume the earlier results.
        m_graph.computeRefCounts();
        m_graph.initializeNodeOwners();
        m_graph.ensureDominators();
        performLivenessAnalysis(m_graph);
        performOSRAvailabilityAnalysis(m_graph);
        m_combinedLiveness = CombinedLiveness(m_graph);

        // Snapshot the graph up front only if a validation failure would
        // want to print the "before" state.
        CString graphBeforeSinking;
        if (Options::verboseValidationFailure() && Options::validateGraphAtEachPhase()) {
            StringPrintStream out;
            m_graph.dump(out);
            graphBeforeSinking = out.toCString();
        }

        if (verbose) {
            dataLog("Graph before elimination:\n");
            m_graph.dump();
        }

        // Run the abstract-interpretation-style escape analysis to a
        // fixpoint; fills m_heapAtHead / m_heapAtTail.
        performAnalysis();

        if (!determineSinkCandidates())
            return false;

        if (verbose) {
            for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
                dataLog("Heap at head of ", *block, ": \n", m_heapAtHead[block]);
                dataLog("Heap at tail of ", *block, ": \n", m_heapAtTail[block]);
            }
        }

        // Rewrite the graph: sink the candidate allocations and insert
        // materializations where they escape.
        promoteLocalHeap();

        if (Options::validateGraphAtEachPhase())
            DFG::validate(m_graph, DumpGraph, graphBeforeSinking);
        return true;
    }
764
    // Runs the escape analysis to a fixpoint over the CFG, computing the
    // abstract local heap at the head and tail of every block. Iterates
    // until no block's tail heap changes.
    void performAnalysis()
    {
        m_heapAtHead = BlockMap<LocalHeap>(m_graph);
        m_heapAtTail = BlockMap<LocalHeap>(m_graph);

        bool changed;
        do {
            if (verbose)
                dataLog("Doing iteration of escape analysis.\n");
            changed = false;

            for (BasicBlock* block : m_graph.blocksInPreOrder()) {
                m_heapAtHead[block].setReached();
                // Interpret the block's nodes against a copy of the
                // head heap. The write/resolve functors are no-ops in
                // the pure analysis pass (no graph rewriting yet).
                m_heap = m_heapAtHead[block];

                for (Node* node : *block) {
                    handleNode(
                        node,
                        [] (PromotedHeapLocation, LazyNode) { },
                        [&] (PromotedHeapLocation) -> Node* {
                            return nullptr;
                        });
                }

                // Fixpoint check: only propagate to successors when the
                // tail heap actually changed.
                if (m_heap == m_heapAtTail[block])
                    continue;

                m_heapAtTail[block] = m_heap;
                changed = true;

                m_heap.assertIsValid();

                // We keep only pointers that are live, and only
                // allocations that are either live, pointed to by a
                // live pointer, or (recursively) stored in a field of
                // a live allocation.
                //
                // This means we can accidentally leak non-dominating
                // nodes into the successor. However, due to the
                // non-dominance property, we are guaranteed that the
                // successor has at least one predecessor that is not
                // dominated either: this means any reference to a
                // non-dominating allocation in the successor will
                // trigger an escape and get pruned during the merge.
                m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

                for (BasicBlock* successorBlock : block->successors())
                    m_heapAtHead[successorBlock].merge(m_heap);
            }
        } while (changed);
    }
816
817     template<typename WriteFunctor, typename ResolveFunctor>
818     void handleNode(
819         Node* node,
820         const WriteFunctor& heapWrite,
821         const ResolveFunctor& heapResolve)
822     {
823         m_heap.assertIsValid();
824         ASSERT(m_heap.takeEscapees().isEmpty());
825
826         Allocation* target = nullptr;
827         HashMap<PromotedLocationDescriptor, LazyNode> writes;
828         PromotedLocationDescriptor exactRead;
829
830         switch (node->op()) {
831         case NewObject:
832             target = &m_heap.newAllocation(node, Allocation::Kind::Object);
833             target->setStructures(node->structure());
834             writes.add(
835                 StructurePLoc, LazyNode(m_graph.freeze(node->structure().get())));
836             break;
837
838         case NewFunction:
839         case NewGeneratorFunction:
840         case NewAsyncFunction: {
841             if (isStillValid(node->castOperand<FunctionExecutable*>()->singletonFunction())) {
842                 m_heap.escape(node->child1().node());
843                 break;
844             }
845
846             if (node->op() == NewGeneratorFunction)
847                 target = &m_heap.newAllocation(node, Allocation::Kind::GeneratorFunction);
848             else if (node->op() == NewAsyncFunction)
849                 target = &m_heap.newAllocation(node, Allocation::Kind::AsyncFunction);
850             else
851                 target = &m_heap.newAllocation(node, Allocation::Kind::Function);
852
853             writes.add(FunctionExecutablePLoc, LazyNode(node->cellOperand()));
854             writes.add(FunctionActivationPLoc, LazyNode(node->child1().node()));
855             break;
856         }
857
858         case CreateActivation: {
859             if (isStillValid(node->castOperand<SymbolTable*>()->singletonScope())) {
860                 m_heap.escape(node->child1().node());
861                 break;
862             }
863             target = &m_heap.newAllocation(node, Allocation::Kind::Activation);
864             writes.add(ActivationSymbolTablePLoc, LazyNode(node->cellOperand()));
865             writes.add(ActivationScopePLoc, LazyNode(node->child1().node()));
866             {
867                 SymbolTable* symbolTable = node->castOperand<SymbolTable*>();
868                 ConcurrentJSLocker locker(symbolTable->m_lock);
869                 LazyNode initialValue(m_graph.freeze(node->initializationValueForActivation()));
870                 for (auto iter = symbolTable->begin(locker), end = symbolTable->end(locker); iter != end; ++iter) {
871                     writes.add(
872                         PromotedLocationDescriptor(ClosureVarPLoc, iter->value.scopeOffset().offset()),
873                         initialValue);
874                 }
875             }
876             break;
877         }
878
879         case PutStructure:
880             target = m_heap.onlyLocalAllocation(node->child1().node());
881             if (target && target->isObjectAllocation()) {
882                 writes.add(StructurePLoc, LazyNode(m_graph.freeze(JSValue(node->transition()->next.get()))));
883                 target->setStructures(node->transition()->next);
884             } else
885                 m_heap.escape(node->child1().node());
886             break;
887
888         case CheckStructure: {
889             Allocation* allocation = m_heap.onlyLocalAllocation(node->child1().node());
890             if (allocation && allocation->isObjectAllocation()) {
891                 allocation->filterStructures(node->structureSet());
892                 if (Node* value = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc)))
893                     node->convertToCheckStructureImmediate(value);
894             } else
895                 m_heap.escape(node->child1().node());
896             break;
897         }
898
899         case GetByOffset:
900         case GetGetterSetterByOffset:
901             target = m_heap.onlyLocalAllocation(node->child2().node());
902             if (target && target->isObjectAllocation()) {
903                 unsigned identifierNumber = node->storageAccessData().identifierNumber;
904                 exactRead = PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber);
905             } else {
906                 m_heap.escape(node->child1().node());
907                 m_heap.escape(node->child2().node());
908             }
909             break;
910
911         case MultiGetByOffset: {
912             Allocation* allocation = m_heap.onlyLocalAllocation(node->child1().node());
913             if (allocation && allocation->isObjectAllocation()) {
914                 MultiGetByOffsetData& data = node->multiGetByOffsetData();
915                 RegisteredStructureSet validStructures;
916                 bool hasInvalidStructures = false;
917                 for (const auto& multiGetByOffsetCase : data.cases) {
918                     if (!allocation->structures().overlaps(multiGetByOffsetCase.set()))
919                         continue;
920
921                     switch (multiGetByOffsetCase.method().kind()) {
922                     case GetByOffsetMethod::LoadFromPrototype: // We need to escape those
923                     case GetByOffsetMethod::Constant: // We don't really have a way of expressing this
924                         hasInvalidStructures = true;
925                         break;
926
927                     case GetByOffsetMethod::Load: // We're good
928                         validStructures.merge(multiGetByOffsetCase.set());
929                         break;
930
931                     default:
932                         RELEASE_ASSERT_NOT_REACHED();
933                     }
934                 }
935                 if (hasInvalidStructures) {
936                     m_heap.escape(node->child1().node());
937                     break;
938                 }
939                 unsigned identifierNumber = data.identifierNumber;
940                 PromotedHeapLocation location(NamedPropertyPLoc, allocation->identifier(), identifierNumber);
941                 if (Node* value = heapResolve(location)) {
942                     if (allocation->structures().isSubsetOf(validStructures))
943                         node->replaceWith(value);
944                     else {
945                         Node* structure = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc));
946                         ASSERT(structure);
947                         allocation->filterStructures(validStructures);
948                         node->convertToCheckStructure(m_graph.addStructureSet(allocation->structures()));
949                         node->convertToCheckStructureImmediate(structure);
950                         node->setReplacement(value);
951                     }
952                 } else if (!allocation->structures().isSubsetOf(validStructures)) {
953                     // Even though we don't need the result here, we still need
954                     // to make the call to tell our caller that we could need
955                     // the StructurePLoc.
956                     // The reason for this is that when we decide not to sink a
957                     // node, we will still lower any read to its fields before
958                     // it escapes (which are usually reads across a function
959                     // call that DFGClobberize can't handle) - but we only do
960                     // this for PromotedHeapLocations that we have seen read
961                     // during the analysis!
962                     heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc));
963                     allocation->filterStructures(validStructures);
964                 }
965                 Node* identifier = allocation->get(location.descriptor());
966                 if (identifier)
967                     m_heap.newPointer(node, identifier);
968             } else
969                 m_heap.escape(node->child1().node());
970             break;
971         }
972
973         case PutByOffset:
974             target = m_heap.onlyLocalAllocation(node->child2().node());
975             if (target && target->isObjectAllocation()) {
976                 unsigned identifierNumber = node->storageAccessData().identifierNumber;
977                 writes.add(
978                     PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber),
979                     LazyNode(node->child3().node()));
980             } else {
981                 m_heap.escape(node->child1().node());
982                 m_heap.escape(node->child2().node());
983                 m_heap.escape(node->child3().node());
984             }
985             break;
986
987         case GetClosureVar:
988             target = m_heap.onlyLocalAllocation(node->child1().node());
989             if (target && target->isActivationAllocation()) {
990                 exactRead =
991                     PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset());
992             } else
993                 m_heap.escape(node->child1().node());
994             break;
995
996         case PutClosureVar:
997             target = m_heap.onlyLocalAllocation(node->child1().node());
998             if (target && target->isActivationAllocation()) {
999                 writes.add(
1000                     PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset()),
1001                     LazyNode(node->child2().node()));
1002             } else {
1003                 m_heap.escape(node->child1().node());
1004                 m_heap.escape(node->child2().node());
1005             }
1006             break;
1007
1008         case SkipScope:
1009             target = m_heap.onlyLocalAllocation(node->child1().node());
1010             if (target && target->isActivationAllocation())
1011                 exactRead = ActivationScopePLoc;
1012             else
1013                 m_heap.escape(node->child1().node());
1014             break;
1015
1016         case GetExecutable:
1017             target = m_heap.onlyLocalAllocation(node->child1().node());
1018             if (target && target->isFunctionAllocation())
1019                 exactRead = FunctionExecutablePLoc;
1020             else
1021                 m_heap.escape(node->child1().node());
1022             break;
1023
1024         case GetScope:
1025             target = m_heap.onlyLocalAllocation(node->child1().node());
1026             if (target && target->isFunctionAllocation())
1027                 exactRead = FunctionActivationPLoc;
1028             else
1029                 m_heap.escape(node->child1().node());
1030             break;
1031
1032         case Check:
1033             m_graph.doToChildren(
1034                 node,
1035                 [&] (Edge edge) {
1036                     if (edge.willNotHaveCheck())
1037                         return;
1038
1039                     if (alreadyChecked(edge.useKind(), SpecObject))
1040                         return;
1041
1042                     m_heap.escape(edge.node());
1043                 });
1044             break;
1045
1046         case MovHint:
1047         case PutHint:
1048             // Handled by OSR availability analysis
1049             break;
1050
1051         default:
1052             m_graph.doToChildren(
1053                 node,
1054                 [&] (Edge edge) {
1055                     m_heap.escape(edge.node());
1056                 });
1057             break;
1058         }
1059
1060         if (exactRead) {
1061             ASSERT(target);
1062             ASSERT(writes.isEmpty());
1063             if (Node* value = heapResolve(PromotedHeapLocation(target->identifier(), exactRead))) {
1064                 ASSERT(!value->replacement());
1065                 node->replaceWith(value);
1066             }
1067             Node* identifier = target->get(exactRead);
1068             if (identifier)
1069                 m_heap.newPointer(node, identifier);
1070         }
1071
1072         for (auto entry : writes) {
1073             ASSERT(target);
1074             if (entry.value.isNode())
1075                 target->set(entry.key, m_heap.follow(entry.value.asNode()));
1076             else
1077                 target->remove(entry.key);
1078             heapWrite(PromotedHeapLocation(target->identifier(), entry.key), entry.value);
1079         }
1080
1081         m_heap.assertIsValid();
1082     }
1083
    // Decides which local allocations are worth sinking and, for those, creates
    // the materialization nodes at the program points where they escape.
    // Returns true if another analysis round is warranted (we saw unescaped
    // reads of promoted locations, or we actually selected sink candidates).
    bool determineSinkCandidates()
    {
        m_sinkCandidates.clear();
        m_materializationToEscapee.clear();
        m_materializationSiteToMaterializations.clear();
        m_materializationSiteToRecoveries.clear();
        m_materializationSiteToHints.clear();

        // Logically we wish to consider every allocation and sink
        // it. However, it is probably not profitable to sink an
        // allocation that will always escape. So, we only sink an
        // allocation if one of the following is true:
        //
        // 1) There exists a basic block with only backwards outgoing
        //    edges (or no outgoing edges) in which the node wasn't
        //    materialized. This is meant to catch
        //    effectively-infinite loops in which we don't need to
        //    have allocated the object.
        //
        // 2) There exists a basic block at the tail of which the node
        //    is dead and not materialized.
        //
        // 3) The sum of execution counts of the materializations is
        //    less than the sum of execution counts of the original
        //    node.
        //
        // We currently implement only rule #2.
        // FIXME: Implement the two other rules.
        // https://bugs.webkit.org/show_bug.cgi?id=137073 (rule #1)
        // https://bugs.webkit.org/show_bug.cgi?id=137074 (rule #3)
        //
        // However, these rules allow for a sunk object to be put into
        // a non-sunk one, which we don't support. We could solve this
        // by supporting PutHints on local allocations, making these
        // objects only partially correct, and we would need to adapt
        // the OSR availability analysis and OSR exit to handle
        // this. This would be totally doable, but would create a
        // super rare, and thus bug-prone, code path.
        // So, instead, we need to implement one of the following
        // closure rules:
        //
        // 1) If we put a sink candidate into a local allocation that
        //    is not a sink candidate, change our minds and don't
        //    actually sink the sink candidate.
        //
        // 2) If we put a sink candidate into a local allocation, that
        //    allocation becomes a sink candidate as well.
        //
        // We currently choose to implement closure rule #2.
        //
        // dependencies maps a stored allocation's identifier to the identifiers
        // of the allocations it was stored into; it drives the closure-rule-#2
        // worklist below.
        HashMap<Node*, Vector<Node*>> dependencies;
        bool hasUnescapedReads = false;
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            m_heap = m_heapAtHead[block];

            for (Node* node : *block) {
                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        // Only writes of actual nodes (not lazily-frozen
                        // constants) can store one local allocation into
                        // another, so only those create dependency edges.
                        if (!value.isNode())
                            return;

                        Allocation* allocation = m_heap.onlyLocalAllocation(value.asNode());
                        if (allocation && !allocation->isEscapedAllocation())
                            dependencies.add(allocation->identifier(), Vector<Node*>()).iterator->value.append(location.base());
                    },
                    [&] (PromotedHeapLocation) -> Node* {
                        hasUnescapedReads = true;
                        return nullptr;
                    });
            }

            // The sink candidates are initially the unescaped
            // allocations dying at tail of blocks
            NodeSet allocations;
            for (const auto& entry : m_heap.allocations()) {
                if (!entry.value.isEscapedAllocation())
                    allocations.addVoid(entry.key);
            }

            m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

            // Any allocation that was unescaped just before pruning but is no
            // longer an allocation afterwards is dead at this block's tail
            // without having been materialized (rule #2 above).
            for (Node* identifier : allocations) {
                if (!m_heap.isAllocation(identifier))
                    m_sinkCandidates.addVoid(identifier);
            }
        }

        // Ensure that the set of sink candidates is closed for put operations
        // (closure rule #2): anything a sink candidate is stored into becomes
        // a sink candidate too, transitively.
        Vector<Node*> worklist;
        worklist.appendRange(m_sinkCandidates.begin(), m_sinkCandidates.end());

        while (!worklist.isEmpty()) {
            for (Node* identifier : dependencies.get(worklist.takeLast())) {
                if (m_sinkCandidates.add(identifier).isNewEntry)
                    worklist.append(identifier);
            }
        }

        if (m_sinkCandidates.isEmpty())
            return hasUnescapedReads;

        if (verbose)
            dataLog("Candidates: ", listDump(m_sinkCandidates), "\n");

        // Create the materialization nodes.
        // Second pass: rerun the abstract interpretation with escapee tracking
        // enabled so we can place a materialization at every point where a
        // candidate escapes, including along control-flow edges.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            m_heap = m_heapAtHead[block];
            m_heap.setWantEscapees();

            for (Node* node : *block) {
                handleNode(
                    node,
                    [] (PromotedHeapLocation, LazyNode) { },
                    [] (PromotedHeapLocation) -> Node* {
                        return nullptr;
                    });
                auto escapees = m_heap.takeEscapees();
                if (!escapees.isEmpty())
                    placeMaterializations(escapees, node);
            }

            m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

            {
                // An allocation that is unescaped at this block's tail but is
                // missing or escaped at the head of any successor must be
                // materialized on the edge, i.e. just before the terminal.
                HashMap<Node*, Allocation> escapingOnEdge;
                for (const auto& entry : m_heap.allocations()) {
                    if (entry.value.isEscapedAllocation())
                        continue;

                    bool mustEscape = false;
                    for (BasicBlock* successorBlock : block->successors()) {
                        if (!m_heapAtHead[successorBlock].isAllocation(entry.key)
                            || m_heapAtHead[successorBlock].getAllocation(entry.key).isEscapedAllocation())
                            mustEscape = true;
                    }

                    if (mustEscape)
                        escapingOnEdge.add(entry.key, entry.value);
                }
                placeMaterializations(WTFMove(escapingOnEdge), block->terminal());
            }
        }

        return hasUnescapedReads || !m_sinkCandidates.isEmpty();
    }
1229
    // Given the set of allocations escaping at node |where|, orders and creates
    // their materializations, and records the recoveries (real Puts that close
    // cycles) and PutHints needed at that site. |escapees| is taken by value
    // because entries are consumed (moved from / removed) during ordering.
    void placeMaterializations(HashMap<Node*, Allocation> escapees, Node* where)
    {
        // We don't create materializations if the escapee is not a
        // sink candidate
        Vector<Node*> toRemove;
        for (const auto& entry : escapees) {
            if (!m_sinkCandidates.contains(entry.key))
                toRemove.append(entry.key);
        }
        for (Node* identifier : toRemove)
            escapees.remove(identifier);

        if (escapees.isEmpty())
            return;

        // First collect the hints that will be needed when the node
        // we materialize is still stored into other unescaped sink candidates.
        // The way to interpret this vector is:
        //
        // PromotedHeapLocation(NotEscapedAllocation, field) = identifierAllocation
        //
        // e.g:
        // PromotedHeapLocation(@PhantomNewFunction, FunctionActivationPLoc) = IdentifierOf(@MaterializeCreateActivation)
        // or:
        // PromotedHeapLocation(@PhantomCreateActivation, ClosureVarPLoc(x)) = IdentifierOf(@NewFunction)
        //
        // When the rhs of the `=` is to be materialized at this `where` point in the program
        // and IdentifierOf(Materialization) is the original sunken allocation of the materialization.
        //
        // The reason we need to collect all the `identifiers` here is that
        // we may materialize multiple versions of the allocation along control
        // flow edges. We will PutHint these values along those edges. However,
        // we also need to PutHint them when we join and have a Phi of the allocations.
        Vector<std::pair<PromotedHeapLocation, Node*>> hints;
        for (const auto& entry : m_heap.allocations()) {
            if (escapees.contains(entry.key))
                continue;

            for (const auto& field : entry.value.fields()) {
                ASSERT(m_sinkCandidates.contains(entry.key) || !escapees.contains(field.value));
                auto iter = escapees.find(field.value);
                if (iter != escapees.end()) {
                    ASSERT(m_sinkCandidates.contains(field.value));
                    hints.append(std::make_pair(PromotedHeapLocation(entry.key, field.key), field.value));
                }
            }
        }

        // Now we need to order the materialization. Any order is
        // valid (as long as we materialize a node first if it is
        // needed for the materialization of another node, e.g. a
        // function's activation must be materialized before the
        // function itself), but we want to try minimizing the number
        // of times we have to place Puts to close cycles after a
        // materialization. In other words, we are trying to find the
        // minimum number of materializations to remove from the
        // materialization graph to make it a DAG, known as the
        // (vertex) feedback set problem. Unfortunately, this is a
        // NP-hard problem, which we don't want to solve exactly.
        //
        // Instead, we use a simple greedy procedure, that procedes as
        // follow:
        //  - While there is at least one node with no outgoing edge
        //    amongst the remaining materializations, materialize it
        //    first
        //
        //  - Similarily, while there is at least one node with no
        //    incoming edge amongst the remaining materializations,
        //    materialize it last.
        //
        //  - When both previous conditions are false, we have an
        //    actual cycle, and we need to pick a node to
        //    materialize. We try greedily to remove the "pressure" on
        //    the remaining nodes by choosing the node with maximum
        //    |incoming edges| * |outgoing edges| as a measure of how
        //    "central" to the graph it is. We materialize it first,
        //    so that all the recoveries will be Puts of things into
        //    it (rather than Puts of the materialization into other
        //    objects), which means we will have a single
        //    StoreBarrier.


        // Compute dependencies between materializations.
        // dependencies: fields of this allocation that point at other escapees.
        // reverseDependencies: escapees whose fields point at this allocation.
        // forMaterialization: the subset of dependencies that must already be
        // materialized for this allocation's materialization node itself.
        HashMap<Node*, NodeSet> dependencies;
        HashMap<Node*, NodeSet> reverseDependencies;
        HashMap<Node*, NodeSet> forMaterialization;
        for (const auto& entry : escapees) {
            auto& myDependencies = dependencies.add(entry.key, NodeSet()).iterator->value;
            auto& myDependenciesForMaterialization = forMaterialization.add(entry.key, NodeSet()).iterator->value;
            reverseDependencies.add(entry.key, NodeSet());
            for (const auto& field : entry.value.fields()) {
                // Self-references don't constrain ordering.
                if (escapees.contains(field.value) && field.value != entry.key) {
                    myDependencies.addVoid(field.value);
                    reverseDependencies.add(field.value, NodeSet()).iterator->value.addVoid(entry.key);
                    if (field.key.neededForMaterialization())
                        myDependenciesForMaterialization.addVoid(field.value);
                }
            }
        }

        // Helper function to update the materialized set and the
        // dependencies: marks |identifier| materialized and removes all of its
        // edges from the three maps above.
        NodeSet materialized;
        auto materialize = [&] (Node* identifier) {
            materialized.addVoid(identifier);
            for (Node* dep : dependencies.get(identifier))
                reverseDependencies.find(dep)->value.remove(identifier);
            for (Node* rdep : reverseDependencies.get(identifier)) {
                dependencies.find(rdep)->value.remove(identifier);
                forMaterialization.find(rdep)->value.remove(identifier);
            }
            dependencies.remove(identifier);
            reverseDependencies.remove(identifier);
            forMaterialization.remove(identifier);
        };

        // Nodes without remaining unmaterialized fields will be
        // materialized first - amongst the remaining unmaterialized
        // nodes
        std::list<Allocation> toMaterialize;
        auto firstPos = toMaterialize.begin();
        auto materializeFirst = [&] (Allocation&& allocation) {
            materialize(allocation.identifier());
            // We need to insert *after* the current position
            // (successive "first" materializations keep their relative order;
            // std::list iterators remain valid across insert).
            if (firstPos != toMaterialize.end())
                ++firstPos;
            firstPos = toMaterialize.insert(firstPos, WTFMove(allocation));
        };

        // Nodes that no other unmaterialized node points to will be
        // materialized last - amongst the remaining unmaterialized
        // nodes
        auto lastPos = toMaterialize.end();
        auto materializeLast = [&] (Allocation&& allocation) {
            materialize(allocation.identifier());
            lastPos = toMaterialize.insert(lastPos, WTFMove(allocation));
        };

        // These are the promoted locations that contains some of the
        // allocations we are currently escaping. If they are a location on
        // some other allocation we are currently materializing, we will need
        // to "recover" their value with a real put once the corresponding
        // allocation is materialized; if they are a location on some other
        // not-yet-materialized allocation, we will need a PutHint.
        Vector<PromotedHeapLocation> toRecover;

        // This loop does the actual cycle breaking
        while (!escapees.isEmpty()) {
            materialized.clear();

            // Materialize nodes that won't require recoveries if we can
            for (auto& entry : escapees) {
                if (!forMaterialization.find(entry.key)->value.isEmpty())
                    continue;

                if (dependencies.find(entry.key)->value.isEmpty()) {
                    materializeFirst(WTFMove(entry.value));
                    continue;
                }

                if (reverseDependencies.find(entry.key)->value.isEmpty()) {
                    materializeLast(WTFMove(entry.value));
                    continue;
                }
            }

            // We reach this only if there is an actual cycle that needs
            // breaking. Because we do not want to solve a NP-hard problem
            // here, we just heuristically pick a node and materialize it
            // first.
            if (materialized.isEmpty()) {
                uint64_t maxEvaluation = 0;
                Allocation* bestAllocation = nullptr;
                for (auto& entry : escapees) {
                    if (!forMaterialization.find(entry.key)->value.isEmpty())
                        continue;

                    // "Centrality" heuristic: in-degree * out-degree.
                    uint64_t evaluation =
                        static_cast<uint64_t>(dependencies.get(entry.key).size()) * reverseDependencies.get(entry.key).size();
                    if (evaluation > maxEvaluation) {
                        maxEvaluation = evaluation;
                        bestAllocation = &entry.value;
                    }
                }
                RELEASE_ASSERT(maxEvaluation > 0);

                materializeFirst(WTFMove(*bestAllocation));
            }
            RELEASE_ASSERT(!materialized.isEmpty());

            for (Node* identifier : materialized)
                escapees.remove(identifier);
        }

        materialized.clear();

        // Walk the final materialization order: any field pointing at an
        // escapee that is materialized *later* in the order is a cycle edge
        // and must be recovered with a real Put after both exist.
        NodeSet escaped;
        for (const Allocation& allocation : toMaterialize)
            escaped.addVoid(allocation.identifier());
        for (const Allocation& allocation : toMaterialize) {
            for (const auto& field : allocation.fields()) {
                if (escaped.contains(field.value) && !materialized.contains(field.value))
                    toRecover.append(PromotedHeapLocation(allocation.identifier(), field.key));
            }
            materialized.addVoid(allocation.identifier());
        }

        Vector<Node*>& materializations = m_materializationSiteToMaterializations.add(
            where, Vector<Node*>()).iterator->value;

        for (const Allocation& allocation : toMaterialize) {
            Node* materialization = createMaterialization(allocation, where);
            materializations.append(materialization);
            m_materializationToEscapee.add(materialization, allocation.identifier());
        }

        if (!toRecover.isEmpty()) {
            m_materializationSiteToRecoveries.add(
                where, Vector<PromotedHeapLocation>()).iterator->value.appendVector(toRecover);
        }

        // The hints need to be after the "real" recoveries so that we
        // don't hint not-yet-complete objects
        m_materializationSiteToHints.add(
            where, Vector<std::pair<PromotedHeapLocation, Node*>>()).iterator->value.appendVector(hints);
    }
1456
1457     Node* createMaterialization(const Allocation& allocation, Node* where)
1458     {
1459         // FIXME: This is the only place where we actually use the
1460         // fact that an allocation's identifier is indeed the node
1461         // that created the allocation.
1462         switch (allocation.kind()) {
1463         case Allocation::Kind::Object: {
1464             ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
1465
1466             return m_graph.addNode(
1467                 allocation.identifier()->prediction(), Node::VarArg, MaterializeNewObject,
1468                 where->origin.withSemantic(allocation.identifier()->origin.semantic),
1469                 OpInfo(m_graph.addStructureSet(allocation.structures())), OpInfo(data), 0, 0);
1470         }
1471
1472         case Allocation::Kind::AsyncFunction:
1473         case Allocation::Kind::GeneratorFunction:
1474         case Allocation::Kind::Function: {
1475             FrozenValue* executable = allocation.identifier()->cellOperand();
1476             
1477             NodeType nodeType =
1478                 allocation.kind() == Allocation::Kind::GeneratorFunction ? NewGeneratorFunction :
1479                 allocation.kind() == Allocation::Kind::AsyncFunction ? NewAsyncFunction : NewFunction;
1480             
1481             return m_graph.addNode(
1482                 allocation.identifier()->prediction(), nodeType,
1483                 where->origin.withSemantic(
1484                     allocation.identifier()->origin.semantic),
1485                 OpInfo(executable));
1486             break;
1487         }
1488
1489         case Allocation::Kind::Activation: {
1490             ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
1491             FrozenValue* symbolTable = allocation.identifier()->cellOperand();
1492
1493             return m_graph.addNode(
1494                 allocation.identifier()->prediction(), Node::VarArg, MaterializeCreateActivation,
1495                 where->origin.withSemantic(
1496                     allocation.identifier()->origin.semantic),
1497                 OpInfo(symbolTable), OpInfo(data), 0, 0);
1498         }
1499
1500         default:
1501             DFG_CRASH(m_graph, allocation.identifier(), "Bad allocation kind");
1502         }
1503     }
1504
    // Performs the actual rewriting for promotion: builds SSA both over the
    // promoted heap locations ("pointer" SSA) and over the sunk allocations
    // themselves ("allocation" SSA), inserts the Phi/Upsilon nodes those SSA
    // computations require, wires up the materialization nodes at escape
    // sites, converts each sunk allocation node into its phantom form, and
    // emits PutHints so that OSR exit can reconstruct the sunk objects.
    void promoteLocalHeap()
    {
        // Collect the set of heap locations that we will be operating
        // over.
        HashSet<PromotedHeapLocation> locations;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            m_heap = m_heapAtHead[block];

            for (Node* node : *block) {
                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode) {
                        // Writes are only tracked when they target a sink
                        // candidate; writes to other allocations don't need
                        // promotion.
                        // If the location is not on a sink candidate,
                        // we only sink it if it is read
                        if (m_sinkCandidates.contains(location.base()))
                            locations.addVoid(location);
                    },
                    [&] (PromotedHeapLocation location) -> Node* {
                        // Reads are always tracked, regardless of base.
                        locations.addVoid(location);
                        return nullptr;
                    });
            }
        }

        // Figure out which locations belong to which allocations.
        m_locationsForAllocation.clear();
        for (PromotedHeapLocation location : locations) {
            auto result = m_locationsForAllocation.add(
                location.base(),
                Vector<PromotedHeapLocation>());
            ASSERT(!result.iterator->value.contains(location));
            result.iterator->value.append(location);
        }

        m_pointerSSA.reset();
        m_allocationSSA.reset();

        // Collect the set of "variables" that we will be sinking.
        // Each sink candidate gets an allocation-SSA variable, and each
        // promoted location gets a pointer-SSA variable; the indexToNode /
        // indexToLocation vectors invert the variable->index() mapping.
        m_locationToVariable.clear();
        m_nodeToVariable.clear();
        Vector<Node*> indexToNode;
        Vector<PromotedHeapLocation> indexToLocation;

        for (Node* index : m_sinkCandidates) {
            SSACalculator::Variable* variable = m_allocationSSA.newVariable();
            m_nodeToVariable.add(index, variable);
            ASSERT(indexToNode.size() == variable->index());
            indexToNode.append(index);
        }

        for (PromotedHeapLocation location : locations) {
            SSACalculator::Variable* variable = m_pointerSSA.newVariable();
            m_locationToVariable.add(location, variable);
            ASSERT(indexToLocation.size() == variable->index());
            indexToLocation.append(location);
        }

        // We insert all required constants at top of block 0 so that
        // they are inserted only once and we don't clutter the graph
        // with useless constants everywhere
        HashMap<FrozenValue*, Node*> lazyMapping;
        if (!m_bottom)
            m_bottom = m_insertionSet.insertConstant(0, m_graph.block(0)->at(0)->origin, jsNumber(1927));
            // NOTE: jsNumber(1927) appears to be an arbitrary marker value for
            // the shared "bottom" constant — its exact value should never be
            // observable.

        // For each allocation-SSA variable, the set of promoted locations
        // whose PutHints must be re-emitted right after that variable's Phi.
        Vector<HashSet<PromotedHeapLocation>> hintsForPhi(m_sinkCandidates.size());

        // First pass: record the SSA defs for both calculators (and the
        // Phi-hint sets), without modifying the graph beyond the constants
        // inserted into block 0 above.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            m_heap = m_heapAtHead[block];

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                // Some named properties can be added conditionally,
                // and that would necessitate bottoms
                for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
                    if (location.kind() != NamedPropertyPLoc)
                        continue;

                    SSACalculator::Variable* variable = m_locationToVariable.get(location);
                    m_pointerSSA.newDef(variable, block, m_bottom);
                }

                // Each materialization defines the allocation-SSA variable of
                // the allocation it materializes.
                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialization);
                    m_allocationSSA.newDef(m_nodeToVariable.get(escapee), block, materialization);
                }

                for (std::pair<PromotedHeapLocation, Node*> pair : m_materializationSiteToHints.get(node)) {
                    PromotedHeapLocation location = pair.first;
                    Node* identifier = pair.second;
                    // We're materializing `identifier` at this point, and the unmaterialized
                    // version is inside `location`. We track which SSA variable this belongs
                    // to in case we also need a PutHint for the Phi.
                    if (UNLIKELY(validationEnabled())) {
                        RELEASE_ASSERT(m_sinkCandidates.contains(location.base()));
                        RELEASE_ASSERT(m_sinkCandidates.contains(identifier));

                        bool found = false;
                        for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                            // We're materializing `identifier` here. This asserts that this is indeed the case.
                            if (m_materializationToEscapee.get(materialization) == identifier) {
                                found = true;
                                break;
                            }
                        }
                        RELEASE_ASSERT(found);
                    }

                    SSACalculator::Variable* variable = m_nodeToVariable.get(identifier);
                    hintsForPhi[variable->index()].addVoid(location);
                }

                // A sink candidate is itself a def of its own allocation-SSA
                // variable at the point where it occurs.
                if (m_sinkCandidates.contains(node))
                    m_allocationSSA.newDef(m_nodeToVariable.get(node), block, node);

                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        if (!locations.contains(location))
                            return;

                        // Lazy (constant) values are instantiated at most
                        // once, in block 0, via lazyMapping.
                        Node* nodeValue;
                        if (value.isNode())
                            nodeValue = value.asNode();
                        else {
                            auto iter = lazyMapping.find(value.asValue());
                            if (iter != lazyMapping.end())
                                nodeValue = iter->value;
                            else {
                                nodeValue = value.ensureIsNode(
                                    m_insertionSet, m_graph.block(0), 0);
                                lazyMapping.add(value.asValue(), nodeValue);
                            }
                        }

                        SSACalculator::Variable* variable = m_locationToVariable.get(location);
                        m_pointerSSA.newDef(variable, block, nodeValue);
                    },
                    [] (PromotedHeapLocation) -> Node* {
                        return nullptr;
                    });
            }
        }
        m_insertionSet.execute(m_graph.block(0));

        // Run the SSA calculators to create Phis
        m_pointerSSA.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                PromotedHeapLocation location = indexToLocation[variable->index()];

                // Don't create Phi nodes for fields of dead allocations
                if (!m_heapAtHead[block].isAllocation(location.base()))
                    return nullptr;

                // Don't create Phi nodes once we are escaped
                if (m_heapAtHead[block].getAllocation(location.base()).isEscapedAllocation())
                    return nullptr;

                // If we point to a single allocation, we will
                // directly use its materialization
                if (m_heapAtHead[block].follow(location))
                    return nullptr;

                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, block->at(0)->origin.withInvalidExit());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });

        m_allocationSSA.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                Node* identifier = indexToNode[variable->index()];

                // Don't create Phi nodes for dead allocations
                if (!m_heapAtHead[block].isAllocation(identifier))
                    return nullptr;

                // Don't create Phi nodes until we are escaped
                // (an unescaped allocation needs no materialization, hence
                // no Phi over materializations).
                if (!m_heapAtHead[block].getAllocation(identifier).isEscapedAllocation())
                    return nullptr;

                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, block->at(0)->origin.withInvalidExit());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });

        // Place Phis in the right places, replace all uses of any load with the appropriate
        // value, and create the materialization nodes.
        LocalOSRAvailabilityCalculator availabilityCalculator(m_graph);
        m_graph.clearReplacements();
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            m_heap = m_heapAtHead[block];
            availabilityCalculator.beginBlock(block);

            // These mapping tables are intended to be lazy. If
            // something is omitted from the table, it means that
            // there haven't been any local stores to the promoted
            // heap location (or any local materialization).
            m_localMapping.clear();
            m_escapeeToMaterialization.clear();

            // Insert the Phi functions that we had previously
            // created.
            for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(block)) {
                SSACalculator::Variable* variable = phiDef->variable();
                m_insertionSet.insert(0, phiDef->value());

                PromotedHeapLocation location = indexToLocation[variable->index()];
                m_localMapping.set(location, phiDef->value());

                if (m_sinkCandidates.contains(location.base())) {
                    // The Phi carries new OSR-visible state for a sunk
                    // allocation, so the exit state needs a matching hint.
                    m_insertionSet.insert(
                        0,
                        location.createHint(
                            m_graph, block->at(0)->origin.withInvalidExit(), phiDef->value()));
                }
            }

            for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(block)) {
                SSACalculator::Variable* variable = phiDef->variable();
                m_insertionSet.insert(0, phiDef->value());

                Node* identifier = indexToNode[variable->index()];
                m_escapeeToMaterialization.add(identifier, phiDef->value());
                bool canExit = false;
                insertOSRHintsForUpdate(
                    0, block->at(0)->origin, canExit,
                    availabilityCalculator.m_availability, identifier, phiDef->value());

                // Re-emit the PutHints recorded for this Phi's variable
                // during the first pass.
                for (PromotedHeapLocation location : hintsForPhi[variable->index()]) {
                    m_insertionSet.insert(0,
                        location.createHint(m_graph, block->at(0)->origin.withInvalidExit(), phiDef->value()));
                    m_localMapping.set(location, phiDef->value());
                }
            }

            if (verbose) {
                dataLog("Local mapping at ", pointerDump(block), ": ", mapDump(m_localMapping), "\n");
                dataLog("Local materializations at ", pointerDump(block), ": ", mapDump(m_escapeeToMaterialization), "\n");
            }

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                // canExit governs hints inserted before `node`; nextCanExit
                // governs hints inserted after it. takeValidExit() flips
                // them to false once an exit-invalidating hint is emitted.
                bool canExit = true;
                bool nextCanExit = node->origin.exitOK;
                for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
                    if (location.kind() != NamedPropertyPLoc)
                        continue;

                    // Named properties start out at bottom because they may
                    // be added only on some paths.
                    m_localMapping.set(location, m_bottom);

                    if (m_sinkCandidates.contains(node)) {
                        if (verbose)
                            dataLog("For sink candidate ", node, " found location ", location, "\n");
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            location.createHint(
                                m_graph, node->origin.takeValidExit(nextCanExit), m_bottom));
                    }
                }

                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                    materialization->origin.exitOK &= canExit;
                    Node* escapee = m_materializationToEscapee.get(materialization);
                    populateMaterialization(block, materialization, escapee);
                    m_escapeeToMaterialization.set(escapee, materialization);
                    m_insertionSet.insert(nodeIndex, materialization);
                    if (verbose)
                        dataLog("Materializing ", escapee, " => ", materialization, " at ", node, "\n");
                }

                for (PromotedHeapLocation location : m_materializationSiteToRecoveries.get(node))
                    m_insertionSet.insert(nodeIndex, createRecovery(block, location, node, canExit));
                for (std::pair<PromotedHeapLocation, Node*> pair : m_materializationSiteToHints.get(node))
                    m_insertionSet.insert(nodeIndex, createRecovery(block, pair.first, node, canExit));

                // We need to put the OSR hints after the recoveries,
                // because we only want the hints once the object is
                // complete
                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialization);
                    insertOSRHintsForUpdate(
                        nodeIndex, node->origin, canExit,
                        availabilityCalculator.m_availability, escapee, materialization);
                }

                if (node->origin.exitOK && !canExit) {
                    // We indicate that the exit state is fine now. It is OK because we updated the
                    // state above. We need to indicate this manually because the validation doesn't
                    // have enough information to infer that the exit state is fine.
                    m_insertionSet.insertNode(nodeIndex, SpecNone, ExitOK, node->origin);
                }

                // From here on, a sink candidate locally materializes as
                // itself (its phantom form, after the conversion below).
                if (m_sinkCandidates.contains(node))
                    m_escapeeToMaterialization.set(node, node);

                availabilityCalculator.executeNode(node);

                bool desiredNextExitOK = node->origin.exitOK && !clobbersExitState(m_graph, node);

                bool doLower = false;
                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        if (!locations.contains(location))
                            return;

                        Node* nodeValue;
                        if (value.isNode())
                            nodeValue = value.asNode();
                        else
                            nodeValue = lazyMapping.get(value.asValue());

                        nodeValue = resolve(block, nodeValue);

                        m_localMapping.set(location, nodeValue);

                        if (!m_sinkCandidates.contains(location.base()))
                            return;

                        // A write to a sink candidate means the defining
                        // node must be lowered (converted/removed) below.
                        doLower = true;

                        if (verbose)
                            dataLog("Creating hint with value ", nodeValue, " before ", node, "\n");
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            location.createHint(
                                m_graph, node->origin.takeValidExit(nextCanExit), nodeValue));
                    },
                    [&] (PromotedHeapLocation location) -> Node* {
                        return resolve(block, location);
                    });

                if (!nextCanExit && desiredNextExitOK) {
                    // We indicate that the exit state is fine now. We need to do this because we
                    // emitted hints that appear to invalidate the exit state.
                    m_insertionSet.insertNode(nodeIndex + 1, SpecNone, ExitOK, node->origin);
                }

                // Convert sunk allocations to their phantom forms; any other
                // lowered node (e.g. a PutByOffset on a sunk object) is
                // simply removed since the hints now carry its effect.
                if (m_sinkCandidates.contains(node) || doLower) {
                    switch (node->op()) {
                    case NewObject:
                        node->convertToPhantomNewObject();
                        break;

                    case NewFunction:
                        node->convertToPhantomNewFunction();
                        break;

                    case NewGeneratorFunction:
                        node->convertToPhantomNewGeneratorFunction();
                        break;

                    case NewAsyncFunction:
                        node->convertToPhantomNewAsyncFunction();
                        break;

                    case CreateActivation:
                        node->convertToPhantomCreateActivation();
                        break;

                    default:
                        node->remove();
                        break;
                    }
                }

                // Rewrite edges so that uses of sunk allocations point to
                // their materializations (or replacements).
                m_graph.doToChildren(
                    node,
                    [&] (Edge& edge) {
                        edge.setNode(resolve(block, edge.node()));
                    });
            }

            // Gotta drop some Upsilons.
            NodeAndIndex terminal = block->findTerminal();
            size_t upsilonInsertionPoint = terminal.index;
            NodeOrigin upsilonOrigin = terminal.node->origin;
            for (BasicBlock* successorBlock : block->successors()) {
                for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    PromotedHeapLocation location = indexToLocation[variable->index()];
                    Node* incoming = resolve(block, location);

                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }

                for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    Node* incoming = getMaterialization(block, indexToNode[variable->index()]);

                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }
            }

            m_insertionSet.execute(block);
        }
    }
1908
1909     Node* resolve(BasicBlock* block, PromotedHeapLocation location)
1910     {
1911         // If we are currently pointing to a single local allocation,
1912         // simply return the associated materialization.
1913         if (Node* identifier = m_heap.follow(location))
1914             return getMaterialization(block, identifier);
1915
1916         if (Node* result = m_localMapping.get(location))
1917             return result;
1918
1919         // This implies that there is no local mapping. Find a non-local mapping.
1920         SSACalculator::Def* def = m_pointerSSA.nonLocalReachingDef(
1921             block, m_locationToVariable.get(location));
1922         ASSERT(def);
1923         ASSERT(def->value());
1924
1925         Node* result = def->value();
1926         if (result->replacement())
1927             result = result->replacement();
1928         ASSERT(!result->replacement());
1929
1930         m_localMapping.add(location, result);
1931         return result;
1932     }
1933
1934     Node* resolve(BasicBlock* block, Node* node)
1935     {
1936         // If we are currently pointing to a single local allocation,
1937         // simply return the associated materialization.
1938         if (Node* identifier = m_heap.follow(node))
1939             return getMaterialization(block, identifier);
1940
1941         if (node->replacement())
1942             node = node->replacement();
1943         ASSERT(!node->replacement());
1944
1945         return node;
1946     }
1947
1948     Node* getMaterialization(BasicBlock* block, Node* identifier)
1949     {
1950         ASSERT(m_heap.isAllocation(identifier));
1951         if (!m_sinkCandidates.contains(identifier))
1952             return identifier;
1953
1954         if (Node* materialization = m_escapeeToMaterialization.get(identifier))
1955             return materialization;
1956
1957         SSACalculator::Def* def = m_allocationSSA.nonLocalReachingDef(
1958             block, m_nodeToVariable.get(identifier));
1959         ASSERT(def && def->value());
1960         m_escapeeToMaterialization.add(identifier, def->value());
1961         ASSERT(!def->value()->replacement());
1962         return def->value();
1963     }
1964
1965     void insertOSRHintsForUpdate(unsigned nodeIndex, NodeOrigin origin, bool& canExit, AvailabilityMap& availability, Node* escapee, Node* materialization)
1966     {
1967         if (verbose) {
1968             dataLog("Inserting OSR hints at ", origin, ":\n");
1969             dataLog("    Escapee: ", escapee, "\n");
1970             dataLog("    Materialization: ", materialization, "\n");
1971             dataLog("    Availability: ", availability, "\n");
1972         }
1973         
1974         // We need to follow() the value in the heap.
1975         // Consider the following graph:
1976         //
1977         // Block #0
1978         //   0: NewObject({})
1979         //   1: NewObject({})
1980         //   -: PutByOffset(@0, @1, x:0)
1981         //   -: PutStructure(@0, {x:0})
1982         //   2: GetByOffset(@0, x:0)
1983         //   -: MovHint(@2, loc1)
1984         //   -: Branch(#1, #2)
1985         //
1986         // Block #1
1987         //   3: Call(f, @1)
1988         //   4: Return(@0)
1989         //
1990         // Block #2
1991         //   -: Return(undefined)
1992         //
1993         // We need to materialize @1 at @3, and when doing so we need
1994         // to insert a MovHint for the materialization into loc1 as
1995         // well.
1996         // In order to do this, we say that we need to insert an
1997         // update hint for any availability whose node resolve()s to
1998         // the materialization.
1999         for (auto entry : availability.m_heap) {
2000             if (!entry.value.hasNode())
2001                 continue;
2002             if (m_heap.follow(entry.value.node()) != escapee)
2003                 continue;
2004
2005             m_insertionSet.insert(
2006                 nodeIndex,
2007                 entry.key.createHint(m_graph, origin.takeValidExit(canExit), materialization));
2008         }
2009
2010         for (unsigned i = availability.m_locals.size(); i--;) {
2011             if (!availability.m_locals[i].hasNode())
2012                 continue;
2013             if (m_heap.follow(availability.m_locals[i].node()) != escapee)
2014                 continue;
2015
2016             int operand = availability.m_locals.operandForIndex(i);
2017             m_insertionSet.insertNode(
2018                 nodeIndex, SpecNone, MovHint, origin.takeValidExit(canExit), OpInfo(operand),
2019                 materialization->defaultEdge());
2020         }
2021     }
2022
    // Fills in the children (and, where applicable, the materialization
    // data) of a previously-created materialization node for `escapee`,
    // resolving each promoted location to its current value in `block`.
    // Values that are themselves still-sunk allocations are replaced with
    // m_bottom; the recovery hints inserted elsewhere restore them.
    void populateMaterialization(BasicBlock* block, Node* node, Node* escapee)
    {
        Allocation& allocation = m_heap.getAllocation(escapee);
        switch (node->op()) {
        case MaterializeNewObject: {
            ObjectMaterializationData& data = node->objectMaterializationData();
            unsigned firstChild = m_graph.m_varArgChildren.size();

            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);

            // The structure is always the first vararg child.
            PromotedHeapLocation structure(StructurePLoc, allocation.identifier());
            ASSERT(locations.contains(structure));

            m_graph.m_varArgChildren.append(Edge(resolve(block, structure), KnownCellUse));

            for (PromotedHeapLocation location : locations) {
                switch (location.kind()) {
                case StructurePLoc:
                    // Already appended above.
                    ASSERT(location == structure);
                    break;

                case NamedPropertyPLoc: {
                    ASSERT(location.base() == allocation.identifier());
                    // data.m_properties and the vararg children are kept in
                    // lockstep: descriptor i describes child firstChild+1+i.
                    data.m_properties.append(location.descriptor());
                    Node* value = resolve(block, location);
                    if (m_sinkCandidates.contains(value))
                        m_graph.m_varArgChildren.append(m_bottom);
                    else
                        m_graph.m_varArgChildren.append(value);
                    break;
                }

                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }

            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }

        case MaterializeCreateActivation: {
            ObjectMaterializationData& data = node->objectMaterializationData();

            unsigned firstChild = m_graph.m_varArgChildren.size();

            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);

            // Symbol table first, then scope, then the closure variables.
            PromotedHeapLocation symbolTable(ActivationSymbolTablePLoc, allocation.identifier());
            ASSERT(locations.contains(symbolTable));
            ASSERT(node->cellOperand() == resolve(block, symbolTable)->constant());
            m_graph.m_varArgChildren.append(Edge(resolve(block, symbolTable), KnownCellUse));

            PromotedHeapLocation scope(ActivationScopePLoc, allocation.identifier());
            ASSERT(locations.contains(scope));
            m_graph.m_varArgChildren.append(Edge(resolve(block, scope), KnownCellUse));

            for (PromotedHeapLocation location : locations) {
                switch (location.kind()) {
                case ActivationScopePLoc: {
                    // Already appended above.
                    ASSERT(location == scope);
                    break;
                }

                case ActivationSymbolTablePLoc: {
                    // Already appended above.
                    ASSERT(location == symbolTable);
                    break;
                }

                case ClosureVarPLoc: {
                    ASSERT(location.base() == allocation.identifier());
                    data.m_properties.append(location.descriptor());
                    Node* value = resolve(block, location);
                    if (m_sinkCandidates.contains(value))
                        m_graph.m_varArgChildren.append(m_bottom);
                    else
                        m_graph.m_varArgChildren.append(value);
                    break;
                }

                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }

            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }

        // Functions are not turned into MaterializeX nodes; the original op
        // is reused and only its activation child needs resolving.
        case NewFunction:
        case NewGeneratorFunction:
        case NewAsyncFunction: {
            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
            ASSERT(locations.size() == 2);

            PromotedHeapLocation executable(FunctionExecutablePLoc, allocation.identifier());
            ASSERT_UNUSED(executable, locations.contains(executable));

            PromotedHeapLocation activation(FunctionActivationPLoc, allocation.identifier());
            ASSERT(locations.contains(activation));

            node->child1() = Edge(resolve(block, activation), KnownCellUse);
            break;
        }

        default:
            DFG_CRASH(m_graph, node, "Bad materialize op");
        }
    }
2136
2137     Node* createRecovery(BasicBlock* block, PromotedHeapLocation location, Node* where, bool& canExit)
2138     {
2139         if (verbose)
2140             dataLog("Recovering ", location, " at ", where, "\n");
2141         ASSERT(location.base()->isPhantomAllocation());
2142         Node* base = getMaterialization(block, location.base());
2143         Node* value = resolve(block, location);
2144
2145         NodeOrigin origin = where->origin.withSemantic(base->origin.semantic);
2146
2147         if (verbose)
2148             dataLog("Base is ", base, " and value is ", value, "\n");
2149
2150         if (base->isPhantomAllocation()) {
2151             return PromotedHeapLocation(base, location.descriptor()).createHint(
2152                 m_graph, origin.takeValidExit(canExit), value);
2153         }
2154
2155         switch (location.kind()) {
2156         case NamedPropertyPLoc: {
2157             Allocation& allocation = m_heap.getAllocation(location.base());
2158
2159             Vector<RegisteredStructure> structures;
2160             structures.appendRange(allocation.structures().begin(), allocation.structures().end());
2161             unsigned identifierNumber = location.info();
2162             UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
2163
2164             std::sort(
2165                 structures.begin(),
2166                 structures.end(),
2167                 [uid] (RegisteredStructure a, RegisteredStructure b) -> bool {
2168                     return a->getConcurrently(uid) < b->getConcurrently(uid);
2169                 });
2170
2171             PropertyOffset firstOffset = structures[0]->getConcurrently(uid);
2172
2173             if (firstOffset == structures.last()->getConcurrently(uid)) {
2174                 Node* storage = base;
2175                 // FIXME: When we decide to sink objects with a
2176                 // property storage, we should handle non-inline offsets.
2177                 RELEASE_ASSERT(isInlineOffset(firstOffset));
2178
2179                 StorageAccessData* data = m_graph.m_storageAccessData.add();
2180                 data->offset = firstOffset;
2181                 data->identifierNumber = identifierNumber;
2182
2183                 return m_graph.addNode(
2184                     PutByOffset,
2185                     origin.takeValidExit(canExit),
2186                     OpInfo(data),
2187                     Edge(storage, KnownCellUse),
2188                     Edge(base, KnownCellUse),
2189                     value->defaultEdge());
2190             }
2191
2192             MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
2193             data->identifierNumber = identifierNumber;
2194
2195             {
2196                 PropertyOffset currentOffset = firstOffset;
2197                 StructureSet currentSet;
2198                 for (RegisteredStructure structure : structures) {
2199                     PropertyOffset offset = structure->getConcurrently(uid);
2200                     if (offset != currentOffset) {
2201                         // Because our analysis treats MultiPutByOffset like an escape, we only have to
2202                         // deal with storing results that would have been previously stored by PutByOffset
2203                         // nodes. Those nodes were guarded by the appropriate type checks. This means that
2204                         // at this point, we can simply trust that the incoming value has the right type
2205                         // for whatever structure we are using.
2206                         data->variants.append(
2207                             PutByIdVariant::replace(currentSet, currentOffset, InferredType::Top));
2208                         currentOffset = offset;
2209                         currentSet.clear();
2210                     }
2211                     currentSet.add(structure.get());
2212                 }
2213                 data->variants.append(
2214                     PutByIdVariant::replace(currentSet, currentOffset, InferredType::Top));
2215             }
2216
2217             return m_graph.addNode(
2218                 MultiPutByOffset,
2219                 origin.takeValidExit(canExit),
2220                 OpInfo(data),
2221                 Edge(base, KnownCellUse),
2222                 value->defaultEdge());
2223             break;
2224         }
2225
2226         case ClosureVarPLoc: {
2227             return m_graph.addNode(
2228                 PutClosureVar,
2229                 origin.takeValidExit(canExit),
2230                 OpInfo(location.info()),
2231                 Edge(base, KnownCellUse),
2232                 value->defaultEdge());
2233             break;
2234         }
2235
2236         default:
2237             DFG_CRASH(m_graph, base, "Bad location kind");
2238             break;
2239         }
2240     }
2241
2242     // This is a great way of asking value->isStillValid() without having to worry about getting
2243     // different answers. It turns out that this analysis works OK regardless of what this
2244     // returns but breaks badly if this changes its mind for any particular InferredValue. This
2245     // method protects us from that.
2246     bool isStillValid(InferredValue* value)
2247     {
2248         return m_validInferredValues.add(value, value->isStillValid()).iterator->value;
2249     }
2250
    // SSA calculators used to place Phis: one keyed by promoted heap locations
    // (see m_locationToVariable) and one keyed by sunken allocation nodes
    // (see m_nodeToVariable).
    SSACalculator m_pointerSSA;
    SSACalculator m_allocationSSA;
    // Allocation nodes we decided to sink. NOTE(review): inferred from the name;
    // the selection logic is earlier in this file.
    NodeSet m_sinkCandidates;
    // Maps a promoted heap location to its SSA variable in m_pointerSSA.
    HashMap<PromotedHeapLocation, SSACalculator::Variable*> m_locationToVariable;
    // Maps an allocation node to its SSA variable in m_allocationSSA.
    HashMap<Node*, SSACalculator::Variable*> m_nodeToVariable;
    // Per-block working map from promoted location to the node currently
    // holding its value.
    HashMap<PromotedHeapLocation, Node*> m_localMapping;
    // Maps an escaping allocation to the materialization node standing in for it.
    HashMap<Node*, Node*> m_escapeeToMaterialization;
    InsertionSet m_insertionSet;
    CombinedLiveness m_combinedLiveness;

    // Memoization cache for isStillValid(): pins the first answer we observe for
    // each InferredValue so the analysis never sees it change its mind.
    HashMap<InferredValue*, bool> m_validInferredValues;

    // Reverse of m_escapeeToMaterialization.
    HashMap<Node*, Node*> m_materializationToEscapee;
    // For each node at which we materialize, the materializations to insert there.
    HashMap<Node*, Vector<Node*>> m_materializationSiteToMaterializations;
    // For each materialization site, the promoted locations whose values must be
    // recovered, and the (location, value) hints to emit.
    HashMap<Node*, Vector<PromotedHeapLocation>> m_materializationSiteToRecoveries;
    HashMap<Node*, Vector<std::pair<PromotedHeapLocation, Node*>>> m_materializationSiteToHints;

    // All promoted heap locations belonging to a given allocation.
    HashMap<Node*, Vector<PromotedHeapLocation>> m_locationsForAllocation;

    // Abstract heap state at the head and tail of every block, plus the heap
    // being mutated while walking a block (queried via m_heap.getAllocation()).
    BlockMap<LocalHeap> m_heapAtHead;
    BlockMap<LocalHeap> m_heapAtTail;
    LocalHeap m_heap;

    // Lazily-created node used as a placeholder value; starts out null.
    Node* m_bottom = nullptr;
2275 };
2276
2277 }
2278
2279 bool performObjectAllocationSinking(Graph& graph)
2280 {
2281     return runPhase<ObjectAllocationSinkingPhase>(graph);
2282 }
2283
2284 } } // namespace JSC::DFG
2285
2286 #endif // ENABLE(DFG_JIT)