Add better crash logging for allocation sinking phase
[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGObjectAllocationSinkingPhase.cpp
1 /*
2  * Copyright (C) 2015 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24  */
25
26 #include "config.h"
27 #include "DFGObjectAllocationSinkingPhase.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "DFGBlockMapInlines.h"
32 #include "DFGClobbersExitState.h"
33 #include "DFGCombinedLiveness.h"
34 #include "DFGGraph.h"
35 #include "DFGInsertionSet.h"
36 #include "DFGLazyNode.h"
37 #include "DFGLivenessAnalysisPhase.h"
38 #include "DFGOSRAvailabilityAnalysisPhase.h"
39 #include "DFGPhase.h"
40 #include "DFGPromotedHeapLocation.h"
41 #include "DFGSSACalculator.h"
42 #include "DFGValidate.h"
43 #include "JSCInlines.h"
44
45 #include <list>
46
47 namespace JSC { namespace DFG {
48
49 namespace {
50
// Crash helper used by the local RELEASE_ASSERT override below. Only the line
// number is folded into the crash info; the file-name and third (__COUNTER__)
// arguments are ignored here — presumably they exist to keep each call site's
// code distinct (e.g. to defeat identical-code folding) — TODO confirm.
// NO_RETURN_DUE_TO_CRASH tells the compiler control never returns;
// NEVER_INLINE keeps a distinct crash PC per failure site.
NO_RETURN_DUE_TO_CRASH NEVER_INLINE void crash(const char*, int line, int)
{
    CRASH_WITH_SECURITY_IMPLICATION_AND_INFO(line);
}
55
// Override the stock RELEASE_ASSERT for this file so that assertion failures
// in the allocation-sinking phase report the failure and then funnel through
// crash() above, recording the failing line number in the crash info
// (this is the "better crash logging" this patch adds).
#undef RELEASE_ASSERT
#define RELEASE_ASSERT(assertion) do { \
    if (!(assertion)) { \
        WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion); \
        crash(__FILE__, __LINE__, __COUNTER__); \
    } \
} while (0)
63
64 bool verbose = false;
65
66 // In order to sink object cycles, we use a points-to analysis coupled
67 // with an escape analysis. This analysis is actually similar to an
68 // abstract interpreter focused on local allocations and ignoring
69 // everything else.
70 //
71 // We represent the local heap using two mappings:
72 //
73 // - A set of the local allocations present in the function, where
74 //   each of those have a further mapping from
75 //   PromotedLocationDescriptor to local allocations they must point
76 //   to.
77 //
78 // - A "pointer" mapping from nodes to local allocations, if they must
79 //   be equal to said local allocation and are currently live. This
80 //   can be because the node is the actual node that created the
81 //   allocation, or any other node that must currently point to it -
82 //   we don't make a difference.
83 //
84 // The following graph is a motivation for why we separate allocations
85 // from pointers:
86 //
87 // Block #0
88 //  0: NewObject({})
89 //  1: NewObject({})
90 //  -: PutByOffset(@0, @1, x)
91 //  -: PutStructure(@0, {x:0})
92 //  2: GetByOffset(@0, x)
93 //  -: Jump(#1)
94 //
95 // Block #1
96 //  -: Return(@2)
97 //
98 // Here, we need to remember in block #1 that @2 points to a local
99 // allocation with appropriate fields and structures information
100 // (because we should be able to place a materialization on top of
101 // block #1 here), even though @1 is dead. We *could* just keep @1
102 // artificially alive here, but there is no real reason to do it:
103 // after all, by the end of block #0, @1 and @2 should be completely
104 // interchangeable, and there is no reason for us to artificially make
105 // @1 more important.
106 //
107 // An important point to consider to understand this separation is
108 // that we should think of the local heap as follow: we have a
109 // bunch of nodes that are pointers to "allocations" that live
110 // someplace on the heap, and those allocations can have pointers in
111 // between themselves as well. We shouldn't care about whatever
112 // names we give to the allocations ; what matters when
113 // comparing/merging two heaps is the isomorphism/comparison between
114 // the allocation graphs as seen by the nodes.
115 //
116 // For instance, in the following graph:
117 //
118 // Block #0
119 //  0: NewObject({})
120 //  -: Branch(#1, #2)
121 //
122 // Block #1
123 //  1: NewObject({})
124 //  -: PutByOffset(@0, @1, x)
125 //  -: PutStructure(@0, {x:0})
126 //  -: Jump(#3)
127 //
128 // Block #2
129 //  2: NewObject({})
130 //  -: PutByOffset(@2, undefined, x)
131 //  -: PutStructure(@2, {x:0})
132 //  -: PutByOffset(@0, @2, x)
133 //  -: PutStructure(@0, {x:0})
134 //  -: Jump(#3)
135 //
136 // Block #3
137 //  -: Return(@0)
138 //
139 // we should think of the heaps at tail of blocks #1 and #2 as being
140 // exactly the same, even though one has @0.x pointing to @1 and the
141 // other has @0.x pointing to @2, because in essence this should not
142 // be different from the graph where we hoisted @1 and @2 into a
143 // single allocation in block #0. We currently will not handle this
144 // case, because we merge allocations based on the node they are
145 // coming from, but this is only a technicality for the sake of
146 // simplicity that shouldn't hide the deeper idea outlined here.
147
148 class Allocation {
149 public:
150     // We use Escaped as a special allocation kind because when we
151     // decide to sink an allocation, we still need to keep track of it
152     // once it is escaped if it still has pointers to it in order to
153     // replace any use of those pointers by the corresponding
154     // materialization
155     enum class Kind { Escaped, Object, Activation, Function, GeneratorFunction, AsyncFunction };
156
157     explicit Allocation(Node* identifier = nullptr, Kind kind = Kind::Escaped)
158         : m_identifier(identifier)
159         , m_kind(kind)
160     {
161     }
162
163
164     const HashMap<PromotedLocationDescriptor, Node*>& fields() const
165     {
166         return m_fields;
167     }
168
169     Node* get(PromotedLocationDescriptor descriptor)
170     {
171         return m_fields.get(descriptor);
172     }
173
174     Allocation& set(PromotedLocationDescriptor descriptor, Node* value)
175     {
176         // Pointing to anything else than an unescaped local
177         // allocation is represented by simply not having the
178         // field
179         if (value)
180             m_fields.set(descriptor, value);
181         else
182             m_fields.remove(descriptor);
183         return *this;
184     }
185
186     void remove(PromotedLocationDescriptor descriptor)
187     {
188         set(descriptor, nullptr);
189     }
190
191     bool hasStructures() const
192     {
193         switch (kind()) {
194         case Kind::Object:
195             return true;
196
197         default:
198             return false;
199         }
200     }
201
202     Allocation& setStructures(const RegisteredStructureSet& structures)
203     {
204         ASSERT(hasStructures() && !structures.isEmpty());
205         m_structures = structures;
206         return *this;
207     }
208
209     Allocation& mergeStructures(const RegisteredStructureSet& structures)
210     {
211         ASSERT(hasStructures() || structures.isEmpty());
212         m_structures.merge(structures);
213         return *this;
214     }
215
216     Allocation& filterStructures(const RegisteredStructureSet& structures)
217     {
218         ASSERT(hasStructures());
219         m_structures.filter(structures);
220         return *this;
221     }
222
223     const RegisteredStructureSet& structures() const
224     {
225         return m_structures;
226     }
227
228     Node* identifier() const { return m_identifier; }
229
230     Kind kind() const { return m_kind; }
231
232     bool isEscapedAllocation() const
233     {
234         return kind() == Kind::Escaped;
235     }
236
237     bool isObjectAllocation() const
238     {
239         return m_kind == Kind::Object;
240     }
241
242     bool isActivationAllocation() const
243     {
244         return m_kind == Kind::Activation;
245     }
246
247     bool isFunctionAllocation() const
248     {
249         return m_kind == Kind::Function || m_kind == Kind::GeneratorFunction || m_kind == Kind::AsyncFunction;
250     }
251
252     bool operator==(const Allocation& other) const
253     {
254         return m_identifier == other.m_identifier
255             && m_kind == other.m_kind
256             && m_fields == other.m_fields
257             && m_structures == other.m_structures;
258     }
259
260     bool operator!=(const Allocation& other) const
261     {
262         return !(*this == other);
263     }
264
265     void dump(PrintStream& out) const
266     {
267         dumpInContext(out, nullptr);
268     }
269
270     void dumpInContext(PrintStream& out, DumpContext* context) const
271     {
272         switch (m_kind) {
273         case Kind::Escaped:
274             out.print("Escaped");
275             break;
276
277         case Kind::Object:
278             out.print("Object");
279             break;
280
281         case Kind::Function:
282             out.print("Function");
283             break;
284
285         case Kind::GeneratorFunction:
286             out.print("GeneratorFunction");
287             break;
288
289         case Kind::AsyncFunction:
290             out.print("AsyncFunction");
291             break;
292
293         case Kind::Activation:
294             out.print("Activation");
295             break;
296         }
297         out.print("Allocation(");
298         if (!m_structures.isEmpty())
299             out.print(inContext(m_structures.toStructureSet(), context));
300         if (!m_fields.isEmpty()) {
301             if (!m_structures.isEmpty())
302                 out.print(", ");
303             out.print(mapDump(m_fields, " => #", ", "));
304         }
305         out.print(")");
306     }
307
308 private:
309     Node* m_identifier; // This is the actual node that created the allocation
310     Kind m_kind;
311     HashMap<PromotedLocationDescriptor, Node*> m_fields;
312     RegisteredStructureSet m_structures;
313 };
314
// The abstract heap over which the escape analysis runs: a set of local
// allocations plus a "pointer" map from live nodes to those allocations.
// See the long comment above the Allocation class for why the two are
// kept separate.
class LocalHeap {
public:
    // Registers `node` as a brand-new allocation of the given kind; the node
    // also becomes a pointer to itself.
    Allocation& newAllocation(Node* node, Allocation::Kind kind)
    {
        ASSERT(!m_pointers.contains(node) && !isAllocation(node));
        m_pointers.add(node, node);
        return m_allocations.set(node, Allocation(node, kind)).iterator->value;
    }

    bool isAllocation(Node* identifier) const
    {
        return m_allocations.contains(identifier);
    }

    // Note that this is fundamentally different from
    // onlyLocalAllocation() below. getAllocation() takes as argument
    // a node-as-identifier, that is, an allocation node. This
    // allocation node doesn't have to be alive; it may only be
    // pointed to by other nodes or allocation fields.
    // For instance, in the following graph:
    //
    // Block #0
    //  0: NewObject({})
    //  1: NewObject({})
    //  -: PutByOffset(@0, @1, x)
    //  -: PutStructure(@0, {x:0})
    //  2: GetByOffset(@0, x)
    //  -: Jump(#1)
    //
    // Block #1
    //  -: Return(@2)
    //
    // At head of block #1, the only reachable allocation is #@1,
    // which can be reached through node @2. Thus, getAllocation(#@1)
    // contains the appropriate metadata for this allocation, but
    // onlyLocalAllocation(@1) is null, as @1 is no longer a pointer
    // to #@1 (since it is dead). Conversely, onlyLocalAllocation(@2)
    // is the same as getAllocation(#@1), while getAllocation(#@2)
    // does not make sense since @2 is not an allocation node.
    //
    // This is meant to be used when the node is already known to be
    // an identifier (i.e. an allocation) - probably because it was
    // found as value of a field or pointer in the current heap, or
    // was the result of a call to follow(). In any other cases (such
    // as when doing anything while traversing the graph), the
    // appropriate function to call is probably onlyLocalAllocation.
    Allocation& getAllocation(Node* identifier)
    {
        auto iter = m_allocations.find(identifier);
        ASSERT(iter != m_allocations.end());
        return iter->value;
    }

    // Records that `node` must point to the existing allocation `identifier`.
    void newPointer(Node* node, Node* identifier)
    {
        ASSERT(!m_allocations.contains(node) && !m_pointers.contains(node));
        ASSERT(isAllocation(identifier));
        m_pointers.add(node, identifier);
    }

    // follow solves the points-to problem. Given a live node, which
    // may be either an allocation itself or a heap read (e.g. a
    // GetByOffset node), it returns the corresponding allocation
    // node, if there is one. If the argument node is neither an
    // allocation or a heap read, or may point to different nodes,
    // nullptr will be returned. Note that a node that points to
    // different nodes can never point to an unescaped local
    // allocation.
    Node* follow(Node* node) const
    {
        auto iter = m_pointers.find(node);
        ASSERT(iter == m_pointers.end() || m_allocations.contains(iter->value));
        return iter == m_pointers.end() ? nullptr : iter->value;
    }

    // Resolves a promoted heap location (base allocation + field descriptor)
    // to the allocation identifier stored in that field, if any.
    Node* follow(PromotedHeapLocation location) const
    {
        const Allocation& base = m_allocations.find(location.base())->value;
        auto iter = base.fields().find(location.descriptor());

        if (iter == base.fields().end())
            return nullptr;

        return iter->value;
    }

    // onlyLocalAllocation find the corresponding allocation metadata
    // for any live node. onlyLocalAllocation(node) is essentially
    // getAllocation(follow(node)), with appropriate null handling.
    Allocation* onlyLocalAllocation(Node* node)
    {
        Node* identifier = follow(node);
        if (!identifier)
            return nullptr;

        return &getAllocation(identifier);
    }

    Allocation* onlyLocalAllocation(PromotedHeapLocation location)
    {
        Node* identifier = follow(location);
        if (!identifier)
            return nullptr;

        return &getAllocation(identifier);
    }

    // This allows us to store the escapees only when necessary. If
    // set, the current escapees can be retrieved at any time using
    // takeEscapees(), which will clear the cached set of escapees;
    // otherwise the heap won't remember escaping allocations.
    void setWantEscapees()
    {
        m_wantEscapees = true;
    }

    HashMap<Node*, Allocation> takeEscapees()
    {
        return WTFMove(m_escapees);
    }

    // Escapes the allocation that `node` must point to, if any.
    void escape(Node* node)
    {
        Node* identifier = follow(node);
        if (!identifier)
            return;

        escapeAllocation(identifier);
    }

    // Merges `other` (a predecessor's heap at tail) into this heap.
    // Anything the two heaps disagree on - allocation kind, or a field or
    // pointer resolving to different identifiers - gets escaped, because we
    // can no longer prove a single local value for it.
    void merge(const LocalHeap& other)
    {
        assertIsValid();
        other.assertIsValid();
        ASSERT(!m_wantEscapees);

        // First merge into an unreached heap is just a copy.
        if (!reached()) {
            ASSERT(other.reached());
            *this = other;
            return;
        }

        NodeSet toEscape;

        for (auto& allocationEntry : other.m_allocations)
            m_allocations.add(allocationEntry.key, allocationEntry.value);
        for (auto& allocationEntry : m_allocations) {
            auto allocationIter = other.m_allocations.find(allocationEntry.key);

            // If we have it and they don't, it died for them but we
            // are keeping it alive from another field somewhere.
            // There is nothing to do - we will be escaped
            // automatically when we handle that other field.
            // This will also happen for allocation that we have and
            // they don't, and all of those will get pruned.
            if (allocationIter == other.m_allocations.end())
                continue;

            if (allocationEntry.value.kind() != allocationIter->value.kind()) {
                toEscape.addVoid(allocationEntry.key);
                for (const auto& fieldEntry : allocationIter->value.fields())
                    toEscape.addVoid(fieldEntry.value);
            } else {
                mergePointerSets(
                    allocationEntry.value.fields(), allocationIter->value.fields(),
                    [&] (Node* identifier) {
                        toEscape.addVoid(identifier);
                    },
                    [&] (PromotedLocationDescriptor field) {
                        allocationEntry.value.remove(field);
                    });
                allocationEntry.value.mergeStructures(allocationIter->value.structures());
            }
        }

        mergePointerSets(m_pointers, other.m_pointers,
            [&] (Node* identifier) {
                toEscape.addVoid(identifier);
            },
            [&] (Node* field) {
                m_pointers.remove(field);
            });

        for (Node* identifier : toEscape)
            escapeAllocation(identifier);

        if (!ASSERT_DISABLED) {
            for (const auto& entry : m_allocations)
                ASSERT_UNUSED(entry, entry.value.isEscapedAllocation() || other.m_allocations.contains(entry.key));
        }

        // If there is no remaining pointer to an allocation, we can
        // remove it. This should only happen for escaped allocations,
        // because we only merge liveness-pruned heaps in the first
        // place.
        prune();

        assertIsValid();
    }

    // Drops pointers whose node is not in `live`, then prunes allocations
    // that became unreachable as a result.
    void pruneByLiveness(const NodeSet& live)
    {
        Vector<Node*> toRemove;
        for (const auto& entry : m_pointers) {
            if (!live.contains(entry.key))
                toRemove.append(entry.key);
        }
        for (Node* node : toRemove)
            m_pointers.remove(node);

        prune();
    }

    // Debug-build consistency check: every pointer and every allocation
    // field must reference an allocation this heap actually contains.
    void assertIsValid() const
    {
        if (ASSERT_DISABLED)
            return;

        // Pointers should point to an actual allocation
        for (const auto& entry : m_pointers) {
            ASSERT_UNUSED(entry, entry.value);
            ASSERT(m_allocations.contains(entry.value));
        }

        for (const auto& allocationEntry : m_allocations) {
            // Fields should point to an actual allocation
            for (const auto& fieldEntry : allocationEntry.value.fields()) {
                ASSERT_UNUSED(fieldEntry, fieldEntry.value);
                ASSERT(m_allocations.contains(fieldEntry.value));
            }
        }
    }

    bool operator==(const LocalHeap& other) const
    {
        assertIsValid();
        other.assertIsValid();
        return m_allocations == other.m_allocations
            && m_pointers == other.m_pointers;
    }

    bool operator!=(const LocalHeap& other) const
    {
        return !(*this == other);
    }

    const HashMap<Node*, Allocation>& allocations() const
    {
        return m_allocations;
    }

    const HashMap<Node*, Node*>& pointers() const
    {
        return m_pointers;
    }

    void dump(PrintStream& out) const
    {
        out.print("  Allocations:\n");
        for (const auto& entry : m_allocations)
            out.print("    #", entry.key, ": ", entry.value, "\n");
        out.print("  Pointers:\n");
        for (const auto& entry : m_pointers)
            out.print("    ", entry.key, " => #", entry.value, "\n");
    }

    // Whether the analysis has visited the block this heap belongs to yet;
    // merge() special-cases the first merge into an unreached heap.
    bool reached() const
    {
        return m_reached;
    }

    void setReached()
    {
        m_reached = true;
    }

private:
    // When we merge two heaps, we escape all fields of allocations,
    // unless they point to the same thing in both heaps.
    // The reason for this is that it allows us not to do extra work
    // for diamond graphs where we would otherwise have to check
    // whether we have a single definition or not, which would be
    // cumbersome.
    //
    // Note that we should try to unify nodes even when they are not
    // from the same allocation; for instance we should be able to
    // completely eliminate all allocations from the following graph:
    //
    // Block #0
    //  0: NewObject({})
    //  -: Branch(#1, #2)
    //
    // Block #1
    //  1: NewObject({})
    //  -: PutByOffset(@1, "left", val)
    //  -: PutStructure(@1, {val:0})
    //  -: PutByOffset(@0, @1, x)
    //  -: PutStructure(@0, {x:0})
    //  -: Jump(#3)
    //
    // Block #2
    //  2: NewObject({})
    //  -: PutByOffset(@2, "right", val)
    //  -: PutStructure(@2, {val:0})
    //  -: PutByOffset(@0, @2, x)
    //  -: PutStructure(@0, {x:0})
    //  -: Jump(#3)
    //
    // Block #3:
    //  3: GetByOffset(@0, x)
    //  4: GetByOffset(@3, val)
    //  -: Return(@4)
    template<typename Key, typename EscapeFunctor, typename RemoveFunctor>
    void mergePointerSets(
        const HashMap<Key, Node*>& my, const HashMap<Key, Node*>& their,
        const EscapeFunctor& escape, const RemoveFunctor& remove)
    {
        Vector<Key> toRemove;
        for (const auto& entry : my) {
            auto iter = their.find(entry.key);
            if (iter == their.end()) {
                // Key exists only on our side: escape its value and drop it.
                toRemove.append(entry.key);
                escape(entry.value);
            } else if (iter->value != entry.value) {
                // The two sides disagree: escape both values and drop the key.
                toRemove.append(entry.key);
                escape(entry.value);
                escape(iter->value);
            }
        }
        for (const auto& entry : their) {
            if (my.contains(entry.key))
                continue;
            escape(entry.value);
        }
        for (Key key : toRemove)
            remove(key);
    }

    // Marks the allocation - and, recursively, every allocation its fields
    // point to - as escaped. The pre-escape state is saved into m_escapees
    // when setWantEscapees() was called.
    void escapeAllocation(Node* identifier)
    {
        Allocation& allocation = getAllocation(identifier);
        if (allocation.isEscapedAllocation())
            return;

        Allocation unescaped = WTFMove(allocation);
        allocation = Allocation(unescaped.identifier(), Allocation::Kind::Escaped);

        for (const auto& entry : unescaped.fields())
            escapeAllocation(entry.value);

        if (m_wantEscapees)
            m_escapees.add(unescaped.identifier(), WTFMove(unescaped));
    }

    // Removes allocations not reachable from any pointer, following
    // allocation fields transitively (a mark phase over m_allocations).
    void prune()
    {
        NodeSet reachable;
        for (const auto& entry : m_pointers)
            reachable.addVoid(entry.value);

        // Repeatedly mark as reachable allocations in fields of other
        // reachable allocations
        {
            Vector<Node*> worklist;
            worklist.appendRange(reachable.begin(), reachable.end());

            while (!worklist.isEmpty()) {
                Node* identifier = worklist.takeLast();
                Allocation& allocation = m_allocations.find(identifier)->value;
                for (const auto& entry : allocation.fields()) {
                    if (reachable.add(entry.value).isNewEntry)
                        worklist.append(entry.value);
                }
            }
        }

        // Remove unreachable allocations
        {
            Vector<Node*> toRemove;
            for (const auto& entry : m_allocations) {
                if (!reachable.contains(entry.key))
                    toRemove.append(entry.key);
            }
            for (Node* identifier : toRemove)
                m_allocations.remove(identifier);
        }
    }

    bool m_reached = false; // Set once the analysis first visits this block.
    HashMap<Node*, Node*> m_pointers; // Live node => allocation identifier.
    HashMap<Node*, Allocation> m_allocations; // Identifier => abstract state.

    bool m_wantEscapees = false;
    HashMap<Node*, Allocation> m_escapees;
};
710
711 class ObjectAllocationSinkingPhase : public Phase {
712 public:
    // NOTE(review): the name string passed to Phase presumably identifies
    // this phase in logs and phase dumps - confirm against Phase's ctor.
    ObjectAllocationSinkingPhase(Graph& graph)
        : Phase(graph, "object allocation elimination")
        , m_pointerSSA(graph)
        , m_allocationSSA(graph)
        , m_insertionSet(graph)
    {
    }
720
721     bool run()
722     {
723         ASSERT(m_graph.m_form == SSA);
724         ASSERT(m_graph.m_fixpointState == FixpointNotConverged);
725
726         if (!performSinking())
727             return false;
728
729         if (verbose) {
730             dataLog("Graph after elimination:\n");
731             m_graph.dump();
732         }
733
734         return true;
735     }
736
737 private:
    // Drives the whole phase in order: prerequisite analyses, the escape
    // analysis (performAnalysis), candidate selection, and finally the graph
    // transformation (promoteLocalHeap). Returns true if the graph changed.
    bool performSinking()
    {
        // These analyses must run before the escape analysis below.
        m_graph.computeRefCounts();
        m_graph.initializeNodeOwners();
        m_graph.ensureDominators();
        performLivenessAnalysis(m_graph);
        performOSRAvailabilityAnalysis(m_graph);
        m_combinedLiveness = CombinedLiveness(m_graph);

        // Snapshot the pre-transformation graph so a validation failure can
        // report what we started from.
        CString graphBeforeSinking;
        if (Options::verboseValidationFailure() && Options::validateGraphAtEachPhase()) {
            StringPrintStream out;
            m_graph.dump(out);
            graphBeforeSinking = out.toCString();
        }

        if (verbose) {
            dataLog("Graph before elimination:\n");
            m_graph.dump();
        }

        performAnalysis();

        // No sink candidates means nothing to transform.
        if (!determineSinkCandidates())
            return false;

        if (verbose) {
            for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
                dataLog("Heap at head of ", *block, ": \n", m_heapAtHead[block]);
                dataLog("Heap at tail of ", *block, ": \n", m_heapAtTail[block]);
            }
        }

        promoteLocalHeap();

        if (Options::validateGraphAtEachPhase())
            DFG::validate(m_graph, DumpGraph, graphBeforeSinking);
        return true;
    }
777
    // The escape analysis proper: abstract-interprets every block over the
    // local heap, propagating heaps from block tails to successor heads,
    // until the per-block tail heaps stop changing (a fixpoint).
    void performAnalysis()
    {
        m_heapAtHead = BlockMap<LocalHeap>(m_graph);
        m_heapAtTail = BlockMap<LocalHeap>(m_graph);

        bool changed;
        do {
            if (verbose)
                dataLog("Doing iteration of escape analysis.\n");
            changed = false;

            for (BasicBlock* block : m_graph.blocksInPreOrder()) {
                m_heapAtHead[block].setReached();
                m_heap = m_heapAtHead[block];

                for (Node* node : *block) {
                    // During analysis we neither write to the materialized
                    // heap nor resolve locations; those functors are only
                    // used during the later transformation.
                    handleNode(
                        node,
                        [] (PromotedHeapLocation, LazyNode) { },
                        [&] (PromotedHeapLocation) -> Node* {
                            return nullptr;
                        });
                }

                // Only propagate and re-iterate when the tail heap changed.
                if (m_heap == m_heapAtTail[block])
                    continue;

                m_heapAtTail[block] = m_heap;
                changed = true;

                m_heap.assertIsValid();

                // We keep only pointers that are live, and only
                // allocations that are either live, pointed to by a
                // live pointer, or (recursively) stored in a field of
                // a live allocation.
                //
                // This means we can accidentaly leak non-dominating
                // nodes into the successor. However, due to the
                // non-dominance property, we are guaranteed that the
                // successor has at least one predecessor that is not
                // dominated either: this means any reference to a
                // non-dominating allocation in the successor will
                // trigger an escape and get pruned during the merge.
                m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

                for (BasicBlock* successorBlock : block->successors())
                    m_heapAtHead[successorBlock].merge(m_heap);
            }
        } while (changed);
    }
829
830     template<typename WriteFunctor, typename ResolveFunctor>
831     void handleNode(
832         Node* node,
833         const WriteFunctor& heapWrite,
834         const ResolveFunctor& heapResolve)
835     {
836         m_heap.assertIsValid();
837         ASSERT(m_heap.takeEscapees().isEmpty());
838
839         Allocation* target = nullptr;
840         HashMap<PromotedLocationDescriptor, LazyNode> writes;
841         PromotedLocationDescriptor exactRead;
842
843         switch (node->op()) {
844         case NewObject:
845             target = &m_heap.newAllocation(node, Allocation::Kind::Object);
846             target->setStructures(node->structure());
847             writes.add(
848                 StructurePLoc, LazyNode(m_graph.freeze(node->structure().get())));
849             break;
850
851         case NewFunction:
852         case NewGeneratorFunction:
853         case NewAsyncFunction: {
854             if (isStillValid(node->castOperand<FunctionExecutable*>()->singletonFunction())) {
855                 m_heap.escape(node->child1().node());
856                 break;
857             }
858
859             if (node->op() == NewGeneratorFunction)
860                 target = &m_heap.newAllocation(node, Allocation::Kind::GeneratorFunction);
861             else if (node->op() == NewAsyncFunction)
862                 target = &m_heap.newAllocation(node, Allocation::Kind::AsyncFunction);
863             else
864                 target = &m_heap.newAllocation(node, Allocation::Kind::Function);
865
866             writes.add(FunctionExecutablePLoc, LazyNode(node->cellOperand()));
867             writes.add(FunctionActivationPLoc, LazyNode(node->child1().node()));
868             break;
869         }
870
871         case CreateActivation: {
872             if (isStillValid(node->castOperand<SymbolTable*>()->singletonScope())) {
873                 m_heap.escape(node->child1().node());
874                 break;
875             }
876             target = &m_heap.newAllocation(node, Allocation::Kind::Activation);
877             writes.add(ActivationSymbolTablePLoc, LazyNode(node->cellOperand()));
878             writes.add(ActivationScopePLoc, LazyNode(node->child1().node()));
879             {
880                 SymbolTable* symbolTable = node->castOperand<SymbolTable*>();
881                 ConcurrentJSLocker locker(symbolTable->m_lock);
882                 LazyNode initialValue(m_graph.freeze(node->initializationValueForActivation()));
883                 for (auto iter = symbolTable->begin(locker), end = symbolTable->end(locker); iter != end; ++iter) {
884                     writes.add(
885                         PromotedLocationDescriptor(ClosureVarPLoc, iter->value.scopeOffset().offset()),
886                         initialValue);
887                 }
888             }
889             break;
890         }
891
892         case PutStructure:
893             target = m_heap.onlyLocalAllocation(node->child1().node());
894             if (target && target->isObjectAllocation()) {
895                 writes.add(StructurePLoc, LazyNode(m_graph.freeze(JSValue(node->transition()->next.get()))));
896                 target->setStructures(node->transition()->next);
897             } else
898                 m_heap.escape(node->child1().node());
899             break;
900
901         case CheckStructure: {
902             Allocation* allocation = m_heap.onlyLocalAllocation(node->child1().node());
903             if (allocation && allocation->isObjectAllocation()) {
904                 allocation->filterStructures(node->structureSet());
905                 if (Node* value = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc)))
906                     node->convertToCheckStructureImmediate(value);
907             } else
908                 m_heap.escape(node->child1().node());
909             break;
910         }
911
912         case GetByOffset:
913         case GetGetterSetterByOffset:
914             target = m_heap.onlyLocalAllocation(node->child2().node());
915             if (target && target->isObjectAllocation()) {
916                 unsigned identifierNumber = node->storageAccessData().identifierNumber;
917                 exactRead = PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber);
918             } else {
919                 m_heap.escape(node->child1().node());
920                 m_heap.escape(node->child2().node());
921             }
922             break;
923
924         case MultiGetByOffset: {
925             Allocation* allocation = m_heap.onlyLocalAllocation(node->child1().node());
926             if (allocation && allocation->isObjectAllocation()) {
927                 MultiGetByOffsetData& data = node->multiGetByOffsetData();
928                 RegisteredStructureSet validStructures;
929                 bool hasInvalidStructures = false;
930                 for (const auto& multiGetByOffsetCase : data.cases) {
931                     if (!allocation->structures().overlaps(multiGetByOffsetCase.set()))
932                         continue;
933
934                     switch (multiGetByOffsetCase.method().kind()) {
935                     case GetByOffsetMethod::LoadFromPrototype: // We need to escape those
936                     case GetByOffsetMethod::Constant: // We don't really have a way of expressing this
937                         hasInvalidStructures = true;
938                         break;
939
940                     case GetByOffsetMethod::Load: // We're good
941                         validStructures.merge(multiGetByOffsetCase.set());
942                         break;
943
944                     default:
945                         RELEASE_ASSERT_NOT_REACHED();
946                     }
947                 }
948                 if (hasInvalidStructures) {
949                     m_heap.escape(node->child1().node());
950                     break;
951                 }
952                 unsigned identifierNumber = data.identifierNumber;
953                 PromotedHeapLocation location(NamedPropertyPLoc, allocation->identifier(), identifierNumber);
954                 if (Node* value = heapResolve(location)) {
955                     if (allocation->structures().isSubsetOf(validStructures))
956                         node->replaceWith(value);
957                     else {
958                         Node* structure = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc));
959                         ASSERT(structure);
960                         allocation->filterStructures(validStructures);
961                         node->convertToCheckStructure(m_graph.addStructureSet(allocation->structures()));
962                         node->convertToCheckStructureImmediate(structure);
963                         node->setReplacement(value);
964                     }
965                 } else if (!allocation->structures().isSubsetOf(validStructures)) {
966                     // Even though we don't need the result here, we still need
967                     // to make the call to tell our caller that we could need
968                     // the StructurePLoc.
969                     // The reason for this is that when we decide not to sink a
970                     // node, we will still lower any read to its fields before
971                     // it escapes (which are usually reads across a function
972                     // call that DFGClobberize can't handle) - but we only do
973                     // this for PromotedHeapLocations that we have seen read
974                     // during the analysis!
975                     heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc));
976                     allocation->filterStructures(validStructures);
977                 }
978                 Node* identifier = allocation->get(location.descriptor());
979                 if (identifier)
980                     m_heap.newPointer(node, identifier);
981             } else
982                 m_heap.escape(node->child1().node());
983             break;
984         }
985
986         case PutByOffset:
987             target = m_heap.onlyLocalAllocation(node->child2().node());
988             if (target && target->isObjectAllocation()) {
989                 unsigned identifierNumber = node->storageAccessData().identifierNumber;
990                 writes.add(
991                     PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber),
992                     LazyNode(node->child3().node()));
993             } else {
994                 m_heap.escape(node->child1().node());
995                 m_heap.escape(node->child2().node());
996                 m_heap.escape(node->child3().node());
997             }
998             break;
999
1000         case GetClosureVar:
1001             target = m_heap.onlyLocalAllocation(node->child1().node());
1002             if (target && target->isActivationAllocation()) {
1003                 exactRead =
1004                     PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset());
1005             } else
1006                 m_heap.escape(node->child1().node());
1007             break;
1008
1009         case PutClosureVar:
1010             target = m_heap.onlyLocalAllocation(node->child1().node());
1011             if (target && target->isActivationAllocation()) {
1012                 writes.add(
1013                     PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset()),
1014                     LazyNode(node->child2().node()));
1015             } else {
1016                 m_heap.escape(node->child1().node());
1017                 m_heap.escape(node->child2().node());
1018             }
1019             break;
1020
1021         case SkipScope:
1022             target = m_heap.onlyLocalAllocation(node->child1().node());
1023             if (target && target->isActivationAllocation())
1024                 exactRead = ActivationScopePLoc;
1025             else
1026                 m_heap.escape(node->child1().node());
1027             break;
1028
1029         case GetExecutable:
1030             target = m_heap.onlyLocalAllocation(node->child1().node());
1031             if (target && target->isFunctionAllocation())
1032                 exactRead = FunctionExecutablePLoc;
1033             else
1034                 m_heap.escape(node->child1().node());
1035             break;
1036
1037         case GetScope:
1038             target = m_heap.onlyLocalAllocation(node->child1().node());
1039             if (target && target->isFunctionAllocation())
1040                 exactRead = FunctionActivationPLoc;
1041             else
1042                 m_heap.escape(node->child1().node());
1043             break;
1044
1045         case Check:
1046             m_graph.doToChildren(
1047                 node,
1048                 [&] (Edge edge) {
1049                     if (edge.willNotHaveCheck())
1050                         return;
1051
1052                     if (alreadyChecked(edge.useKind(), SpecObject))
1053                         return;
1054
1055                     m_heap.escape(edge.node());
1056                 });
1057             break;
1058
1059         case MovHint:
1060         case PutHint:
1061             // Handled by OSR availability analysis
1062             break;
1063
1064         default:
1065             m_graph.doToChildren(
1066                 node,
1067                 [&] (Edge edge) {
1068                     m_heap.escape(edge.node());
1069                 });
1070             break;
1071         }
1072
1073         if (exactRead) {
1074             ASSERT(target);
1075             ASSERT(writes.isEmpty());
1076             if (Node* value = heapResolve(PromotedHeapLocation(target->identifier(), exactRead))) {
1077                 ASSERT(!value->replacement());
1078                 node->replaceWith(value);
1079             }
1080             Node* identifier = target->get(exactRead);
1081             if (identifier)
1082                 m_heap.newPointer(node, identifier);
1083         }
1084
1085         for (auto entry : writes) {
1086             ASSERT(target);
1087             if (entry.value.isNode())
1088                 target->set(entry.key, m_heap.follow(entry.value.asNode()));
1089             else
1090                 target->remove(entry.key);
1091             heapWrite(PromotedHeapLocation(target->identifier(), entry.key), entry.value);
1092         }
1093
1094         m_heap.assertIsValid();
1095     }
1096
1097     bool determineSinkCandidates()
1098     {
1099         m_sinkCandidates.clear();
1100         m_materializationToEscapee.clear();
1101         m_materializationSiteToMaterializations.clear();
1102         m_materializationSiteToRecoveries.clear();
1103         m_materializationSiteToHints.clear();
1104
1105         // Logically we wish to consider every allocation and sink
1106         // it. However, it is probably not profitable to sink an
1107         // allocation that will always escape. So, we only sink an
1108         // allocation if one of the following is true:
1109         //
1110         // 1) There exists a basic block with only backwards outgoing
1111         //    edges (or no outgoing edges) in which the node wasn't
1112         //    materialized. This is meant to catch
1113         //    effectively-infinite loops in which we don't need to
1114         //    have allocated the object.
1115         //
1116         // 2) There exists a basic block at the tail of which the node
1117         //    is dead and not materialized.
1118         //
1119         // 3) The sum of execution counts of the materializations is
1120         //    less than the sum of execution counts of the original
1121         //    node.
1122         //
1123         // We currently implement only rule #2.
1124         // FIXME: Implement the two other rules.
1125         // https://bugs.webkit.org/show_bug.cgi?id=137073 (rule #1)
1126         // https://bugs.webkit.org/show_bug.cgi?id=137074 (rule #3)
1127         //
1128         // However, these rules allow for a sunk object to be put into
1129         // a non-sunk one, which we don't support. We could solve this
1130         // by supporting PutHints on local allocations, making these
1131         // objects only partially correct, and we would need to adapt
1132         // the OSR availability analysis and OSR exit to handle
1133         // this. This would be totally doable, but would create a
1134         // super rare, and thus bug-prone, code path.
1135         // So, instead, we need to implement one of the following
1136         // closure rules:
1137         //
1138         // 1) If we put a sink candidate into a local allocation that
1139         //    is not a sink candidate, change our minds and don't
1140         //    actually sink the sink candidate.
1141         //
1142         // 2) If we put a sink candidate into a local allocation, that
1143         //    allocation becomes a sink candidate as well.
1144         //
1145         // We currently choose to implement closure rule #2.
1146         HashMap<Node*, Vector<Node*>> dependencies;
1147         bool hasUnescapedReads = false;
1148         for (BasicBlock* block : m_graph.blocksInPreOrder()) {
1149             m_heap = m_heapAtHead[block];
1150
1151             for (Node* node : *block) {
1152                 handleNode(
1153                     node,
1154                     [&] (PromotedHeapLocation location, LazyNode value) {
1155                         if (!value.isNode())
1156                             return;
1157
1158                         Allocation* allocation = m_heap.onlyLocalAllocation(value.asNode());
1159                         if (allocation && !allocation->isEscapedAllocation())
1160                             dependencies.add(allocation->identifier(), Vector<Node*>()).iterator->value.append(location.base());
1161                     },
1162                     [&] (PromotedHeapLocation) -> Node* {
1163                         hasUnescapedReads = true;
1164                         return nullptr;
1165                     });
1166             }
1167
1168             // The sink candidates are initially the unescaped
1169             // allocations dying at tail of blocks
1170             NodeSet allocations;
1171             for (const auto& entry : m_heap.allocations()) {
1172                 if (!entry.value.isEscapedAllocation())
1173                     allocations.addVoid(entry.key);
1174             }
1175
1176             m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);
1177
1178             for (Node* identifier : allocations) {
1179                 if (!m_heap.isAllocation(identifier))
1180                     m_sinkCandidates.addVoid(identifier);
1181             }
1182         }
1183
1184         // Ensure that the set of sink candidates is closed for put operations
1185         Vector<Node*> worklist;
1186         worklist.appendRange(m_sinkCandidates.begin(), m_sinkCandidates.end());
1187
1188         while (!worklist.isEmpty()) {
1189             for (Node* identifier : dependencies.get(worklist.takeLast())) {
1190                 if (m_sinkCandidates.add(identifier).isNewEntry)
1191                     worklist.append(identifier);
1192             }
1193         }
1194
1195         if (m_sinkCandidates.isEmpty())
1196             return hasUnescapedReads;
1197
1198         if (verbose)
1199             dataLog("Candidates: ", listDump(m_sinkCandidates), "\n");
1200
1201         // Create the materialization nodes
1202         for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
1203             m_heap = m_heapAtHead[block];
1204             m_heap.setWantEscapees();
1205
1206             for (Node* node : *block) {
1207                 handleNode(
1208                     node,
1209                     [] (PromotedHeapLocation, LazyNode) { },
1210                     [] (PromotedHeapLocation) -> Node* {
1211                         return nullptr;
1212                     });
1213                 auto escapees = m_heap.takeEscapees();
1214                 if (!escapees.isEmpty())
1215                     placeMaterializations(escapees, node);
1216             }
1217
1218             m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);
1219
1220             {
1221                 HashMap<Node*, Allocation> escapingOnEdge;
1222                 for (const auto& entry : m_heap.allocations()) {
1223                     if (entry.value.isEscapedAllocation())
1224                         continue;
1225
1226                     bool mustEscape = false;
1227                     for (BasicBlock* successorBlock : block->successors()) {
1228                         if (!m_heapAtHead[successorBlock].isAllocation(entry.key)
1229                             || m_heapAtHead[successorBlock].getAllocation(entry.key).isEscapedAllocation())
1230                             mustEscape = true;
1231                     }
1232
1233                     if (mustEscape)
1234                         escapingOnEdge.add(entry.key, entry.value);
1235                 }
1236                 placeMaterializations(WTFMove(escapingOnEdge), block->terminal());
1237             }
1238         }
1239
1240         return hasUnescapedReads || !m_sinkCandidates.isEmpty();
1241     }
1242
    // Plan the materializations needed because the given allocations
    // (`escapees`) escape at node `where`. Only sink candidates get
    // materializations; the others are dropped from consideration here.
    // Results are accumulated into m_materializationToEscapee,
    // m_materializationSiteToMaterializations,
    // m_materializationSiteToRecoveries and m_materializationSiteToHints.
    void placeMaterializations(HashMap<Node*, Allocation> escapees, Node* where)
    {
        // We don't create materializations if the escapee is not a
        // sink candidate
        Vector<Node*> toRemove;
        for (const auto& entry : escapees) {
            if (!m_sinkCandidates.contains(entry.key))
                toRemove.append(entry.key);
        }
        for (Node* identifier : toRemove)
            escapees.remove(identifier);

        if (escapees.isEmpty())
            return;

        // First collect the hints that will be needed when the node
        // we materialize is still stored into other unescaped sink candidates.
        // The way to interpret this vector is:
        //
        // PromotedHeapLocation(NotEscapedAllocation, field) = identifierAllocation
        //
        // e.g:
        // PromotedHeapLocation(@PhantomNewFunction, FunctionActivationPLoc) = IdentifierOf(@MaterializeCreateActivation)
        // or:
        // PromotedHeapLocation(@PhantomCreateActivation, ClosureVarPLoc(x)) = IdentifierOf(@NewFunction)
        //
        // When the rhs of the `=` is to be materialized at this `where` point in the program
        // and IdentifierOf(Materialization) is the original sunken allocation of the materialization.
        //
        // The reason we need to collect all the `identifiers` here is that
        // we may materialize multiple versions of the allocation along control
        // flow edges. We will PutHint these values along those edges. However,
        // we also need to PutHint them when we join and have a Phi of the allocations.
        Vector<std::pair<PromotedHeapLocation, Node*>> hints;
        for (const auto& entry : m_heap.allocations()) {
            if (escapees.contains(entry.key))
                continue;

            for (const auto& field : entry.value.fields()) {
                ASSERT(m_sinkCandidates.contains(entry.key) || !escapees.contains(field.value));
                auto iter = escapees.find(field.value);
                if (iter != escapees.end()) {
                    ASSERT(m_sinkCandidates.contains(field.value));
                    hints.append(std::make_pair(PromotedHeapLocation(entry.key, field.key), field.value));
                }
            }
        }

        // Now we need to order the materialization. Any order is
        // valid (as long as we materialize a node first if it is
        // needed for the materialization of another node, e.g. a
        // function's activation must be materialized before the
        // function itself), but we want to try minimizing the number
        // of times we have to place Puts to close cycles after a
        // materialization. In other words, we are trying to find the
        // minimum number of materializations to remove from the
        // materialization graph to make it a DAG, known as the
        // (vertex) feedback set problem. Unfortunately, this is a
        // NP-hard problem, which we don't want to solve exactly.
        //
        // Instead, we use a simple greedy procedure, that proceeds as
        // follow:
        //  - While there is at least one node with no outgoing edge
        //    amongst the remaining materializations, materialize it
        //    first
        //
        //  - Similarly, while there is at least one node with no
        //    incoming edge amongst the remaining materializations,
        //    materialize it last.
        //
        //  - When both previous conditions are false, we have an
        //    actual cycle, and we need to pick a node to
        //    materialize. We try greedily to remove the "pressure" on
        //    the remaining nodes by choosing the node with maximum
        //    |incoming edges| * |outgoing edges| as a measure of how
        //    "central" to the graph it is. We materialize it first,
        //    so that all the recoveries will be Puts of things into
        //    it (rather than Puts of the materialization into other
        //    objects), which means we will have a single
        //    StoreBarrier.


        // Compute dependencies between materializations.
        // - dependencies[a]: escapees stored into a's fields (a points at them)
        // - reverseDependencies[a]: escapees that point at a
        // - forMaterialization[a]: the subset of dependencies[a] that must
        //   exist before a can even be created (cannot be fixed up with a
        //   later Put), per field.key.neededForMaterialization().
        HashMap<Node*, NodeSet> dependencies;
        HashMap<Node*, NodeSet> reverseDependencies;
        HashMap<Node*, NodeSet> forMaterialization;
        for (const auto& entry : escapees) {
            auto& myDependencies = dependencies.add(entry.key, NodeSet()).iterator->value;
            auto& myDependenciesForMaterialization = forMaterialization.add(entry.key, NodeSet()).iterator->value;
            reverseDependencies.add(entry.key, NodeSet());
            for (const auto& field : entry.value.fields()) {
                if (escapees.contains(field.value) && field.value != entry.key) {
                    myDependencies.addVoid(field.value);
                    reverseDependencies.add(field.value, NodeSet()).iterator->value.addVoid(entry.key);
                    if (field.key.neededForMaterialization())
                        myDependenciesForMaterialization.addVoid(field.value);
                }
            }
        }

        // Helper function to update the materialized set and the
        // dependencies
        NodeSet materialized;
        auto materialize = [&] (Node* identifier) {
            materialized.addVoid(identifier);
            for (Node* dep : dependencies.get(identifier))
                reverseDependencies.find(dep)->value.remove(identifier);
            for (Node* rdep : reverseDependencies.get(identifier)) {
                dependencies.find(rdep)->value.remove(identifier);
                forMaterialization.find(rdep)->value.remove(identifier);
            }
            dependencies.remove(identifier);
            reverseDependencies.remove(identifier);
            forMaterialization.remove(identifier);
        };

        // Nodes without remaining unmaterialized fields will be
        // materialized first - amongst the remaining unmaterialized
        // nodes
        // NOTE: std::list is used (rather than a Vector) because
        // firstPos/lastPos must stay valid across the insertions below;
        // std::list iterators are not invalidated by insert.
        std::list<Allocation> toMaterialize;
        auto firstPos = toMaterialize.begin();
        auto materializeFirst = [&] (Allocation&& allocation) {
            materialize(allocation.identifier());
            // We need to insert *after* the current position
            if (firstPos != toMaterialize.end())
                ++firstPos;
            firstPos = toMaterialize.insert(firstPos, WTFMove(allocation));
        };

        // Nodes that no other unmaterialized node points to will be
        // materialized last - amongst the remaining unmaterialized
        // nodes
        auto lastPos = toMaterialize.end();
        auto materializeLast = [&] (Allocation&& allocation) {
            materialize(allocation.identifier());
            lastPos = toMaterialize.insert(lastPos, WTFMove(allocation));
        };

        // These are the promoted locations that contain some of the
        // allocations we are currently escaping. If they are a location on
        // some other allocation we are currently materializing, we will need
        // to "recover" their value with a real put once the corresponding
        // allocation is materialized; if they are a location on some other
        // not-yet-materialized allocation, we will need a PutHint.
        Vector<PromotedHeapLocation> toRecover;

        // This loop does the actual cycle breaking
        while (!escapees.isEmpty()) {
            materialized.clear();

            // Materialize nodes that won't require recoveries if we can
            // (moving each Allocation out of `escapees`; the moved-from
            // entries are erased from the map after the loop).
            for (auto& entry : escapees) {
                if (!forMaterialization.find(entry.key)->value.isEmpty())
                    continue;

                if (dependencies.find(entry.key)->value.isEmpty()) {
                    materializeFirst(WTFMove(entry.value));
                    continue;
                }

                if (reverseDependencies.find(entry.key)->value.isEmpty()) {
                    materializeLast(WTFMove(entry.value));
                    continue;
                }
            }

            // We reach this only if there is an actual cycle that needs
            // breaking. Because we do not want to solve a NP-hard problem
            // here, we just heuristically pick a node and materialize it
            // first.
            if (materialized.isEmpty()) {
                uint64_t maxEvaluation = 0;
                Allocation* bestAllocation = nullptr;
                for (auto& entry : escapees) {
                    if (!forMaterialization.find(entry.key)->value.isEmpty())
                        continue;

                    uint64_t evaluation =
                        static_cast<uint64_t>(dependencies.get(entry.key).size()) * reverseDependencies.get(entry.key).size();
                    if (evaluation > maxEvaluation) {
                        maxEvaluation = evaluation;
                        bestAllocation = &entry.value;
                    }
                }
                RELEASE_ASSERT(maxEvaluation > 0);

                materializeFirst(WTFMove(*bestAllocation));
            }
            RELEASE_ASSERT(!materialized.isEmpty());

            for (Node* identifier : materialized)
                escapees.remove(identifier);
        }

        materialized.clear();

        // Walk the final materialization order: any field that points at an
        // escapee materialized *later* in the order needs a recovery Put.
        NodeSet escaped;
        for (const Allocation& allocation : toMaterialize)
            escaped.addVoid(allocation.identifier());
        for (const Allocation& allocation : toMaterialize) {
            for (const auto& field : allocation.fields()) {
                if (escaped.contains(field.value) && !materialized.contains(field.value))
                    toRecover.append(PromotedHeapLocation(allocation.identifier(), field.key));
            }
            materialized.addVoid(allocation.identifier());
        }

        Vector<Node*>& materializations = m_materializationSiteToMaterializations.add(
            where, Vector<Node*>()).iterator->value;

        for (const Allocation& allocation : toMaterialize) {
            Node* materialization = createMaterialization(allocation, where);
            materializations.append(materialization);
            m_materializationToEscapee.add(materialization, allocation.identifier());
        }

        if (!toRecover.isEmpty()) {
            m_materializationSiteToRecoveries.add(
                where, Vector<PromotedHeapLocation>()).iterator->value.appendVector(toRecover);
        }

        // The hints need to be after the "real" recoveries so that we
        // don't hint not-yet-complete objects
        m_materializationSiteToHints.add(
            where, Vector<std::pair<PromotedHeapLocation, Node*>>()).iterator->value.appendVector(hints);
    }
1469
1470     Node* createMaterialization(const Allocation& allocation, Node* where)
1471     {
1472         // FIXME: This is the only place where we actually use the
1473         // fact that an allocation's identifier is indeed the node
1474         // that created the allocation.
1475         switch (allocation.kind()) {
1476         case Allocation::Kind::Object: {
1477             ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
1478
1479             return m_graph.addNode(
1480                 allocation.identifier()->prediction(), Node::VarArg, MaterializeNewObject,
1481                 where->origin.withSemantic(allocation.identifier()->origin.semantic),
1482                 OpInfo(m_graph.addStructureSet(allocation.structures())), OpInfo(data), 0, 0);
1483         }
1484
1485         case Allocation::Kind::AsyncFunction:
1486         case Allocation::Kind::GeneratorFunction:
1487         case Allocation::Kind::Function: {
1488             FrozenValue* executable = allocation.identifier()->cellOperand();
1489             
1490             NodeType nodeType =
1491                 allocation.kind() == Allocation::Kind::GeneratorFunction ? NewGeneratorFunction :
1492                 allocation.kind() == Allocation::Kind::AsyncFunction ? NewAsyncFunction : NewFunction;
1493             
1494             return m_graph.addNode(
1495                 allocation.identifier()->prediction(), nodeType,
1496                 where->origin.withSemantic(
1497                     allocation.identifier()->origin.semantic),
1498                 OpInfo(executable));
1499             break;
1500         }
1501
1502         case Allocation::Kind::Activation: {
1503             ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
1504             FrozenValue* symbolTable = allocation.identifier()->cellOperand();
1505
1506             return m_graph.addNode(
1507                 allocation.identifier()->prediction(), Node::VarArg, MaterializeCreateActivation,
1508                 where->origin.withSemantic(
1509                     allocation.identifier()->origin.semantic),
1510                 OpInfo(symbolTable), OpInfo(data), 0, 0);
1511         }
1512
1513         default:
1514             DFG_CRASH(m_graph, allocation.identifier(), "Bad allocation kind");
1515         }
1516     }
1517
    // Rewrites the graph so that sunk allocations live as promoted heap
    // locations instead of real objects: collects the set of locations to
    // promote, builds SSA over both the locations and the allocations
    // themselves, places Phis/Upsilons, inserts materializations and OSR
    // hints, and finally converts sunk allocation nodes to their Phantom
    // forms. Runs after the sink candidates and materialization sites have
    // been computed.
    void promoteLocalHeap()
    {
        // Collect the set of heap locations that we will be operating
        // over.
        HashSet<PromotedHeapLocation> locations;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            m_heap = m_heapAtHead[block];

            for (Node* node : *block) {
                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode) {
                        // If the location is not on a sink candidate,
                        // we only sink it if it is read
                        if (m_sinkCandidates.contains(location.base()))
                            locations.addVoid(location);
                    },
                    [&] (PromotedHeapLocation location) -> Node* {
                        locations.addVoid(location);
                        return nullptr;
                    });
            }
        }

        // Figure out which locations belong to which allocations.
        m_locationsForAllocation.clear();
        for (PromotedHeapLocation location : locations) {
            auto result = m_locationsForAllocation.add(
                location.base(),
                Vector<PromotedHeapLocation>());
            ASSERT(!result.iterator->value.contains(location));
            result.iterator->value.append(location);
        }

        m_pointerSSA.reset();
        m_allocationSSA.reset();

        // Collect the set of "variables" that we will be sinking.
        // Each sink candidate gets an allocation-SSA variable and each
        // promoted location gets a pointer-SSA variable; the indexToNode /
        // indexToLocation vectors give the reverse mapping (variable index
        // back to node/location), which the Phi callbacks below rely on.
        m_locationToVariable.clear();
        m_nodeToVariable.clear();
        Vector<Node*> indexToNode;
        Vector<PromotedHeapLocation> indexToLocation;

        for (Node* index : m_sinkCandidates) {
            SSACalculator::Variable* variable = m_allocationSSA.newVariable();
            m_nodeToVariable.add(index, variable);
            ASSERT(indexToNode.size() == variable->index());
            indexToNode.append(index);
        }

        for (PromotedHeapLocation location : locations) {
            SSACalculator::Variable* variable = m_pointerSSA.newVariable();
            m_locationToVariable.add(location, variable);
            ASSERT(indexToLocation.size() == variable->index());
            indexToLocation.append(location);
        }

        // We insert all required constants at top of block 0 so that
        // they are inserted only once and we don't clutter the graph
        // with useless constants everywhere
        HashMap<FrozenValue*, Node*> lazyMapping;
        if (!m_bottom)
            m_bottom = m_insertionSet.insertConstant(0, m_graph.block(0)->at(0)->origin, jsNumber(1927));

        // For each allocation-SSA variable, the set of locations whose hints
        // must be re-emitted next to any Phi created for that variable.
        Vector<HashSet<PromotedHeapLocation>> hintsForPhi(m_sinkCandidates.size());

        // First pass: record SSA defs for every store to a promoted location
        // and every materialization of a sunk allocation.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            m_heap = m_heapAtHead[block];

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                // Some named properties can be added conditionally,
                // and that would necessitate bottoms
                for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
                    if (location.kind() != NamedPropertyPLoc)
                        continue;

                    SSACalculator::Variable* variable = m_locationToVariable.get(location);
                    m_pointerSSA.newDef(variable, block, m_bottom);
                }

                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialization);
                    m_allocationSSA.newDef(m_nodeToVariable.get(escapee), block, materialization);
                }

                for (std::pair<PromotedHeapLocation, Node*> pair : m_materializationSiteToHints.get(node)) {
                    PromotedHeapLocation location = pair.first;
                    Node* identifier = pair.second;
                    // We're materializing `identifier` at this point, and the unmaterialized
                    // version is inside `location`. We track which SSA variable this belongs
                    // to in case we also need a PutHint for the Phi.
                    if (UNLIKELY(validationEnabled())) {
                        RELEASE_ASSERT(m_sinkCandidates.contains(location.base()));
                        RELEASE_ASSERT(m_sinkCandidates.contains(identifier));

                        bool found = false;
                        for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                            // We're materializing `identifier` here. This asserts that this is indeed the case.
                            if (m_materializationToEscapee.get(materialization) == identifier) {
                                found = true;
                                break;
                            }
                        }
                        RELEASE_ASSERT(found);
                    }

                    SSACalculator::Variable* variable = m_nodeToVariable.get(identifier);
                    hintsForPhi[variable->index()].addVoid(location);
                }

                if (m_sinkCandidates.contains(node))
                    m_allocationSSA.newDef(m_nodeToVariable.get(node), block, node);

                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        if (!locations.contains(location))
                            return;

                        // Turn lazy (constant) values into real nodes at the
                        // top of block 0, memoized through lazyMapping so each
                        // constant is created only once.
                        Node* nodeValue;
                        if (value.isNode())
                            nodeValue = value.asNode();
                        else {
                            auto iter = lazyMapping.find(value.asValue());
                            if (iter != lazyMapping.end())
                                nodeValue = iter->value;
                            else {
                                nodeValue = value.ensureIsNode(
                                    m_insertionSet, m_graph.block(0), 0);
                                lazyMapping.add(value.asValue(), nodeValue);
                            }
                        }

                        SSACalculator::Variable* variable = m_locationToVariable.get(location);
                        m_pointerSSA.newDef(variable, block, nodeValue);
                    },
                    [] (PromotedHeapLocation) -> Node* {
                        return nullptr;
                    });
            }
        }
        m_insertionSet.execute(m_graph.block(0));

        // Run the SSA calculators to create Phis
        m_pointerSSA.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                PromotedHeapLocation location = indexToLocation[variable->index()];

                // Don't create Phi nodes for fields of dead allocations
                if (!m_heapAtHead[block].isAllocation(location.base()))
                    return nullptr;

                // Don't create Phi nodes once we are escaped
                if (m_heapAtHead[block].getAllocation(location.base()).isEscapedAllocation())
                    return nullptr;

                // If we point to a single allocation, we will
                // directly use its materialization
                if (m_heapAtHead[block].follow(location))
                    return nullptr;

                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, block->at(0)->origin.withInvalidExit());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });

        m_allocationSSA.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                Node* identifier = indexToNode[variable->index()];

                // Don't create Phi nodes for dead allocations
                if (!m_heapAtHead[block].isAllocation(identifier))
                    return nullptr;

                // Don't create Phi nodes until we are escaped
                if (!m_heapAtHead[block].getAllocation(identifier).isEscapedAllocation())
                    return nullptr;

                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, block->at(0)->origin.withInvalidExit());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });

        // Place Phis in the right places, replace all uses of any load with the appropriate
        // value, and create the materialization nodes.
        LocalOSRAvailabilityCalculator availabilityCalculator(m_graph);
        m_graph.clearReplacements();
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            m_heap = m_heapAtHead[block];
            availabilityCalculator.beginBlock(block);

            // These mapping tables are intended to be lazy. If
            // something is omitted from the table, it means that
            // there haven't been any local stores to the promoted
            // heap location (or any local materialization).
            m_localMapping.clear();
            m_escapeeToMaterialization.clear();

            // Insert the Phi functions that we had previously
            // created.
            for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(block)) {
                SSACalculator::Variable* variable = phiDef->variable();
                m_insertionSet.insert(0, phiDef->value());

                PromotedHeapLocation location = indexToLocation[variable->index()];
                m_localMapping.set(location, phiDef->value());

                if (m_sinkCandidates.contains(location.base())) {
                    // Keep the OSR exit state in sync with the merged value.
                    m_insertionSet.insert(
                        0,
                        location.createHint(
                            m_graph, block->at(0)->origin.withInvalidExit(), phiDef->value()));
                }
            }

            for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(block)) {
                SSACalculator::Variable* variable = phiDef->variable();
                m_insertionSet.insert(0, phiDef->value());

                Node* identifier = indexToNode[variable->index()];
                m_escapeeToMaterialization.add(identifier, phiDef->value());
                bool canExit = false;
                insertOSRHintsForUpdate(
                    0, block->at(0)->origin, canExit,
                    availabilityCalculator.m_availability, identifier, phiDef->value());

                // Re-emit PutHints for the locations recorded against this
                // variable in the first pass (see hintsForPhi above).
                for (PromotedHeapLocation location : hintsForPhi[variable->index()]) {
                    m_insertionSet.insert(0,
                        location.createHint(m_graph, block->at(0)->origin.withInvalidExit(), phiDef->value()));
                    m_localMapping.set(location, phiDef->value());
                }
            }

            if (verbose) {
                dataLog("Local mapping at ", pointerDump(block), ": ", mapDump(m_localMapping), "\n");
                dataLog("Local materializations at ", pointerDump(block), ": ", mapDump(m_escapeeToMaterialization), "\n");
            }

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                // canExit tracks exit validity before `node`; nextCanExit
                // tracks it for insertions after `node` (takeValidExit below
                // clears these the first time an exit origin is consumed).
                bool canExit = true;
                bool nextCanExit = node->origin.exitOK;
                for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
                    if (location.kind() != NamedPropertyPLoc)
                        continue;

                    m_localMapping.set(location, m_bottom);

                    if (m_sinkCandidates.contains(node)) {
                        if (verbose)
                            dataLog("For sink candidate ", node, " found location ", location, "\n");
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            location.createHint(
                                m_graph, node->origin.takeValidExit(nextCanExit), m_bottom));
                    }
                }

                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                    materialization->origin.exitOK &= canExit;
                    Node* escapee = m_materializationToEscapee.get(materialization);
                    populateMaterialization(block, materialization, escapee);
                    m_escapeeToMaterialization.set(escapee, materialization);
                    m_insertionSet.insert(nodeIndex, materialization);
                    if (verbose)
                        dataLog("Materializing ", escapee, " => ", materialization, " at ", node, "\n");
                }

                for (PromotedHeapLocation location : m_materializationSiteToRecoveries.get(node))
                    m_insertionSet.insert(nodeIndex, createRecovery(block, location, node, canExit));
                for (std::pair<PromotedHeapLocation, Node*> pair : m_materializationSiteToHints.get(node))
                    m_insertionSet.insert(nodeIndex, createRecovery(block, pair.first, node, canExit));

                // We need to put the OSR hints after the recoveries,
                // because we only want the hints once the object is
                // complete
                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialization);
                    insertOSRHintsForUpdate(
                        nodeIndex, node->origin, canExit,
                        availabilityCalculator.m_availability, escapee, materialization);
                }

                if (node->origin.exitOK && !canExit) {
                    // We indicate that the exit state is fine now. It is OK because we updated the
                    // state above. We need to indicate this manually because the validation doesn't
                    // have enough information to infer that the exit state is fine.
                    m_insertionSet.insertNode(nodeIndex, SpecNone, ExitOK, node->origin);
                }

                if (m_sinkCandidates.contains(node))
                    m_escapeeToMaterialization.set(node, node);

                availabilityCalculator.executeNode(node);

                bool desiredNextExitOK = node->origin.exitOK && !clobbersExitState(m_graph, node);

                bool doLower = false;
                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        if (!locations.contains(location))
                            return;

                        Node* nodeValue;
                        if (value.isNode())
                            nodeValue = value.asNode();
                        else
                            nodeValue = lazyMapping.get(value.asValue());

                        nodeValue = resolve(block, nodeValue);

                        m_localMapping.set(location, nodeValue);

                        if (!m_sinkCandidates.contains(location.base()))
                            return;

                        // A store to a sunk allocation: the node itself will be
                        // lowered below and replaced by this PutHint.
                        doLower = true;

                        if (verbose)
                            dataLog("Creating hint with value ", nodeValue, " before ", node, "\n");
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            location.createHint(
                                m_graph, node->origin.takeValidExit(nextCanExit), nodeValue));
                    },
                    [&] (PromotedHeapLocation location) -> Node* {
                        return resolve(block, location);
                    });

                if (!nextCanExit && desiredNextExitOK) {
                    // We indicate that the exit state is fine now. We need to do this because we
                    // emitted hints that appear to invalidate the exit state.
                    m_insertionSet.insertNode(nodeIndex + 1, SpecNone, ExitOK, node->origin);
                }

                // Lower sunk allocations to their Phantom forms; other lowered
                // nodes (e.g. stores replaced by hints) are removed outright.
                if (m_sinkCandidates.contains(node) || doLower) {
                    switch (node->op()) {
                    case NewObject:
                        node->convertToPhantomNewObject();
                        break;

                    case NewFunction:
                        node->convertToPhantomNewFunction();
                        break;

                    case NewGeneratorFunction:
                        node->convertToPhantomNewGeneratorFunction();
                        break;

                    case NewAsyncFunction:
                        node->convertToPhantomNewAsyncFunction();
                        break;

                    case CreateActivation:
                        node->convertToPhantomCreateActivation();
                        break;

                    default:
                        node->remove();
                        break;
                    }
                }

                // Redirect all edges through replacements/materializations.
                m_graph.doToChildren(
                    node,
                    [&] (Edge& edge) {
                        edge.setNode(resolve(block, edge.node()));
                    });
            }

            // Gotta drop some Upsilons.
            NodeAndIndex terminal = block->findTerminal();
            size_t upsilonInsertionPoint = terminal.index;
            NodeOrigin upsilonOrigin = terminal.node->origin;
            for (BasicBlock* successorBlock : block->successors()) {
                for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    PromotedHeapLocation location = indexToLocation[variable->index()];
                    Node* incoming = resolve(block, location);

                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }

                for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    Node* incoming = getMaterialization(block, indexToNode[variable->index()]);

                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }
            }

            m_insertionSet.execute(block);
        }
    }
1921
1922     NEVER_INLINE Node* resolve(BasicBlock* block, PromotedHeapLocation location)
1923     {
1924         // If we are currently pointing to a single local allocation,
1925         // simply return the associated materialization.
1926         if (Node* identifier = m_heap.follow(location))
1927             return getMaterialization(block, identifier);
1928
1929         if (Node* result = m_localMapping.get(location))
1930             return result;
1931
1932         // This implies that there is no local mapping. Find a non-local mapping.
1933         SSACalculator::Def* def = m_pointerSSA.nonLocalReachingDef(
1934             block, m_locationToVariable.get(location));
1935         ASSERT(def);
1936         ASSERT(def->value());
1937
1938         Node* result = def->value();
1939         if (result->replacement())
1940             result = result->replacement();
1941         ASSERT(!result->replacement());
1942
1943         m_localMapping.add(location, result);
1944         return result;
1945     }
1946
1947     NEVER_INLINE Node* resolve(BasicBlock* block, Node* node)
1948     {
1949         // If we are currently pointing to a single local allocation,
1950         // simply return the associated materialization.
1951         if (Node* identifier = m_heap.follow(node))
1952             return getMaterialization(block, identifier);
1953
1954         if (node->replacement())
1955             node = node->replacement();
1956         ASSERT(!node->replacement());
1957
1958         return node;
1959     }
1960
1961     NEVER_INLINE Node* getMaterialization(BasicBlock* block, Node* identifier)
1962     {
1963         ASSERT(m_heap.isAllocation(identifier));
1964         if (!m_sinkCandidates.contains(identifier))
1965             return identifier;
1966
1967         if (Node* materialization = m_escapeeToMaterialization.get(identifier))
1968             return materialization;
1969
1970         SSACalculator::Def* def = m_allocationSSA.nonLocalReachingDef(
1971             block, m_nodeToVariable.get(identifier));
1972         ASSERT(def && def->value());
1973         m_escapeeToMaterialization.add(identifier, def->value());
1974         ASSERT(!def->value()->replacement());
1975         return def->value();
1976     }
1977
    // Emits OSR hints so that every availability entry (heap location or
    // bytecode local) whose value resolves to `escapee` is retargeted to the
    // freshly created `materialization`. `canExit` is consumed by
    // takeValidExit: the first emitted hint may keep a valid exit origin,
    // subsequent ones get an invalidated one.
    void insertOSRHintsForUpdate(unsigned nodeIndex, NodeOrigin origin, bool& canExit, AvailabilityMap& availability, Node* escapee, Node* materialization)
    {
        if (verbose) {
            dataLog("Inserting OSR hints at ", origin, ":\n");
            dataLog("    Escapee: ", escapee, "\n");
            dataLog("    Materialization: ", materialization, "\n");
            dataLog("    Availability: ", availability, "\n");
        }
        
        // We need to follow() the value in the heap.
        // Consider the following graph:
        //
        // Block #0
        //   0: NewObject({})
        //   1: NewObject({})
        //   -: PutByOffset(@0, @1, x:0)
        //   -: PutStructure(@0, {x:0})
        //   2: GetByOffset(@0, x:0)
        //   -: MovHint(@2, loc1)
        //   -: Branch(#1, #2)
        //
        // Block #1
        //   3: Call(f, @1)
        //   4: Return(@0)
        //
        // Block #2
        //   -: Return(undefined)
        //
        // We need to materialize @1 at @3, and when doing so we need
        // to insert a MovHint for the materialization into loc1 as
        // well.
        // In order to do this, we say that we need to insert an
        // update hint for any availability whose node resolve()s to
        // the materialization.
        for (auto entry : availability.m_heap) {
            if (!entry.value.hasNode())
                continue;
            if (m_heap.follow(entry.value.node()) != escapee)
                continue;

            // This heap location held the escapee; point it at the
            // materialization instead.
            m_insertionSet.insert(
                nodeIndex,
                entry.key.createHint(m_graph, origin.takeValidExit(canExit), materialization));
        }

        // Same for bytecode locals: any local whose availability followed to
        // the escapee gets a MovHint carrying the materialization.
        for (unsigned i = availability.m_locals.size(); i--;) {
            if (!availability.m_locals[i].hasNode())
                continue;
            if (m_heap.follow(availability.m_locals[i].node()) != escapee)
                continue;

            int operand = availability.m_locals.operandForIndex(i);
            m_insertionSet.insertNode(
                nodeIndex, SpecNone, MovHint, origin.takeValidExit(canExit), OpInfo(operand),
                materialization->defaultEdge());
        }
    }
2035
    // Fills in the children (and, where applicable, the
    // ObjectMaterializationData) of a materialization node for `escapee`,
    // resolving each promoted location of the allocation to its current
    // value in `block`. Values that are themselves sink candidates are
    // stored as m_bottom; the real value is recovered later via PutHints.
    // The order of appends to m_varArgChildren is significant: fixed slots
    // (structure / symbol table / scope) come first, then properties in the
    // same order as data.m_properties.
    void populateMaterialization(BasicBlock* block, Node* node, Node* escapee)
    {
        Allocation& allocation = m_heap.getAllocation(escapee);
        switch (node->op()) {
        case MaterializeNewObject: {
            ObjectMaterializationData& data = node->objectMaterializationData();
            unsigned firstChild = m_graph.m_varArgChildren.size();

            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);

            PromotedHeapLocation structure(StructurePLoc, allocation.identifier());
            ASSERT(locations.contains(structure));

            // Child 0 is always the structure.
            m_graph.m_varArgChildren.append(Edge(resolve(block, structure), KnownCellUse));

            for (PromotedHeapLocation location : locations) {
                switch (location.kind()) {
                case StructurePLoc:
                    ASSERT(location == structure);
                    break;

                case NamedPropertyPLoc: {
                    ASSERT(location.base() == allocation.identifier());
                    data.m_properties.append(location.descriptor());
                    Node* value = resolve(block, location);
                    // Sunk values have no materialized node yet; store bottom
                    // and rely on recovery hints to supply the real value.
                    if (m_sinkCandidates.contains(value))
                        m_graph.m_varArgChildren.append(m_bottom);
                    else
                        m_graph.m_varArgChildren.append(value);
                    break;
                }

                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }

            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }

        case MaterializeCreateActivation: {
            ObjectMaterializationData& data = node->objectMaterializationData();

            unsigned firstChild = m_graph.m_varArgChildren.size();

            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);

            // Children 0 and 1 are the symbol table and the scope.
            PromotedHeapLocation symbolTable(ActivationSymbolTablePLoc, allocation.identifier());
            ASSERT(locations.contains(symbolTable));
            ASSERT(node->cellOperand() == resolve(block, symbolTable)->constant());
            m_graph.m_varArgChildren.append(Edge(resolve(block, symbolTable), KnownCellUse));

            PromotedHeapLocation scope(ActivationScopePLoc, allocation.identifier());
            ASSERT(locations.contains(scope));
            m_graph.m_varArgChildren.append(Edge(resolve(block, scope), KnownCellUse));

            for (PromotedHeapLocation location : locations) {
                switch (location.kind()) {
                case ActivationScopePLoc: {
                    ASSERT(location == scope);
                    break;
                }

                case ActivationSymbolTablePLoc: {
                    ASSERT(location == symbolTable);
                    break;
                }

                case ClosureVarPLoc: {
                    ASSERT(location.base() == allocation.identifier());
                    data.m_properties.append(location.descriptor());
                    Node* value = resolve(block, location);
                    // As above: sunk closure-var values are bottom for now.
                    if (m_sinkCandidates.contains(value))
                        m_graph.m_varArgChildren.append(m_bottom);
                    else
                        m_graph.m_varArgChildren.append(value);
                    break;
                }

                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }

            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }
        
        case NewFunction:
        case NewGeneratorFunction:
        case NewAsyncFunction: {
            // Functions materialize via their original node; only the
            // activation (scope) child needs resolving. The executable is a
            // constant held in the node's OpInfo.
            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
            ASSERT(locations.size() == 2);
                
            PromotedHeapLocation executable(FunctionExecutablePLoc, allocation.identifier());
            ASSERT_UNUSED(executable, locations.contains(executable));
                
            PromotedHeapLocation activation(FunctionActivationPLoc, allocation.identifier());
            ASSERT(locations.contains(activation));

            node->child1() = Edge(resolve(block, activation), KnownCellUse);
            break;
        }

        default:
            DFG_CRASH(m_graph, node, "Bad materialize op");
        }
    }
2149
2150     Node* createRecovery(BasicBlock* block, PromotedHeapLocation location, Node* where, bool& canExit)
2151     {
2152         if (verbose)
2153             dataLog("Recovering ", location, " at ", where, "\n");
2154         ASSERT(location.base()->isPhantomAllocation());
2155         Node* base = getMaterialization(block, location.base());
2156         Node* value = resolve(block, location);
2157
2158         NodeOrigin origin = where->origin.withSemantic(base->origin.semantic);
2159
2160         if (verbose)
2161             dataLog("Base is ", base, " and value is ", value, "\n");
2162
2163         if (base->isPhantomAllocation()) {
2164             return PromotedHeapLocation(base, location.descriptor()).createHint(
2165                 m_graph, origin.takeValidExit(canExit), value);
2166         }
2167
2168         switch (location.kind()) {
2169         case NamedPropertyPLoc: {
2170             Allocation& allocation = m_heap.getAllocation(location.base());
2171
2172             Vector<RegisteredStructure> structures;
2173             structures.appendRange(allocation.structures().begin(), allocation.structures().end());
2174             unsigned identifierNumber = location.info();
2175             UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
2176
2177             std::sort(
2178                 structures.begin(),
2179                 structures.end(),
2180                 [uid] (RegisteredStructure a, RegisteredStructure b) -> bool {
2181                     return a->getConcurrently(uid) < b->getConcurrently(uid);
2182                 });
2183
2184             PropertyOffset firstOffset = structures[0]->getConcurrently(uid);
2185
2186             if (firstOffset == structures.last()->getConcurrently(uid)) {
2187                 Node* storage = base;
2188                 // FIXME: When we decide to sink objects with a
2189                 // property storage, we should handle non-inline offsets.
2190                 RELEASE_ASSERT(isInlineOffset(firstOffset));
2191
2192                 StorageAccessData* data = m_graph.m_storageAccessData.add();
2193                 data->offset = firstOffset;
2194                 data->identifierNumber = identifierNumber;
2195
2196                 return m_graph.addNode(
2197                     PutByOffset,
2198                     origin.takeValidExit(canExit),
2199                     OpInfo(data),
2200                     Edge(storage, KnownCellUse),
2201                     Edge(base, KnownCellUse),
2202                     value->defaultEdge());
2203             }
2204
2205             MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
2206             data->identifierNumber = identifierNumber;
2207
2208             {
2209                 PropertyOffset currentOffset = firstOffset;
2210                 StructureSet currentSet;
2211                 for (RegisteredStructure structure : structures) {
2212                     PropertyOffset offset = structure->getConcurrently(uid);
2213                     if (offset != currentOffset) {
2214                         // Because our analysis treats MultiPutByOffset like an escape, we only have to
2215                         // deal with storing results that would have been previously stored by PutByOffset
2216                         // nodes. Those nodes were guarded by the appropriate type checks. This means that
2217                         // at this point, we can simply trust that the incoming value has the right type
2218                         // for whatever structure we are using.
2219                         data->variants.append(
2220                             PutByIdVariant::replace(currentSet, currentOffset, InferredType::Top));
2221                         currentOffset = offset;
2222                         currentSet.clear();
2223                     }
2224                     currentSet.add(structure.get());
2225                 }
2226                 data->variants.append(
2227                     PutByIdVariant::replace(currentSet, currentOffset, InferredType::Top));
2228             }
2229
2230             return m_graph.addNode(
2231                 MultiPutByOffset,
2232                 origin.takeValidExit(canExit),
2233                 OpInfo(data),
2234                 Edge(base, KnownCellUse),
2235                 value->defaultEdge());
2236             break;
2237         }
2238
2239         case ClosureVarPLoc: {
2240             return m_graph.addNode(
2241                 PutClosureVar,
2242                 origin.takeValidExit(canExit),
2243                 OpInfo(location.info()),
2244                 Edge(base, KnownCellUse),
2245                 value->defaultEdge());
2246             break;
2247         }
2248
2249         default:
2250             DFG_CRASH(m_graph, base, "Bad location kind");
2251             break;
2252         }
2253     }
2254
2255     // This is a great way of asking value->isStillValid() without having to worry about getting
2256     // different answers. It turns out that this analysis works OK regardless of what this
2257     // returns but breaks badly if this changes its mind for any particular InferredValue. This
2258     // method protects us from that.
2259     bool isStillValid(InferredValue* value)
2260     {
2261         return m_validInferredValues.add(value, value->isStillValid()).iterator->value;
2262     }
2263
    // SSA calculators used to place Phis for, respectively, promoted heap locations
    // ("pointers" into sunken allocations) and the sunken allocations themselves.
    SSACalculator m_pointerSSA;
    SSACalculator m_allocationSSA;
    // The allocation nodes this phase decided to sink.
    NodeSet m_sinkCandidates;
    // Mapping from each promoted heap location / escapee node to its SSA variable.
    HashMap<PromotedHeapLocation, SSACalculator::Variable*> m_locationToVariable;
    HashMap<Node*, SSACalculator::Variable*> m_nodeToVariable;
    // Per-block working state: current value for each promoted location, and the
    // current materialization for each escapee, while walking a block.
    HashMap<PromotedHeapLocation, Node*> m_localMapping;
    HashMap<Node*, Node*> m_escapeeToMaterialization;
    InsertionSet m_insertionSet;
    CombinedLiveness m_combinedLiveness;

    // Memoization table backing isStillValid(); keeps answers stable per InferredValue.
    HashMap<InferredValue*, bool> m_validInferredValues;

    // Materialization bookkeeping: inverse mapping back to the escapee, plus, for each
    // materialization site, the materializations, recoveries, and hints to emit there.
    HashMap<Node*, Node*> m_materializationToEscapee;
    HashMap<Node*, Vector<Node*>> m_materializationSiteToMaterializations;
    HashMap<Node*, Vector<PromotedHeapLocation>> m_materializationSiteToRecoveries;
    HashMap<Node*, Vector<std::pair<PromotedHeapLocation, Node*>>> m_materializationSiteToHints;

    // All promoted locations known for a given allocation node.
    HashMap<Node*, Vector<PromotedHeapLocation>> m_locationsForAllocation;

    // Abstract heap state at the head and tail of each block, and the heap being
    // mutated while walking the current block.
    BlockMap<LocalHeap> m_heapAtHead;
    BlockMap<LocalHeap> m_heapAtTail;
    LocalHeap m_heap;

    // Shared bottom value node; lazily created, hence initially null.
    Node* m_bottom = nullptr;
2288 };
2289
2290 }
2291
2292 bool performObjectAllocationSinking(Graph& graph)
2293 {
2294     return runPhase<ObjectAllocationSinkingPhase>(graph);
2295 }
2296
2297 } } // namespace JSC::DFG
2298
2299 #endif // ENABLE(DFG_JIT)