db8dd6fb4922ee39c33867213f5b3f9b7bfbfc28
[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGObjectAllocationSinkingPhase.cpp
1 /*
2  * Copyright (C) 2015 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24  */
25
26 #include "config.h"
27 #include "DFGObjectAllocationSinkingPhase.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "DFGBlockMapInlines.h"
32 #include "DFGClobbersExitState.h"
33 #include "DFGCombinedLiveness.h"
34 #include "DFGGraph.h"
35 #include "DFGInsertionSet.h"
36 #include "DFGLazyNode.h"
37 #include "DFGLivenessAnalysisPhase.h"
38 #include "DFGOSRAvailabilityAnalysisPhase.h"
39 #include "DFGPhase.h"
40 #include "DFGPromotedHeapLocation.h"
41 #include "DFGSSACalculator.h"
42 #include "DFGValidate.h"
43 #include "JSCInlines.h"
44
45 #include <list>
46
47 namespace JSC { namespace DFG {
48
49 namespace {
50
// Debug-logging switch for this phase; read-only at runtime, so make it
// const (it already has internal linkage via the anonymous namespace).
const bool verbose = false;
52
53 // In order to sink object cycles, we use a points-to analysis coupled
54 // with an escape analysis. This analysis is actually similar to an
55 // abstract interpreter focused on local allocations and ignoring
56 // everything else.
57 //
58 // We represent the local heap using two mappings:
59 //
60 // - A set of the local allocations present in the function, where
61 //   each of those have a further mapping from
62 //   PromotedLocationDescriptor to local allocations they must point
63 //   to.
64 //
65 // - A "pointer" mapping from nodes to local allocations, if they must
66 //   be equal to said local allocation and are currently live. This
67 //   can be because the node is the actual node that created the
68 //   allocation, or any other node that must currently point to it -
69 //   we don't make a difference.
70 //
71 // The following graph is a motivation for why we separate allocations
72 // from pointers:
73 //
74 // Block #0
75 //  0: NewObject({})
76 //  1: NewObject({})
77 //  -: PutByOffset(@0, @1, x)
78 //  -: PutStructure(@0, {x:0})
79 //  2: GetByOffset(@0, x)
80 //  -: Jump(#1)
81 //
82 // Block #1
83 //  -: Return(@2)
84 //
85 // Here, we need to remember in block #1 that @2 points to a local
86 // allocation with appropriate fields and structures information
87 // (because we should be able to place a materialization on top of
88 // block #1 here), even though @1 is dead. We *could* just keep @1
89 // artificially alive here, but there is no real reason to do it:
90 // after all, by the end of block #0, @1 and @2 should be completely
91 // interchangeable, and there is no reason for us to artificially make
92 // @1 more important.
93 //
94 // An important point to consider to understand this separation is
// that we should think of the local heap as follows: we have a
96 // bunch of nodes that are pointers to "allocations" that live
97 // someplace on the heap, and those allocations can have pointers in
98 // between themselves as well. We shouldn't care about whatever
// names we give to the allocations; what matters when
100 // comparing/merging two heaps is the isomorphism/comparison between
101 // the allocation graphs as seen by the nodes.
102 //
103 // For instance, in the following graph:
104 //
105 // Block #0
106 //  0: NewObject({})
107 //  -: Branch(#1, #2)
108 //
109 // Block #1
110 //  1: NewObject({})
111 //  -: PutByOffset(@0, @1, x)
112 //  -: PutStructure(@0, {x:0})
113 //  -: Jump(#3)
114 //
115 // Block #2
116 //  2: NewObject({})
117 //  -: PutByOffset(@2, undefined, x)
118 //  -: PutStructure(@2, {x:0})
119 //  -: PutByOffset(@0, @2, x)
120 //  -: PutStructure(@0, {x:0})
121 //  -: Jump(#3)
122 //
123 // Block #3
124 //  -: Return(@0)
125 //
126 // we should think of the heaps at tail of blocks #1 and #2 as being
127 // exactly the same, even though one has @0.x pointing to @1 and the
128 // other has @0.x pointing to @2, because in essence this should not
129 // be different from the graph where we hoisted @1 and @2 into a
130 // single allocation in block #0. We currently will not handle this
131 // case, because we merge allocations based on the node they are
132 // coming from, but this is only a technicality for the sake of
133 // simplicity that shouldn't hide the deeper idea outlined here.
134
135 class Allocation {
136 public:
137     // We use Escaped as a special allocation kind because when we
138     // decide to sink an allocation, we still need to keep track of it
139     // once it is escaped if it still has pointers to it in order to
140     // replace any use of those pointers by the corresponding
141     // materialization
142     enum class Kind { Escaped, Object, Activation, Function, NewArrowFunction };
143
144     explicit Allocation(Node* identifier = nullptr, Kind kind = Kind::Escaped)
145         : m_identifier(identifier)
146         , m_kind(kind)
147     {
148     }
149
150
151     const HashMap<PromotedLocationDescriptor, Node*>& fields() const
152     {
153         return m_fields;
154     }
155
156     Node* get(PromotedLocationDescriptor descriptor)
157     {
158         return m_fields.get(descriptor);
159     }
160
161     Allocation& set(PromotedLocationDescriptor descriptor, Node* value)
162     {
163         // Pointing to anything else than an unescaped local
164         // allocation is represented by simply not having the
165         // field
166         if (value)
167             m_fields.set(descriptor, value);
168         else
169             m_fields.remove(descriptor);
170         return *this;
171     }
172
173     void remove(PromotedLocationDescriptor descriptor)
174     {
175         set(descriptor, nullptr);
176     }
177
178     bool hasStructures() const
179     {
180         switch (kind()) {
181         case Kind::Object:
182             return true;
183
184         default:
185             return false;
186         }
187     }
188
189     Allocation& setStructures(const StructureSet& structures)
190     {
191         ASSERT(hasStructures() && !structures.isEmpty());
192         m_structures = structures;
193         return *this;
194     }
195
196     Allocation& mergeStructures(const StructureSet& structures)
197     {
198         ASSERT(hasStructures() || structures.isEmpty());
199         m_structures.merge(structures);
200         return *this;
201     }
202
203     Allocation& filterStructures(const StructureSet& structures)
204     {
205         ASSERT(hasStructures());
206         m_structures.filter(structures);
207         return *this;
208     }
209
210     const StructureSet& structures() const
211     {
212         return m_structures;
213     }
214
215     Node* identifier() const { return m_identifier; }
216
217     Kind kind() const { return m_kind; }
218
219     bool isEscapedAllocation() const
220     {
221         return kind() == Kind::Escaped;
222     }
223
224     bool isObjectAllocation() const
225     {
226         return m_kind == Kind::Object;
227     }
228
229     bool isActivationAllocation() const
230     {
231         return m_kind == Kind::Activation;
232     }
233
234     bool isFunctionAllocation() const
235     {
236         return m_kind == Kind::Function || m_kind == Kind::NewArrowFunction;
237     }
238     
239     bool isArrowFunctionAllocation() const
240     {
241         return m_kind == Kind::NewArrowFunction;
242     }
243
244     bool operator==(const Allocation& other) const
245     {
246         return m_identifier == other.m_identifier
247             && m_kind == other.m_kind
248             && m_fields == other.m_fields
249             && m_structures == other.m_structures;
250     }
251
252     bool operator!=(const Allocation& other) const
253     {
254         return !(*this == other);
255     }
256
257     void dump(PrintStream& out) const
258     {
259         dumpInContext(out, nullptr);
260     }
261
262     void dumpInContext(PrintStream& out, DumpContext* context) const
263     {
264         switch (m_kind) {
265         case Kind::Escaped:
266             out.print("Escaped");
267             break;
268
269         case Kind::Object:
270             out.print("Object");
271             break;
272
273         case Kind::Function:
274             out.print("Function");
275             break;
276                 
277         case Kind::NewArrowFunction:
278             out.print("NewArrowFunction");
279             break;
280
281         case Kind::Activation:
282             out.print("Activation");
283             break;
284         }
285         out.print("Allocation(");
286         if (!m_structures.isEmpty())
287             out.print(inContext(m_structures, context));
288         if (!m_fields.isEmpty()) {
289             if (!m_structures.isEmpty())
290                 out.print(", ");
291             out.print(mapDump(m_fields, " => #", ", "));
292         }
293         out.print(")");
294     }
295
296 private:
297     Node* m_identifier; // This is the actual node that created the allocation
298     Kind m_kind;
299     HashMap<PromotedLocationDescriptor, Node*> m_fields;
300     StructureSet m_structures;
301 };
302
// The abstract local heap: a set of allocations (keyed by their
// identifier node) plus a pointer map from live nodes to the
// allocation they must point to. See the long comment above for the
// rationale behind separating pointers from allocations.
class LocalHeap {
public:
    // Registers a brand-new allocation for `node`; the node also becomes
    // a pointer to itself. The node must not already be known as a
    // pointer or as an allocation.
    Allocation& newAllocation(Node* node, Allocation::Kind kind)
    {
        ASSERT(!m_pointers.contains(node) && !isAllocation(node));
        m_pointers.add(node, node);
        return m_allocations.set(node, Allocation(node, kind)).iterator->value;
    }

    // True if `identifier` is an allocation identifier tracked by this heap.
    bool isAllocation(Node* identifier) const
    {
        return m_allocations.contains(identifier);
    }

    // Note that this is fundamentally different from
    // onlyLocalAllocation() below. getAllocation() takes as argument
    // a node-as-identifier, that is, an allocation node. This
    // allocation node doesn't have to be alive; it may only be
    // pointed to by other nodes or allocation fields.
    // For instance, in the following graph:
    //
    // Block #0
    //  0: NewObject({})
    //  1: NewObject({})
    //  -: PutByOffset(@0, @1, x)
    //  -: PutStructure(@0, {x:0})
    //  2: GetByOffset(@0, x)
    //  -: Jump(#1)
    //
    // Block #1
    //  -: Return(@2)
    //
    // At head of block #1, the only reachable allocation is #@1,
    // which can be reached through node @2. Thus, getAllocation(#@1)
    // contains the appropriate metadata for this allocation, but
    // onlyLocalAllocation(@1) is null, as @1 is no longer a pointer
    // to #@1 (since it is dead). Conversely, onlyLocalAllocation(@2)
    // is the same as getAllocation(#@1), while getAllocation(#@2)
    // does not make sense since @2 is not an allocation node.
    //
    // This is meant to be used when the node is already known to be
    // an identifier (i.e. an allocation) - probably because it was
    // found as value of a field or pointer in the current heap, or
    // was the result of a call to follow(). In any other cases (such
    // as when doing anything while traversing the graph), the
    // appropriate function to call is probably onlyLocalAllocation.
    Allocation& getAllocation(Node* identifier)
    {
        auto iter = m_allocations.find(identifier);
        ASSERT(iter != m_allocations.end());
        return iter->value;
    }

    // Records that `node` must point to the allocation identified by
    // `identifier`. The node must not already be a pointer or an allocation.
    void newPointer(Node* node, Node* identifier)
    {
        ASSERT(!m_allocations.contains(node) && !m_pointers.contains(node));
        ASSERT(isAllocation(identifier));
        m_pointers.add(node, identifier);
    }

    // follow solves the points-to problem. Given a live node, which
    // may be either an allocation itself or a heap read (e.g. a
    // GetByOffset node), it returns the corresponding allocation
    // node, if there is one. If the argument node is neither an
    // allocation or a heap read, or may point to different nodes,
    // nullptr will be returned. Note that a node that points to
    // different nodes can never point to an unescaped local
    // allocation.
    Node* follow(Node* node) const
    {
        auto iter = m_pointers.find(node);
        ASSERT(iter == m_pointers.end() || m_allocations.contains(iter->value));
        return iter == m_pointers.end() ? nullptr : iter->value;
    }

    // Same idea, but resolves a field of an allocation (base must be a
    // tracked allocation identifier) rather than a node.
    Node* follow(PromotedHeapLocation location) const
    {
        const Allocation& base = m_allocations.find(location.base())->value;
        auto iter = base.fields().find(location.descriptor());

        if (iter == base.fields().end())
            return nullptr;

        return iter->value;
    }

    // onlyLocalAllocation finds the corresponding allocation metadata
    // for any live node. onlyLocalAllocation(node) is essentially
    // getAllocation(follow(node)), with appropriate null handling.
    Allocation* onlyLocalAllocation(Node* node)
    {
        Node* identifier = follow(node);
        if (!identifier)
            return nullptr;

        return &getAllocation(identifier);
    }

    Allocation* onlyLocalAllocation(PromotedHeapLocation location)
    {
        Node* identifier = follow(location);
        if (!identifier)
            return nullptr;

        return &getAllocation(identifier);
    }

    // This allows us to store the escapees only when necessary. If
    // set, the current escapees can be retrieved at any time using
    // takeEscapees(), which will clear the cached set of escapees;
    // otherwise the heap won't remember escaping allocations.
    void setWantEscapees()
    {
        m_wantEscapees = true;
    }

    // Moves out (and thereby clears) the recorded escapees.
    HashMap<Node*, Allocation> takeEscapees()
    {
        return WTF::move(m_escapees);
    }

    // Marks whatever allocation `node` points to (if any) as escaped.
    void escape(Node* node)
    {
        Node* identifier = follow(node);
        if (!identifier)
            return;

        escapeAllocation(identifier);
    }

    // Merges `other` into this heap at a control-flow join. Allocations
    // and pointers that disagree between the two heaps are escaped; the
    // result is then pruned of unreachable allocations.
    void merge(const LocalHeap& other)
    {
        assertIsValid();
        other.assertIsValid();
        ASSERT(!m_wantEscapees);

        // A heap that has not been reached yet simply adopts the
        // incoming heap wholesale.
        if (!reached()) {
            ASSERT(other.reached());
            *this = other;
            return;
        }

        HashSet<Node*> toEscape;

        for (auto& allocationEntry : other.m_allocations)
            m_allocations.add(allocationEntry.key, allocationEntry.value);
        for (auto& allocationEntry : m_allocations) {
            auto allocationIter = other.m_allocations.find(allocationEntry.key);

            // If we have it and they don't, it died for them but we
            // are keeping it alive from another field somewhere.
            // There is nothing to do - we will be escaped
            // automatically when we handle that other field.
            // This will also happen for allocations that we have and
            // they don't, and all of those will get pruned.
            if (allocationIter == other.m_allocations.end())
                continue;

            if (allocationEntry.value.kind() != allocationIter->value.kind()) {
                // Kind mismatch: escape the allocation and everything
                // the other heap's version of it points to.
                toEscape.add(allocationEntry.key);
                for (const auto& fieldEntry : allocationIter->value.fields())
                    toEscape.add(fieldEntry.value);
            } else {
                mergePointerSets(
                    allocationEntry.value.fields(), allocationIter->value.fields(),
                    [&] (Node* identifier) {
                        toEscape.add(identifier);
                    },
                    [&] (PromotedLocationDescriptor field) {
                        allocationEntry.value.remove(field);
                    });
                allocationEntry.value.mergeStructures(allocationIter->value.structures());
            }
        }

        mergePointerSets(m_pointers, other.m_pointers,
            [&] (Node* identifier) {
                toEscape.add(identifier);
            },
            [&] (Node* field) {
                m_pointers.remove(field);
            });

        for (Node* identifier : toEscape)
            escapeAllocation(identifier);

        if (!ASSERT_DISABLED) {
            // Sanity: any allocation we kept unescaped must also exist
            // in the other heap.
            for (const auto& entry : m_allocations)
                ASSERT_UNUSED(entry, entry.value.isEscapedAllocation() || other.m_allocations.contains(entry.key));
        }

        // If there is no remaining pointer to an allocation, we can
        // remove it. This should only happen for escaped allocations,
        // because we only merge liveness-pruned heaps in the first
        // place.
        prune();

        assertIsValid();
    }

    // Drops pointers whose node is not in `live`, then prunes
    // allocations no longer reachable from any remaining pointer.
    void pruneByLiveness(const HashSet<Node*>& live)
    {
        Vector<Node*> toRemove;
        for (const auto& entry : m_pointers) {
            if (!live.contains(entry.key))
                toRemove.append(entry.key);
        }
        for (Node* node : toRemove)
            m_pointers.remove(node);

        prune();
    }

    // Debug-only invariant check: pointers and fields must reference
    // tracked allocations.
    void assertIsValid() const
    {
        if (ASSERT_DISABLED)
            return;

        // Pointers should point to an actual allocation
        for (const auto& entry : m_pointers) {
            ASSERT_UNUSED(entry, entry.value);
            ASSERT(m_allocations.contains(entry.value));
        }

        for (const auto& allocationEntry : m_allocations) {
            // Fields should point to an actual allocation
            for (const auto& fieldEntry : allocationEntry.value.fields()) {
                ASSERT_UNUSED(fieldEntry, fieldEntry.value);
                ASSERT(m_allocations.contains(fieldEntry.value));
            }
        }
    }

    bool operator==(const LocalHeap& other) const
    {
        assertIsValid();
        other.assertIsValid();
        return m_allocations == other.m_allocations
            && m_pointers == other.m_pointers;
    }

    bool operator!=(const LocalHeap& other) const
    {
        return !(*this == other);
    }

    const HashMap<Node*, Allocation>& allocations() const
    {
        return m_allocations;
    }

    const HashMap<Node*, Node*>& pointers() const
    {
        return m_pointers;
    }

    void dump(PrintStream& out) const
    {
        out.print("  Allocations:\n");
        for (const auto& entry : m_allocations)
            out.print("    #", entry.key, ": ", entry.value, "\n");
        out.print("  Pointers:\n");
        for (const auto& entry : m_pointers)
            out.print("    ", entry.key, " => #", entry.value, "\n");
    }

    // Whether this heap has been reached by the forward analysis; an
    // unreached heap is replaced wholesale on the first merge.
    bool reached() const
    {
        return m_reached;
    }

    void setReached()
    {
        m_reached = true;
    }

private:
    // When we merge two heaps, we escape all fields of allocations,
    // unless they point to the same thing in both heaps.
    // The reason for this is that it allows us not to do extra work
    // for diamond graphs where we would otherwise have to check
    // whether we have a single definition or not, which would be
    // cumbersome.
    //
    // Note that we should try to unify nodes even when they are not
    // from the same allocation; for instance we should be able to
    // completely eliminate all allocations from the following graph:
    //
    // Block #0
    //  0: NewObject({})
    //  -: Branch(#1, #2)
    //
    // Block #1
    //  1: NewObject({})
    //  -: PutByOffset(@1, "left", val)
    //  -: PutStructure(@1, {val:0})
    //  -: PutByOffset(@0, @1, x)
    //  -: PutStructure(@0, {x:0})
    //  -: Jump(#3)
    //
    // Block #2
    //  2: NewObject({})
    //  -: PutByOffset(@2, "right", val)
    //  -: PutStructure(@2, {val:0})
    //  -: PutByOffset(@0, @2, x)
    //  -: PutStructure(@0, {x:0})
    //  -: Jump(#3)
    //
    // Block #3:
    //  3: GetByOffset(@0, x)
    //  4: GetByOffset(@3, val)
    //  -: Return(@4)
    template<typename Key, typename EscapeFunctor, typename RemoveFunctor>
    void mergePointerSets(
        const HashMap<Key, Node*>& my, const HashMap<Key, Node*>& their,
        const EscapeFunctor& escape, const RemoveFunctor& remove)
    {
        // An entry survives only if present in both maps with the same
        // value; any disagreement escapes the value(s) involved.
        Vector<Key> toRemove;
        for (const auto& entry : my) {
            auto iter = their.find(entry.key);
            if (iter == their.end()) {
                toRemove.append(entry.key);
                escape(entry.value);
            } else if (iter->value != entry.value) {
                toRemove.append(entry.key);
                escape(entry.value);
                escape(iter->value);
            }
        }
        for (const auto& entry : their) {
            if (my.contains(entry.key))
                continue;
            escape(entry.value);
        }
        for (Key key : toRemove)
            remove(key);
    }

    // Replaces the allocation with an Escaped placeholder and
    // recursively escapes everything its fields pointed to. The
    // unescaped metadata is stashed in m_escapees when requested.
    void escapeAllocation(Node* identifier)
    {
        Allocation& allocation = getAllocation(identifier);
        if (allocation.isEscapedAllocation())
            return;

        Allocation unescaped = WTF::move(allocation);
        allocation = Allocation(unescaped.identifier(), Allocation::Kind::Escaped);

        for (const auto& entry : unescaped.fields())
            escapeAllocation(entry.value);

        if (m_wantEscapees)
            m_escapees.add(unescaped.identifier(), WTF::move(unescaped));
    }

    // Garbage-collects allocations not reachable from any pointer,
    // directly or through fields of reachable allocations.
    void prune()
    {
        HashSet<Node*> reachable;
        for (const auto& entry : m_pointers)
            reachable.add(entry.value);

        // Repeatedly mark as reachable allocations in fields of other
        // reachable allocations
        {
            Vector<Node*> worklist;
            worklist.appendRange(reachable.begin(), reachable.end());

            while (!worklist.isEmpty()) {
                Node* identifier = worklist.takeLast();
                Allocation& allocation = m_allocations.find(identifier)->value;
                for (const auto& entry : allocation.fields()) {
                    if (reachable.add(entry.value).isNewEntry)
                        worklist.append(entry.value);
                }
            }
        }

        // Remove unreachable allocations
        {
            Vector<Node*> toRemove;
            for (const auto& entry : m_allocations) {
                if (!reachable.contains(entry.key))
                    toRemove.append(entry.key);
            }
            for (Node* identifier : toRemove)
                m_allocations.remove(identifier);
        }
    }

    bool m_reached = false;
    HashMap<Node*, Node*> m_pointers;
    HashMap<Node*, Allocation> m_allocations;

    // Escapee tracking is opt-in; see setWantEscapees()/takeEscapees().
    bool m_wantEscapees = false;
    HashMap<Node*, Allocation> m_escapees;
};
698
699 class ObjectAllocationSinkingPhase : public Phase {
700 public:
    // NOTE(review): m_pointerSSA/m_allocationSSA are SSACalculators and
    // m_insertionSet an InsertionSet (per the includes); presumably used
    // by the graph-rewriting half of the phase — declared past this view.
    ObjectAllocationSinkingPhase(Graph& graph)
        : Phase(graph, "object allocation elimination")
        , m_pointerSSA(graph)
        , m_allocationSSA(graph)
        , m_insertionSet(graph)
    {
    }
709     bool run()
710     {
711         ASSERT(m_graph.m_form == SSA);
712         ASSERT(m_graph.m_fixpointState == FixpointNotConverged);
713
714         if (!performSinking())
715             return false;
716
717         if (verbose) {
718             dataLog("Graph after elimination:\n");
719             m_graph.dump();
720         }
721
722         return true;
723     }
724
725 private:
    // Driver for the whole phase: runs the prerequisite analyses, the
    // escape analysis, candidate selection, and finally the graph
    // rewrite. Returns false (graph untouched) if nothing can be sunk.
    // The call sequence is order-sensitive: liveness and OSR
    // availability must be computed before CombinedLiveness, and the
    // analysis must run before candidates are determined.
    bool performSinking()
    {
        m_graph.computeRefCounts();
        m_graph.initializeNodeOwners();
        m_graph.ensureDominators();
        performLivenessAnalysis(m_graph);
        performOSRAvailabilityAnalysis(m_graph);
        m_combinedLiveness = CombinedLiveness(m_graph);

        // Snapshot the graph up front so a later validation failure can
        // show what we started from.
        CString graphBeforeSinking;
        if (Options::verboseValidationFailure() && Options::validateGraphAtEachPhase()) {
            StringPrintStream out;
            m_graph.dump(out);
            graphBeforeSinking = out.toCString();
        }

        if (verbose) {
            dataLog("Graph before elimination:\n");
            m_graph.dump();
        }

        performAnalysis();

        // Bail out early, leaving the graph unchanged, if no allocation
        // is worth sinking.
        if (!determineSinkCandidates())
            return false;

        if (verbose) {
            for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
                dataLog("Heap at head of ", *block, ": \n", m_heapAtHead[block]);
                dataLog("Heap at tail of ", *block, ": \n", m_heapAtTail[block]);
            }
        }

        promoteLocalHeap();

        if (Options::validateGraphAtEachPhase())
            DFG::validate(m_graph, DumpGraph, graphBeforeSinking);
        return true;
    }
    // Forward dataflow escape analysis: iterates abstract
    // interpretation of every block (via handleNode with no-op
    // write/resolve functors) until the per-block tail heaps stop
    // changing, propagating each tail heap into successors' head heaps.
    void performAnalysis()
    {
        m_heapAtHead = BlockMap<LocalHeap>(m_graph);
        m_heapAtTail = BlockMap<LocalHeap>(m_graph);

        bool changed;
        do {
            if (verbose)
                dataLog("Doing iteration of escape analysis.\n");
            changed = false;

            for (BasicBlock* block : m_graph.blocksInPreOrder()) {
                m_heapAtHead[block].setReached();
                m_heap = m_heapAtHead[block];

                for (Node* node : *block) {
                    // Analysis-only pass: we neither record heap writes
                    // nor resolve promoted locations yet.
                    handleNode(
                        node,
                        [] (PromotedHeapLocation, LazyNode) { },
                        [&] (PromotedHeapLocation) -> Node* {
                            return nullptr;
                        });
                }

                // Fixpoint check: if the tail heap is unchanged, nothing
                // new to propagate from this block.
                if (m_heap == m_heapAtTail[block])
                    continue;

                m_heapAtTail[block] = m_heap;
                changed = true;

                m_heap.assertIsValid();

                // We keep only pointers that are live, and only
                // allocations that are either live, pointed to by a
                // live pointer, or (recursively) stored in a field of
                // a live allocation.
                //
                // This means we can accidentally leak non-dominating
                // nodes into the successor. However, due to the
                // non-dominance property, we are guaranteed that the
                // successor has at least one predecessor that is not
                // dominated either: this means any reference to a
                // non-dominating allocation in the successor will
                // trigger an escape and get pruned during the merge.
                m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

                for (BasicBlock* successorBlock : block->successors())
                    m_heapAtHead[successorBlock].merge(m_heap);
            }
        } while (changed);
    }
818     template<typename WriteFunctor, typename ResolveFunctor>
819     void handleNode(
820         Node* node,
821         const WriteFunctor& heapWrite,
822         const ResolveFunctor& heapResolve)
823     {
824         m_heap.assertIsValid();
825         ASSERT(m_heap.takeEscapees().isEmpty());
826
827         Allocation* target = nullptr;
828         HashMap<PromotedLocationDescriptor, LazyNode> writes;
829         PromotedLocationDescriptor exactRead;
830
831         switch (node->op()) {
832         case NewObject:
833             target = &m_heap.newAllocation(node, Allocation::Kind::Object);
834             target->setStructures(node->structure());
835             writes.add(
836                 StructurePLoc, LazyNode(m_graph.freeze(node->structure())));
837             break;
838
839         case NewFunction:
840         case NewArrowFunction: {
841             bool isArrowFunction = node->op() == NewArrowFunction;
842             if (node->castOperand<FunctionExecutable*>()->singletonFunction()->isStillValid()) {
843                 m_heap.escape(node->child1().node());
844                 break;
845             }
846             
847             target = &m_heap.newAllocation(node, isArrowFunction ? Allocation::Kind::NewArrowFunction : Allocation::Kind::Function);
848             writes.add(FunctionExecutablePLoc, LazyNode(node->cellOperand()));
849             writes.add(FunctionActivationPLoc, LazyNode(node->child1().node()));
850             if (isArrowFunction)
851                 writes.add(ArrowFunctionBoundThisPLoc, LazyNode(node->child2().node()));
852             break;
853         }
854
855         case CreateActivation: {
856             if (node->castOperand<SymbolTable*>()->singletonScope()->isStillValid()) {
857                 m_heap.escape(node->child1().node());
858                 break;
859             }
860             target = &m_heap.newAllocation(node, Allocation::Kind::Activation);
861             writes.add(ActivationSymbolTablePLoc, LazyNode(node->cellOperand()));
862             writes.add(ActivationScopePLoc, LazyNode(node->child1().node()));
863             {
864                 SymbolTable* symbolTable = node->castOperand<SymbolTable*>();
865                 ConcurrentJITLocker locker(symbolTable->m_lock);
866                 LazyNode initialValue(m_graph.freeze(node->initializationValueForActivation()));
867                 for (auto iter = symbolTable->begin(locker), end = symbolTable->end(locker); iter != end; ++iter) {
868                     writes.add(
869                         PromotedLocationDescriptor(ClosureVarPLoc, iter->value.scopeOffset().offset()),
870                         initialValue);
871                 }
872             }
873             break;
874         }
875
876         case PutStructure:
877             target = m_heap.onlyLocalAllocation(node->child1().node());
878             if (target && target->isObjectAllocation()) {
879                 writes.add(StructurePLoc, LazyNode(m_graph.freeze(JSValue(node->transition()->next))));
880                 target->setStructures(node->transition()->next);
881             } else
882                 m_heap.escape(node->child1().node());
883             break;
884
885         case CheckStructure: {
886             Allocation* allocation = m_heap.onlyLocalAllocation(node->child1().node());
887             if (allocation && allocation->isObjectAllocation()) {
888                 allocation->filterStructures(node->structureSet());
889                 if (Node* value = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc)))
890                     node->convertToCheckStructureImmediate(value);
891             } else
892                 m_heap.escape(node->child1().node());
893             break;
894         }
895
896         case GetByOffset:
897         case GetGetterSetterByOffset:
898             target = m_heap.onlyLocalAllocation(node->child2().node());
899             if (target && target->isObjectAllocation()) {
900                 unsigned identifierNumber = node->storageAccessData().identifierNumber;
901                 exactRead = PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber);
902             } else {
903                 m_heap.escape(node->child1().node());
904                 m_heap.escape(node->child2().node());
905             }
906             break;
907
908         case MultiGetByOffset: {
909             Allocation* allocation = m_heap.onlyLocalAllocation(node->child1().node());
910             if (allocation && allocation->isObjectAllocation()) {
911                 MultiGetByOffsetData& data = node->multiGetByOffsetData();
912                 StructureSet validStructures;
913                 bool hasInvalidStructures = false;
914                 for (const auto& multiGetByOffsetCase : data.cases) {
915                     if (!allocation->structures().overlaps(multiGetByOffsetCase.set()))
916                         continue;
917
918                     switch (multiGetByOffsetCase.method().kind()) {
919                     case GetByOffsetMethod::LoadFromPrototype: // We need to escape those
920                     case GetByOffsetMethod::Constant: // We don't really have a way of expressing this
921                         hasInvalidStructures = true;
922                         break;
923
924                     case GetByOffsetMethod::Load: // We're good
925                         validStructures.merge(multiGetByOffsetCase.set());
926                         break;
927
928                     default:
929                         RELEASE_ASSERT_NOT_REACHED();
930                     }
931                 }
932                 if (hasInvalidStructures) {
933                     m_heap.escape(node->child1().node());
934                     break;
935                 }
936                 unsigned identifierNumber = data.identifierNumber;
937                 PromotedHeapLocation location(NamedPropertyPLoc, allocation->identifier(), identifierNumber);
938                 if (Node* value = heapResolve(location)) {
939                     if (allocation->structures().isSubsetOf(validStructures))
940                         node->replaceWith(value);
941                     else {
942                         Node* structure = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc));
943                         ASSERT(structure);
944                         allocation->filterStructures(validStructures);
945                         node->convertToCheckStructure(m_graph.addStructureSet(allocation->structures()));
946                         node->convertToCheckStructureImmediate(structure);
947                         node->setReplacement(value);
948                     }
949                 } else if (!allocation->structures().isSubsetOf(validStructures)) {
950                     // Even though we don't need the result here, we still need
951                     // to make the call to tell our caller that we could need
952                     // the StructurePLoc.
953                     // The reason for this is that when we decide not to sink a
954                     // node, we will still lower any read to its fields before
955                     // it escapes (which are usually reads across a function
956                     // call that DFGClobberize can't handle) - but we only do
957                     // this for PromotedHeapLocations that we have seen read
958                     // during the analysis!
959                     heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc));
960                     allocation->filterStructures(validStructures);
961                 }
962                 Node* identifier = allocation->get(location.descriptor());
963                 if (identifier)
964                     m_heap.newPointer(node, identifier);
965             } else
966                 m_heap.escape(node->child1().node());
967             break;
968         }
969
970         case PutByOffset:
971             target = m_heap.onlyLocalAllocation(node->child2().node());
972             if (target && target->isObjectAllocation()) {
973                 unsigned identifierNumber = node->storageAccessData().identifierNumber;
974                 writes.add(
975                     PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber),
976                     LazyNode(node->child3().node()));
977             } else {
978                 m_heap.escape(node->child1().node());
979                 m_heap.escape(node->child2().node());
980                 m_heap.escape(node->child3().node());
981             }
982             break;
983
984         case GetClosureVar:
985             target = m_heap.onlyLocalAllocation(node->child1().node());
986             if (target && target->isActivationAllocation()) {
987                 exactRead =
988                     PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset());
989             } else
990                 m_heap.escape(node->child1().node());
991             break;
992
993         case PutClosureVar:
994             target = m_heap.onlyLocalAllocation(node->child1().node());
995             if (target && target->isActivationAllocation()) {
996                 writes.add(
997                     PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset()),
998                     LazyNode(node->child2().node()));
999             } else {
1000                 m_heap.escape(node->child1().node());
1001                 m_heap.escape(node->child2().node());
1002             }
1003             break;
1004
1005         case SkipScope:
1006             target = m_heap.onlyLocalAllocation(node->child1().node());
1007             if (target && target->isActivationAllocation())
1008                 exactRead = ActivationScopePLoc;
1009             else
1010                 m_heap.escape(node->child1().node());
1011             break;
1012
1013         case GetExecutable:
1014             target = m_heap.onlyLocalAllocation(node->child1().node());
1015             if (target && target->isFunctionAllocation())
1016                 exactRead = FunctionExecutablePLoc;
1017             else
1018                 m_heap.escape(node->child1().node());
1019             break;
1020
1021         case LoadArrowFunctionThis:
1022             target = m_heap.onlyLocalAllocation(node->child1().node());
1023             if (target && target->isArrowFunctionAllocation())
1024                 exactRead = ArrowFunctionBoundThisPLoc;
1025             else
1026                 m_heap.escape(node->child1().node());
1027             break;
1028         
1029         case GetScope:
1030             target = m_heap.onlyLocalAllocation(node->child1().node());
1031             if (target && target->isFunctionAllocation())
1032                 exactRead = FunctionActivationPLoc;
1033             else
1034                 m_heap.escape(node->child1().node());
1035             break;
1036
1037         case Check:
1038             m_graph.doToChildren(
1039                 node,
1040                 [&] (Edge edge) {
1041                     if (edge.willNotHaveCheck())
1042                         return;
1043
1044                     if (alreadyChecked(edge.useKind(), SpecObject))
1045                         return;
1046
1047                     m_heap.escape(edge.node());
1048                 });
1049             break;
1050
1051         case MovHint:
1052         case PutHint:
1053             // Handled by OSR availability analysis
1054             break;
1055
1056         default:
1057             m_graph.doToChildren(
1058                 node,
1059                 [&] (Edge edge) {
1060                     m_heap.escape(edge.node());
1061                 });
1062             break;
1063         }
1064
1065         if (exactRead) {
1066             ASSERT(target);
1067             ASSERT(writes.isEmpty());
1068             if (Node* value = heapResolve(PromotedHeapLocation(target->identifier(), exactRead))) {
1069                 ASSERT(!value->replacement());
1070                 node->replaceWith(value);
1071             }
1072             Node* identifier = target->get(exactRead);
1073             if (identifier)
1074                 m_heap.newPointer(node, identifier);
1075         }
1076
1077         for (auto entry : writes) {
1078             ASSERT(target);
1079             if (entry.value.isNode())
1080                 target->set(entry.key, m_heap.follow(entry.value.asNode()));
1081             else
1082                 target->remove(entry.key);
1083             heapWrite(PromotedHeapLocation(target->identifier(), entry.key), entry.value);
1084         }
1085
1086         m_heap.assertIsValid();
1087     }
1088
    // Decides which local allocations are worth sinking, and places the
    // materialization nodes for every allocation that escapes.
    //
    // Returns true if another fixpoint iteration of the phase is useful:
    // either some promoted location was read from an unescaped allocation
    // (so lowering those reads may enable more sinking), or we actually
    // found sink candidates.
    bool determineSinkCandidates()
    {
        m_sinkCandidates.clear();
        m_materializationToEscapee.clear();
        m_materializationSiteToMaterializations.clear();
        m_materializationSiteToRecoveries.clear();

        // Logically we wish to consider every allocation and sink
        // it. However, it is probably not profitable to sink an
        // allocation that will always escape. So, we only sink an
        // allocation if one of the following is true:
        //
        // 1) There exists a basic block with only backwards outgoing
        //    edges (or no outgoing edges) in which the node wasn't
        //    materialized. This is meant to catch
        //    effectively-infinite loops in which we don't need to
        //    have allocated the object.
        //
        // 2) There exists a basic block at the tail of which the node
        //    is dead and not materialized.
        //
        // 3) The sum of execution counts of the materializations is
        //    less than the sum of execution counts of the original
        //    node.
        //
        // We currently implement only rule #2.
        // FIXME: Implement the two other rules.
        // https://bugs.webkit.org/show_bug.cgi?id=137073 (rule #1)
        // https://bugs.webkit.org/show_bug.cgi?id=137074 (rule #3)
        //
        // However, these rules allow for a sunk object to be put into
        // a non-sunk one, which we don't support. We could solve this
        // by supporting PutHints on local allocations, making these
        // objects only partially correct, and we would need to adapt
        // the OSR availability analysis and OSR exit to handle
        // this. This would be totally doable, but would create a
        // super rare, and thus bug-prone, code path.
        // So, instead, we need to implement one of the following
        // closure rules:
        //
        // 1) If we put a sink candidate into a local allocation that
        //    is not a sink candidate, change our minds and don't
        //    actually sink the sink candidate.
        //
        // 2) If we put a sink candidate into a local allocation, that
        //    allocation becomes a sink candidate as well.
        //
        // We currently choose to implement closure rule #2.
        //
        // dependencies maps a stored allocation to the allocations it was
        // stored into; it drives the closure-rule-#2 worklist below.
        HashMap<Node*, Vector<Node*>> dependencies;
        bool hasUnescapedReads = false;
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            // Replay the abstract heap from the block head so that the
            // per-node handling below sees the state the fixpoint computed.
            m_heap = m_heapAtHead[block];

            for (Node* node : *block) {
                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        if (!value.isNode())
                            return;

                        // A write of one unescaped allocation into another:
                        // record the dependency for the closure pass.
                        Allocation* allocation = m_heap.onlyLocalAllocation(value.asNode());
                        if (allocation && !allocation->isEscapedAllocation())
                            dependencies.add(allocation->identifier(), Vector<Node*>()).iterator->value.append(location.base());
                    },
                    [&] (PromotedHeapLocation) -> Node* {
                        // A read from an unescaped allocation; even if we end
                        // up with no candidates, this makes the phase report
                        // progress so reads get lowered.
                        hasUnescapedReads = true;
                        return nullptr;
                    });
            }

            // The sink candidates are initially the unescaped
            // allocations dying at tail of blocks
            HashSet<Node*> allocations;
            for (const auto& entry : m_heap.allocations()) {
                if (!entry.value.isEscapedAllocation())
                    allocations.add(entry.key);
            }

            m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

            // Whatever pruning removed was unescaped-and-dead at the block
            // tail: exactly rule #2 above.
            for (Node* identifier : allocations) {
                if (!m_heap.isAllocation(identifier))
                    m_sinkCandidates.add(identifier);
            }
        }

        // Ensure that the set of sink candidates is closed for put operations
        // (closure rule #2: anything a candidate is stored into becomes a
        // candidate too).
        Vector<Node*> worklist;
        worklist.appendRange(m_sinkCandidates.begin(), m_sinkCandidates.end());

        while (!worklist.isEmpty()) {
            for (Node* identifier : dependencies.get(worklist.takeLast())) {
                if (m_sinkCandidates.add(identifier).isNewEntry)
                    worklist.append(identifier);
            }
        }

        if (m_sinkCandidates.isEmpty())
            return hasUnescapedReads;

        if (verbose)
            dataLog("Candidates: ", listDump(m_sinkCandidates), "\n");

        // Create the materialization nodes
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            m_heap = m_heapAtHead[block];
            // Ask the heap to record which allocations escape at each node so
            // we can place materializations right where they escape.
            m_heap.setWantEscapees();

            for (Node* node : *block) {
                handleNode(
                    node,
                    [] (PromotedHeapLocation, LazyNode) { },
                    [] (PromotedHeapLocation) -> Node* {
                        return nullptr;
                    });
                auto escapees = m_heap.takeEscapees();
                if (!escapees.isEmpty())
                    placeMaterializations(escapees, node);
            }

            m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

            {
                // Allocations still live here but not live-and-unescaped at
                // the head of every successor must be materialized on the
                // edge, i.e. at this block's terminal.
                HashMap<Node*, Allocation> escapingOnEdge;
                for (const auto& entry : m_heap.allocations()) {
                    if (entry.value.isEscapedAllocation())
                        continue;

                    bool mustEscape = false;
                    for (BasicBlock* successorBlock : block->successors()) {
                        if (!m_heapAtHead[successorBlock].isAllocation(entry.key)
                            || m_heapAtHead[successorBlock].getAllocation(entry.key).isEscapedAllocation())
                            mustEscape = true;
                    }

                    if (mustEscape)
                        escapingOnEdge.add(entry.key, entry.value);
                }
                placeMaterializations(WTF::move(escapingOnEdge), block->terminal());
            }
        }

        return hasUnescapedReads || !m_sinkCandidates.isEmpty();
    }
1233
1234     void placeMaterializations(HashMap<Node*, Allocation> escapees, Node* where)
1235     {
1236         // We don't create materializations if the escapee is not a
1237         // sink candidate
1238         Vector<Node*> toRemove;
1239         for (const auto& entry : escapees) {
1240             if (!m_sinkCandidates.contains(entry.key))
1241                 toRemove.append(entry.key);
1242         }
1243         for (Node* identifier : toRemove)
1244             escapees.remove(identifier);
1245
1246         if (escapees.isEmpty())
1247             return;
1248
1249         // First collect the hints that will be needed when the node
1250         // we materialize is still stored into other unescaped sink candidates
1251         Vector<PromotedHeapLocation> hints;
1252         for (const auto& entry : m_heap.allocations()) {
1253             if (escapees.contains(entry.key))
1254                 continue;
1255
1256             for (const auto& field : entry.value.fields()) {
1257                 ASSERT(m_sinkCandidates.contains(entry.key) || !escapees.contains(field.value));
1258                 if (escapees.contains(field.value) && !field.key.neededForMaterialization())
1259                     hints.append(PromotedHeapLocation(entry.key, field.key));
1260             }
1261         }
1262
1263         // Now we need to order the materialization. Any order is
1264         // valid (as long as we materialize a node first if it is
1265         // needed for the materialization of another node, e.g. a
1266         // function's activation must be materialized before the
1267         // function itself), but we want to try minimizing the number
1268         // of times we have to place Puts to close cycles after a
1269         // materialization. In other words, we are trying to find the
1270         // minimum number of materializations to remove from the
1271         // materialization graph to make it a DAG, known as the
1272         // (vertex) feedback set problem. Unfortunately, this is a
1273         // NP-hard problem, which we don't want to solve exactly.
1274         //
1275         // Instead, we use a simple greedy procedure, that procedes as
1276         // follow:
1277         //  - While there is at least one node with no outgoing edge
1278         //    amongst the remaining materializations, materialize it
1279         //    first
1280         //
1281         //  - Similarily, while there is at least one node with no
1282         //    incoming edge amongst the remaining materializations,
1283         //    materialize it last.
1284         //
1285         //  - When both previous conditions are false, we have an
1286         //    actual cycle, and we need to pick a node to
1287         //    materialize. We try greedily to remove the "pressure" on
1288         //    the remaining nodes by choosing the node with maximum
1289         //    |incoming edges| * |outgoing edges| as a measure of how
1290         //    "central" to the graph it is. We materialize it first,
1291         //    so that all the recoveries will be Puts of things into
1292         //    it (rather than Puts of the materialization into other
1293         //    objects), which means we will have a single
1294         //    StoreBarrier.
1295
1296
1297         // Compute dependencies between materializations
1298         HashMap<Node*, HashSet<Node*>> dependencies;
1299         HashMap<Node*, HashSet<Node*>> reverseDependencies;
1300         HashMap<Node*, HashSet<Node*>> forMaterialization;
1301         for (const auto& entry : escapees) {
1302             auto& myDependencies = dependencies.add(entry.key, HashSet<Node*>()).iterator->value;
1303             auto& myDependenciesForMaterialization = forMaterialization.add(entry.key, HashSet<Node*>()).iterator->value;
1304             reverseDependencies.add(entry.key, HashSet<Node*>());
1305             for (const auto& field : entry.value.fields()) {
1306                 if (escapees.contains(field.value) && field.value != entry.key) {
1307                     myDependencies.add(field.value);
1308                     reverseDependencies.add(field.value, HashSet<Node*>()).iterator->value.add(entry.key);
1309                     if (field.key.neededForMaterialization())
1310                         myDependenciesForMaterialization.add(field.value);
1311                 }
1312             }
1313         }
1314
1315         // Helper function to update the materialized set and the
1316         // dependencies
1317         HashSet<Node*> materialized;
1318         auto materialize = [&] (Node* identifier) {
1319             materialized.add(identifier);
1320             for (Node* dep : dependencies.get(identifier))
1321                 reverseDependencies.find(dep)->value.remove(identifier);
1322             for (Node* rdep : reverseDependencies.get(identifier)) {
1323                 dependencies.find(rdep)->value.remove(identifier);
1324                 forMaterialization.find(rdep)->value.remove(identifier);
1325             }
1326             dependencies.remove(identifier);
1327             reverseDependencies.remove(identifier);
1328             forMaterialization.remove(identifier);
1329         };
1330
1331         // Nodes without remaining unmaterialized fields will be
1332         // materialized first - amongst the remaining unmaterialized
1333         // nodes
1334         std::list<Allocation> toMaterialize;
1335         auto firstPos = toMaterialize.begin();
1336         auto materializeFirst = [&] (Allocation&& allocation) {
1337             materialize(allocation.identifier());
1338             // We need to insert *after* the current position
1339             if (firstPos != toMaterialize.end())
1340                 ++firstPos;
1341             firstPos = toMaterialize.insert(firstPos, WTF::move(allocation));
1342         };
1343
1344         // Nodes that no other unmaterialized node points to will be
1345         // materialized last - amongst the remaining unmaterialized
1346         // nodes
1347         auto lastPos = toMaterialize.end();
1348         auto materializeLast = [&] (Allocation&& allocation) {
1349             materialize(allocation.identifier());
1350             lastPos = toMaterialize.insert(lastPos, WTF::move(allocation));
1351         };
1352
1353         // These are the promoted locations that contains some of the
1354         // allocations we are currently escaping. If they are a location on
1355         // some other allocation we are currently materializing, we will need
1356         // to "recover" their value with a real put once the corresponding
1357         // allocation is materialized; if they are a location on some other
1358         // not-yet-materialized allocation, we will need a PutHint.
1359         Vector<PromotedHeapLocation> toRecover;
1360
1361         // This loop does the actual cycle breaking
1362         while (!escapees.isEmpty()) {
1363             materialized.clear();
1364
1365             // Materialize nodes that won't require recoveries if we can
1366             for (auto& entry : escapees) {
1367                 if (!forMaterialization.find(entry.key)->value.isEmpty())
1368                     continue;
1369
1370                 if (dependencies.find(entry.key)->value.isEmpty()) {
1371                     materializeFirst(WTF::move(entry.value));
1372                     continue;
1373                 }
1374
1375                 if (reverseDependencies.find(entry.key)->value.isEmpty()) {
1376                     materializeLast(WTF::move(entry.value));
1377                     continue;
1378                 }
1379             }
1380
1381             // We reach this only if there is an actual cycle that needs
1382             // breaking. Because we do not want to solve a NP-hard problem
1383             // here, we just heuristically pick a node and materialize it
1384             // first.
1385             if (materialized.isEmpty()) {
1386                 uint64_t maxEvaluation = 0;
1387                 Allocation* bestAllocation;
1388                 for (auto& entry : escapees) {
1389                     if (!forMaterialization.find(entry.key)->value.isEmpty())
1390                         continue;
1391
1392                     uint64_t evaluation =
1393                         static_cast<uint64_t>(dependencies.get(entry.key).size()) * reverseDependencies.get(entry.key).size();
1394                     if (evaluation > maxEvaluation) {
1395                         maxEvaluation = evaluation;
1396                         bestAllocation = &entry.value;
1397                     }
1398                 }
1399                 RELEASE_ASSERT(maxEvaluation > 0);
1400
1401                 materializeFirst(WTF::move(*bestAllocation));
1402             }
1403             RELEASE_ASSERT(!materialized.isEmpty());
1404
1405             for (Node* identifier : materialized)
1406                 escapees.remove(identifier);
1407         }
1408
1409         materialized.clear();
1410
1411         HashSet<Node*> escaped;
1412         for (const Allocation& allocation : toMaterialize)
1413             escaped.add(allocation.identifier());
1414         for (const Allocation& allocation : toMaterialize) {
1415             for (const auto& field : allocation.fields()) {
1416                 if (escaped.contains(field.value) && !materialized.contains(field.value))
1417                     toRecover.append(PromotedHeapLocation(allocation.identifier(), field.key));
1418             }
1419             materialized.add(allocation.identifier());
1420         }
1421
1422         Vector<Node*>& materializations = m_materializationSiteToMaterializations.add(
1423             where, Vector<Node*>()).iterator->value;
1424
1425         for (const Allocation& allocation : toMaterialize) {
1426             Node* materialization = createMaterialization(allocation, where);
1427             materializations.append(materialization);
1428             m_materializationToEscapee.add(materialization, allocation.identifier());
1429         }
1430
1431         if (!toRecover.isEmpty()) {
1432             m_materializationSiteToRecoveries.add(
1433                 where, Vector<PromotedHeapLocation>()).iterator->value.appendVector(toRecover);
1434         }
1435
1436         // The hints need to be after the "real" recoveries so that we
1437         // don't hint not-yet-complete objects
1438         if (!hints.isEmpty()) {
1439             m_materializationSiteToRecoveries.add(
1440                 where, Vector<PromotedHeapLocation>()).iterator->value.appendVector(hints);
1441         }
1442     }
1443
1444     Node* createMaterialization(const Allocation& allocation, Node* where)
1445     {
1446         // FIXME: This is the only place where we actually use the
1447         // fact that an allocation's identifier is indeed the node
1448         // that created the allocation.
1449         switch (allocation.kind()) {
1450         case Allocation::Kind::Object: {
1451             ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
1452             StructureSet* set = m_graph.addStructureSet(allocation.structures());
1453
1454             return m_graph.addNode(
1455                 allocation.identifier()->prediction(), Node::VarArg, MaterializeNewObject,
1456                 where->origin.withSemantic(allocation.identifier()->origin.semantic),
1457                 OpInfo(set), OpInfo(data), 0, 0);
1458         }
1459
1460         case Allocation::Kind::NewArrowFunction:
1461         case Allocation::Kind::Function: {
1462             FrozenValue* executable = allocation.identifier()->cellOperand();
1463             
1464             NodeType nodeType = allocation.kind() == Allocation::Kind::NewArrowFunction ? NewArrowFunction : NewFunction;
1465             
1466             return m_graph.addNode(
1467                 allocation.identifier()->prediction(), nodeType,
1468                 where->origin.withSemantic(
1469                     allocation.identifier()->origin.semantic),
1470                 OpInfo(executable));
1471             break;
1472         }
1473
1474         case Allocation::Kind::Activation: {
1475             ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
1476             FrozenValue* symbolTable = allocation.identifier()->cellOperand();
1477
1478             return m_graph.addNode(
1479                 allocation.identifier()->prediction(), Node::VarArg, MaterializeCreateActivation,
1480                 where->origin.withSemantic(
1481                     allocation.identifier()->origin.semantic),
1482                 OpInfo(symbolTable), OpInfo(data), 0, 0);
1483         }
1484
1485         default:
1486             DFG_CRASH(m_graph, allocation.identifier(), "Bad allocation kind");
1487         }
1488     }
1489
1490     void promoteLocalHeap()
1491     {
1492         // Collect the set of heap locations that we will be operating
1493         // over.
1494         HashSet<PromotedHeapLocation> locations;
1495         for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
1496             m_heap = m_heapAtHead[block];
1497
1498             for (Node* node : *block) {
1499                 handleNode(
1500                     node,
1501                     [&] (PromotedHeapLocation location, LazyNode) {
1502                         // If the location is not on a sink candidate,
1503                         // we only sink it if it is read
1504                         if (m_sinkCandidates.contains(location.base()))
1505                             locations.add(location);
1506                     },
1507                     [&] (PromotedHeapLocation location) -> Node* {
1508                         locations.add(location);
1509                         return nullptr;
1510                     });
1511             }
1512         }
1513
1514         // Figure out which locations belong to which allocations.
1515         m_locationsForAllocation.clear();
1516         for (PromotedHeapLocation location : locations) {
1517             auto result = m_locationsForAllocation.add(
1518                 location.base(),
1519                 Vector<PromotedHeapLocation>());
1520             ASSERT(!result.iterator->value.contains(location));
1521             result.iterator->value.append(location);
1522         }
1523
1524         m_pointerSSA.reset();
1525         m_allocationSSA.reset();
1526
1527         // Collect the set of "variables" that we will be sinking.
1528         m_locationToVariable.clear();
1529         m_nodeToVariable.clear();
1530         Vector<Node*> indexToNode;
1531         Vector<PromotedHeapLocation> indexToLocation;
1532
1533         for (Node* index : m_sinkCandidates) {
1534             SSACalculator::Variable* variable = m_allocationSSA.newVariable();
1535             m_nodeToVariable.add(index, variable);
1536             ASSERT(indexToNode.size() == variable->index());
1537             indexToNode.append(index);
1538         }
1539
1540         for (PromotedHeapLocation location : locations) {
1541             SSACalculator::Variable* variable = m_pointerSSA.newVariable();
1542             m_locationToVariable.add(location, variable);
1543             ASSERT(indexToLocation.size() == variable->index());
1544             indexToLocation.append(location);
1545         }
1546
1547         // We insert all required constants at top of block 0 so that
1548         // they are inserted only once and we don't clutter the graph
1549         // with useless constants everywhere
1550         HashMap<FrozenValue*, Node*> lazyMapping;
1551         if (!m_bottom)
1552             m_bottom = m_insertionSet.insertConstant(0, m_graph.block(0)->at(0)->origin, jsNumber(1927));
1553         for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
1554             m_heap = m_heapAtHead[block];
1555
1556             for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
1557                 Node* node = block->at(nodeIndex);
1558
1559                 // Some named properties can be added conditionally,
1560                 // and that would necessitate bottoms
1561                 for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
1562                     if (location.kind() != NamedPropertyPLoc)
1563                         continue;
1564
1565                     SSACalculator::Variable* variable = m_locationToVariable.get(location);
1566                     m_pointerSSA.newDef(variable, block, m_bottom);
1567                 }
1568
1569                 for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
1570                     Node* escapee = m_materializationToEscapee.get(materialization);
1571                     m_allocationSSA.newDef(m_nodeToVariable.get(escapee), block, materialization);
1572                 }
1573
1574                 if (m_sinkCandidates.contains(node))
1575                     m_allocationSSA.newDef(m_nodeToVariable.get(node), block, node);
1576
1577                 handleNode(
1578                     node,
1579                     [&] (PromotedHeapLocation location, LazyNode value) {
1580                         if (!locations.contains(location))
1581                             return;
1582
1583                         Node* nodeValue;
1584                         if (value.isNode())
1585                             nodeValue = value.asNode();
1586                         else {
1587                             auto iter = lazyMapping.find(value.asValue());
1588                             if (iter != lazyMapping.end())
1589                                 nodeValue = iter->value;
1590                             else {
1591                                 nodeValue = value.ensureIsNode(
1592                                     m_insertionSet, m_graph.block(0), 0);
1593                                 lazyMapping.add(value.asValue(), nodeValue);
1594                             }
1595                         }
1596
1597                         SSACalculator::Variable* variable = m_locationToVariable.get(location);
1598                         m_pointerSSA.newDef(variable, block, nodeValue);
1599                     },
1600                     [] (PromotedHeapLocation) -> Node* {
1601                         return nullptr;
1602                     });
1603             }
1604         }
1605         m_insertionSet.execute(m_graph.block(0));
1606
1607         // Run the SSA calculators to create Phis
1608         m_pointerSSA.computePhis(
1609             [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
1610                 PromotedHeapLocation location = indexToLocation[variable->index()];
1611
1612                 // Don't create Phi nodes for fields of dead allocations
1613                 if (!m_heapAtHead[block].isAllocation(location.base()))
1614                     return nullptr;
1615
1616                 // Don't create Phi nodes once we are escaped
1617                 if (m_heapAtHead[block].getAllocation(location.base()).isEscapedAllocation())
1618                     return nullptr;
1619
1620                 // If we point to a single allocation, we will
1621                 // directly use its materialization
1622                 if (m_heapAtHead[block].follow(location))
1623                     return nullptr;
1624
1625                 Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, block->at(0)->origin.withInvalidExit());
1626                 phiNode->mergeFlags(NodeResultJS);
1627                 return phiNode;
1628             });
1629
1630         m_allocationSSA.computePhis(
1631             [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
1632                 Node* identifier = indexToNode[variable->index()];
1633
1634                 // Don't create Phi nodes for dead allocations
1635                 if (!m_heapAtHead[block].isAllocation(identifier))
1636                     return nullptr;
1637
1638                 // Don't create Phi nodes until we are escaped
1639                 if (!m_heapAtHead[block].getAllocation(identifier).isEscapedAllocation())
1640                     return nullptr;
1641
1642                 Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, block->at(0)->origin.withInvalidExit());
1643                 phiNode->mergeFlags(NodeResultJS);
1644                 return phiNode;
1645             });
1646
1647         // Place Phis in the right places, replace all uses of any load with the appropriate
1648         // value, and create the materialization nodes.
1649         LocalOSRAvailabilityCalculator availabilityCalculator;
1650         m_graph.clearReplacements();
1651         for (BasicBlock* block : m_graph.blocksInPreOrder()) {
1652             m_heap = m_heapAtHead[block];
1653             availabilityCalculator.beginBlock(block);
1654
1655             // These mapping tables are intended to be lazy. If
1656             // something is omitted from the table, it means that
1657             // there haven't been any local stores to the promoted
1658             // heap location (or any local materialization).
1659             m_localMapping.clear();
1660             m_escapeeToMaterialization.clear();
1661
1662             // Insert the Phi functions that we had previously
1663             // created.
1664             for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(block)) {
1665                 SSACalculator::Variable* variable = phiDef->variable();
1666                 m_insertionSet.insert(0, phiDef->value());
1667
1668                 PromotedHeapLocation location = indexToLocation[variable->index()];
1669                 m_localMapping.set(location, phiDef->value());
1670
1671                 if (m_sinkCandidates.contains(location.base())) {
1672                     m_insertionSet.insert(
1673                         0,
1674                         location.createHint(
1675                             m_graph, block->at(0)->origin.withInvalidExit(), phiDef->value()));
1676                 }
1677             }
1678
1679             for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(block)) {
1680                 SSACalculator::Variable* variable = phiDef->variable();
1681                 m_insertionSet.insert(0, phiDef->value());
1682
1683                 Node* identifier = indexToNode[variable->index()];
1684                 m_escapeeToMaterialization.add(identifier, phiDef->value());
1685                 bool canExit = false;
1686                 insertOSRHintsForUpdate(
1687                     0, block->at(0)->origin, canExit,
1688                     availabilityCalculator.m_availability, identifier, phiDef->value());
1689             }
1690
1691             if (verbose) {
1692                 dataLog("Local mapping at ", pointerDump(block), ": ", mapDump(m_localMapping), "\n");
1693                 dataLog("Local materializations at ", pointerDump(block), ": ", mapDump(m_escapeeToMaterialization), "\n");
1694             }
1695
1696             for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
1697                 Node* node = block->at(nodeIndex);
1698                 bool canExit = true;
1699                 bool nextCanExit = node->origin.exitOK;
1700                 for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
1701                     if (location.kind() != NamedPropertyPLoc)
1702                         continue;
1703
1704                     m_localMapping.set(location, m_bottom);
1705
1706                     if (m_sinkCandidates.contains(node)) {
1707                         m_insertionSet.insert(
1708                             nodeIndex + 1,
1709                             location.createHint(
1710                                 m_graph, node->origin.takeValidExit(nextCanExit), m_bottom));
1711                     }
1712                 }
1713
1714                 for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
1715                     materialization->origin.exitOK &= canExit;
1716                     Node* escapee = m_materializationToEscapee.get(materialization);
1717                     populateMaterialization(block, materialization, escapee);
1718                     m_escapeeToMaterialization.set(escapee, materialization);
1719                     m_insertionSet.insert(nodeIndex, materialization);
1720                     if (verbose)
1721                         dataLog("Materializing ", escapee, " => ", materialization, " at ", node, "\n");
1722                 }
1723
1724                 for (PromotedHeapLocation location : m_materializationSiteToRecoveries.get(node))
1725                     m_insertionSet.insert(nodeIndex, createRecovery(block, location, node, canExit));
1726
1727                 // We need to put the OSR hints after the recoveries,
1728                 // because we only want the hints once the object is
1729                 // complete
1730                 for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
1731                     Node* escapee = m_materializationToEscapee.get(materialization);
1732                     insertOSRHintsForUpdate(
1733                         nodeIndex, node->origin, canExit,
1734                         availabilityCalculator.m_availability, escapee, materialization);
1735                 }
1736
1737                 if (node->origin.exitOK && !canExit) {
1738                     // We indicate that the exit state is fine now. It is OK because we updated the
1739                     // state above. We need to indicate this manually because the validation doesn't
1740                     // have enough information to infer that the exit state is fine.
1741                     m_insertionSet.insertNode(nodeIndex, SpecNone, ExitOK, node->origin);
1742                 }
1743
1744                 if (m_sinkCandidates.contains(node))
1745                     m_escapeeToMaterialization.set(node, node);
1746
1747                 availabilityCalculator.executeNode(node);
1748
1749                 bool desiredNextExitOK = node->origin.exitOK && !clobbersExitState(m_graph, node);
1750
1751                 bool doLower = false;
1752                 handleNode(
1753                     node,
1754                     [&] (PromotedHeapLocation location, LazyNode value) {
1755                         if (!locations.contains(location))
1756                             return;
1757
1758                         Node* nodeValue;
1759                         if (value.isNode())
1760                             nodeValue = value.asNode();
1761                         else
1762                             nodeValue = lazyMapping.get(value.asValue());
1763
1764                         nodeValue = resolve(block, nodeValue);
1765
1766                         m_localMapping.set(location, nodeValue);
1767
1768                         if (!m_sinkCandidates.contains(location.base()))
1769                             return;
1770
1771                         doLower = true;
1772
1773                         m_insertionSet.insert(
1774                             nodeIndex + 1,
1775                             location.createHint(
1776                                 m_graph, node->origin.takeValidExit(nextCanExit), nodeValue));
1777                     },
1778                     [&] (PromotedHeapLocation location) -> Node* {
1779                         return resolve(block, location);
1780                     });
1781
1782                 if (!nextCanExit && desiredNextExitOK) {
1783                     // We indicate that the exit state is fine now. We need to do this because we
1784                     // emitted hints that appear to invalidate the exit state.
1785                     m_insertionSet.insertNode(nodeIndex + 1, SpecNone, ExitOK, node->origin);
1786                 }
1787
1788                 if (m_sinkCandidates.contains(node) || doLower) {
1789                     switch (node->op()) {
1790                     case NewObject:
1791                         node->convertToPhantomNewObject();
1792                         break;
1793
1794                     case NewArrowFunction:
1795                     case NewFunction:
1796                         node->convertToPhantomNewFunction();
1797                         break;
1798
1799                     case CreateActivation:
1800                         node->convertToPhantomCreateActivation();
1801                         break;
1802
1803                     default:
1804                         node->remove();
1805                         break;
1806                     }
1807                 }
1808
1809                 m_graph.doToChildren(
1810                     node,
1811                     [&] (Edge& edge) {
1812                         edge.setNode(resolve(block, edge.node()));
1813                     });
1814             }
1815
1816             // Gotta drop some Upsilons.
1817             NodeAndIndex terminal = block->findTerminal();
1818             size_t upsilonInsertionPoint = terminal.index;
1819             NodeOrigin upsilonOrigin = terminal.node->origin;
1820             for (BasicBlock* successorBlock : block->successors()) {
1821                 for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(successorBlock)) {
1822                     Node* phiNode = phiDef->value();
1823                     SSACalculator::Variable* variable = phiDef->variable();
1824                     PromotedHeapLocation location = indexToLocation[variable->index()];
1825                     Node* incoming = resolve(block, location);
1826
1827                     m_insertionSet.insertNode(
1828                         upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
1829                         OpInfo(phiNode), incoming->defaultEdge());
1830                 }
1831
1832                 for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(successorBlock)) {
1833                     Node* phiNode = phiDef->value();
1834                     SSACalculator::Variable* variable = phiDef->variable();
1835                     Node* incoming = getMaterialization(block, indexToNode[variable->index()]);
1836
1837                     m_insertionSet.insertNode(
1838                         upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
1839                         OpInfo(phiNode), incoming->defaultEdge());
1840                 }
1841             }
1842
1843             m_insertionSet.execute(block);
1844         }
1845     }
1846
1847     Node* resolve(BasicBlock* block, PromotedHeapLocation location)
1848     {
1849         // If we are currently pointing to a single local allocation,
1850         // simply return the associated materialization.
1851         if (Node* identifier = m_heap.follow(location))
1852             return getMaterialization(block, identifier);
1853
1854         if (Node* result = m_localMapping.get(location))
1855             return result;
1856
1857         // This implies that there is no local mapping. Find a non-local mapping.
1858         SSACalculator::Def* def = m_pointerSSA.nonLocalReachingDef(
1859             block, m_locationToVariable.get(location));
1860         ASSERT(def);
1861         ASSERT(def->value());
1862
1863         Node* result = def->value();
1864
1865         ASSERT(!result->replacement());
1866
1867         m_localMapping.add(location, result);
1868         return result;
1869     }
1870
1871     Node* resolve(BasicBlock* block, Node* node)
1872     {
1873         // If we are currently pointing to a single local allocation,
1874         // simply return the associated materialization.
1875         if (Node* identifier = m_heap.follow(node))
1876             return getMaterialization(block, identifier);
1877
1878         if (node->replacement())
1879             node = node->replacement();
1880         ASSERT(!node->replacement());
1881
1882         return node;
1883     }
1884
1885     Node* getMaterialization(BasicBlock* block, Node* identifier)
1886     {
1887         ASSERT(m_heap.isAllocation(identifier));
1888         if (!m_sinkCandidates.contains(identifier))
1889             return identifier;
1890
1891         if (Node* materialization = m_escapeeToMaterialization.get(identifier))
1892             return materialization;
1893
1894         SSACalculator::Def* def = m_allocationSSA.nonLocalReachingDef(
1895             block, m_nodeToVariable.get(identifier));
1896         ASSERT(def && def->value());
1897         m_escapeeToMaterialization.add(identifier, def->value());
1898         ASSERT(!def->value()->replacement());
1899         return def->value();
1900     }
1901
1902     void insertOSRHintsForUpdate(unsigned nodeIndex, NodeOrigin origin, bool& canExit, AvailabilityMap& availability, Node* escapee, Node* materialization)
1903     {
1904         // We need to follow() the value in the heap.
1905         // Consider the following graph:
1906         //
1907         // Block #0
1908         //   0: NewObject({})
1909         //   1: NewObject({})
1910         //   -: PutByOffset(@0, @1, x:0)
1911         //   -: PutStructure(@0, {x:0})
1912         //   2: GetByOffset(@0, x:0)
1913         //   -: MovHint(@2, loc1)
1914         //   -: Branch(#1, #2)
1915         //
1916         // Block #1
1917         //   3: Call(f, @1)
1918         //   4: Return(@0)
1919         //
1920         // Block #2
1921         //   -: Return(undefined)
1922         //
1923         // We need to materialize @1 at @3, and when doing so we need
1924         // to insert a MovHint for the materialization into loc1 as
1925         // well.
1926         // In order to do this, we say that we need to insert an
1927         // update hint for any availability whose node resolve()s to
1928         // the materialization.
1929         for (auto entry : availability.m_heap) {
1930             if (!entry.value.hasNode())
1931                 continue;
1932             if (m_heap.follow(entry.value.node()) != escapee)
1933                 continue;
1934
1935             m_insertionSet.insert(
1936                 nodeIndex,
1937                 entry.key.createHint(m_graph, origin.takeValidExit(canExit), materialization));
1938         }
1939
1940         for (unsigned i = availability.m_locals.size(); i--;) {
1941             if (!availability.m_locals[i].hasNode())
1942                 continue;
1943             if (m_heap.follow(availability.m_locals[i].node()) != escapee)
1944                 continue;
1945
1946             int operand = availability.m_locals.operandForIndex(i);
1947             m_insertionSet.insertNode(
1948                 nodeIndex, SpecNone, MovHint, origin.takeValidExit(canExit), OpInfo(operand),
1949                 materialization->defaultEdge());
1950         }
1951     }
1952
    // Fill in the children of a materialization node (created earlier with no
    // children) so it reconstructs the escapee's state at this program point.
    // Field values are resolve()d in `block`; a field whose value is itself a
    // sink candidate gets the m_bottom placeholder — its real value is stored
    // afterwards by a recovery (see createRecovery and the recovery loop).
    void populateMaterialization(BasicBlock* block, Node* node, Node* escapee)
    {
        Allocation& allocation = m_heap.getAllocation(escapee);
        switch (node->op()) {
        case MaterializeNewObject: {
            ObjectMaterializationData& data = node->objectMaterializationData();
            unsigned firstChild = m_graph.m_varArgChildren.size();

            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);

            // The structure is always appended as the first child.
            PromotedHeapLocation structure(StructurePLoc, allocation.identifier());
            ASSERT(locations.contains(structure));

            m_graph.m_varArgChildren.append(Edge(resolve(block, structure), KnownCellUse));

            for (PromotedHeapLocation location : locations) {
                switch (location.kind()) {
                case StructurePLoc:
                    // Already appended above.
                    ASSERT(location == structure);
                    break;

                case NamedPropertyPLoc: {
                    ASSERT(location.base() == allocation.identifier());
                    data.m_properties.append(PhantomPropertyValue(location.info()));
                    Node* value = resolve(block, location);
                    // A property holding another sunken allocation cannot be
                    // referenced directly; use the bottom constant and let the
                    // recovery store the real value once it is materialized.
                    if (m_sinkCandidates.contains(value))
                        m_graph.m_varArgChildren.append(m_bottom);
                    else
                        m_graph.m_varArgChildren.append(value);
                    break;
                }

                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }

            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }

        case MaterializeCreateActivation: {
            ObjectMaterializationData& data = node->objectMaterializationData();

            unsigned firstChild = m_graph.m_varArgChildren.size();

            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);

            // Fixed child order: symbol table first, then scope, then the
            // closure variables.
            PromotedHeapLocation symbolTable(ActivationSymbolTablePLoc, allocation.identifier());
            ASSERT(locations.contains(symbolTable));
            ASSERT(node->cellOperand() == resolve(block, symbolTable)->constant());
            m_graph.m_varArgChildren.append(Edge(resolve(block, symbolTable), KnownCellUse));

            PromotedHeapLocation scope(ActivationScopePLoc, allocation.identifier());
            ASSERT(locations.contains(scope));
            m_graph.m_varArgChildren.append(Edge(resolve(block, scope), KnownCellUse));

            for (PromotedHeapLocation location : locations) {
                switch (location.kind()) {
                case ActivationScopePLoc: {
                    // Already appended above.
                    ASSERT(location == scope);
                    break;
                }

                case ActivationSymbolTablePLoc: {
                    // Already appended above.
                    ASSERT(location == symbolTable);
                    break;
                }

                case ClosureVarPLoc: {
                    ASSERT(location.base() == allocation.identifier());
                    data.m_properties.append(PhantomPropertyValue(location.info()));
                    Node* value = resolve(block, location);
                    // Same as for objects: sunken values are filled in by a
                    // recovery, not by the materialization itself.
                    if (m_sinkCandidates.contains(value))
                        m_graph.m_varArgChildren.append(m_bottom);
                    else
                        m_graph.m_varArgChildren.append(value);
                    break;
                }

                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }

            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }
        
        case NewFunction:
        case NewArrowFunction: {
            // Functions are re-created by reusing the original New*Function
            // node, so only its fixed children need repointing.
            bool isArrowFunction = node->op() == NewArrowFunction;
            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
            ASSERT(locations.size() == (isArrowFunction ? 3 : 2));
                
            // The executable is carried by the node itself, not a child.
            PromotedHeapLocation executable(FunctionExecutablePLoc, allocation.identifier());
            ASSERT_UNUSED(executable, locations.contains(executable));
                
            PromotedHeapLocation activation(FunctionActivationPLoc, allocation.identifier());
            ASSERT(locations.contains(activation));

            node->child1() = Edge(resolve(block, activation), KnownCellUse);
            
            if (isArrowFunction) {
                PromotedHeapLocation boundThis(ArrowFunctionBoundThisPLoc, allocation.identifier());
                ASSERT(locations.contains(boundThis));
                node->child2() = Edge(resolve(block, boundThis), CellUse);
            }
            
            break;
        }

        default:
            DFG_CRASH(m_graph, node, "Bad materialize op");
        }
    }
2073
    // Build the node that stores the value of `location` back into its (now
    // materialized) base object at `where`: a promoted-location hint if the
    // base is still phantom here, otherwise a real PutByOffset /
    // MultiPutByOffset / PutClosureVar. The returned node is not yet inserted;
    // the caller adds it to the insertion set.
    Node* createRecovery(BasicBlock* block, PromotedHeapLocation location, Node* where, bool& canExit)
    {
        if (verbose)
            dataLog("Recovering ", location, " at ", where, "\n");
        ASSERT(location.base()->isPhantomAllocation());
        Node* base = getMaterialization(block, location.base());
        Node* value = resolve(block, location);

        // The store happens at `where`, but borrows the base's semantic
        // origin.
        NodeOrigin origin = where->origin.withSemantic(base->origin.semantic);

        if (verbose)
            dataLog("Base is ", base, " and value is ", value, "\n");

        // If the base itself has not been materialized here, a promoted
        // location hint suffices; the store becomes real when the base does.
        if (base->isPhantomAllocation()) {
            return PromotedHeapLocation(base, location.descriptor()).createHint(
                m_graph, origin.takeValidExit(canExit), value);
        }

        switch (location.kind()) {
        case NamedPropertyPLoc: {
            Allocation& allocation = m_heap.getAllocation(location.base());

            Vector<Structure*> structures;
            structures.appendRange(allocation.structures().begin(), allocation.structures().end());
            unsigned identifierNumber = location.info();
            UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];

            // Sort the candidate structures by the property's offset so that
            // equal offsets become adjacent and can be grouped below.
            std::sort(
                structures.begin(),
                structures.end(),
                [uid] (Structure *a, Structure* b) -> bool {
                    return a->getConcurrently(uid) < b->getConcurrently(uid);
                });

            PropertyOffset firstOffset = structures[0]->getConcurrently(uid);

            // If every structure stores the property at the same offset, a
            // single PutByOffset is enough.
            if (firstOffset == structures.last()->getConcurrently(uid)) {
                Node* storage = base;
                // FIXME: When we decide to sink objects with a
                // property storage, we should handle non-inline offsets.
                RELEASE_ASSERT(isInlineOffset(firstOffset));

                StorageAccessData* data = m_graph.m_storageAccessData.add();
                data->offset = firstOffset;
                data->identifierNumber = identifierNumber;

                return m_graph.addNode(
                    SpecNone,
                    PutByOffset,
                    origin.takeValidExit(canExit),
                    OpInfo(data),
                    Edge(storage, KnownCellUse),
                    Edge(base, KnownCellUse),
                    value->defaultEdge());
            }

            // Otherwise, emit a MultiPutByOffset carrying one replace-variant
            // per distinct offset, each covering the structures sharing it.
            MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
            data->identifierNumber = identifierNumber;

            {
                PropertyOffset currentOffset = firstOffset;
                StructureSet currentSet;
                for (Structure* structure : structures) {
                    PropertyOffset offset = structure->getConcurrently(uid);
                    if (offset != currentOffset) {
                        // Because our analysis treats MultiPutByOffset like an escape, we only have to
                        // deal with storing results that would have been previously stored by PutByOffset
                        // nodes. Those nodes were guarded by the appropriate type checks. This means that
                        // at this point, we can simply trust that the incoming value has the right type
                        // for whatever structure we are using.
                        data->variants.append(
                            PutByIdVariant::replace(currentSet, currentOffset, InferredType::Top));
                        currentOffset = offset;
                        currentSet.clear();
                    }
                    currentSet.add(structure);
                }
                // Flush the final group of structures.
                data->variants.append(
                    PutByIdVariant::replace(currentSet, currentOffset, InferredType::Top));
            }

            return m_graph.addNode(
                SpecNone,
                MultiPutByOffset,
                origin.takeValidExit(canExit),
                OpInfo(data),
                Edge(base, KnownCellUse),
                value->defaultEdge());
            break;
        }

        case ClosureVarPLoc: {
            return m_graph.addNode(
                SpecNone,
                PutClosureVar,
                origin.takeValidExit(canExit),
                OpInfo(location.info()),
                Edge(base, KnownCellUse),
                value->defaultEdge());
            break;
        }

        default:
            DFG_CRASH(m_graph, base, "Bad location kind");
            break;
        }
    }
2181
    // SSA over promoted heap locations (field values of tracked allocations).
    SSACalculator m_pointerSSA;
    // SSA over the allocations themselves (their materializations).
    SSACalculator m_allocationSSA;
    // Allocations chosen to be sunk past their allocation site.
    HashSet<Node*> m_sinkCandidates;
    // Promoted location -> its m_pointerSSA variable.
    HashMap<PromotedHeapLocation, SSACalculator::Variable*> m_locationToVariable;
    // Allocation identifier -> its m_allocationSSA variable.
    HashMap<Node*, SSACalculator::Variable*> m_nodeToVariable;
    // Per-block (cleared at each block head) current value of each location.
    HashMap<PromotedHeapLocation, Node*> m_localMapping;
    // Per-block (cleared at each block head) materialization of each escapee.
    HashMap<Node*, Node*> m_escapeeToMaterialization;
    InsertionSet m_insertionSet;
    CombinedLiveness m_combinedLiveness;

    // Materialization node -> the allocation it materializes.
    HashMap<Node*, Node*> m_materializationToEscapee;
    // Site node -> materializations inserted immediately before it.
    HashMap<Node*, Vector<Node*>> m_materializationSiteToMaterializations;
    // Site node -> recovery stores inserted immediately before it.
    HashMap<Node*, Vector<PromotedHeapLocation>> m_materializationSiteToRecoveries;

    // Allocation identifier -> all promoted locations belonging to it.
    HashMap<Node*, Vector<PromotedHeapLocation>> m_locationsForAllocation;

    // Abstract heap state at block boundaries, plus the working state used
    // while walking a block.
    BlockMap<LocalHeap> m_heapAtHead;
    BlockMap<LocalHeap> m_heapAtTail;
    LocalHeap m_heap;

    // Shared placeholder constant (jsNumber(1927)), lazily created at the top
    // of block 0, used for not-yet-recovered or conditionally-set fields.
    Node* m_bottom = nullptr;
2203 };
2204
2205 }
2206
// Entry point: runs ObjectAllocationSinkingPhase over the graph inside a
// sampling region, returning runPhase's result.
bool performObjectAllocationSinking(Graph& graph)
{
    SamplingRegion samplingRegion("DFG Object Allocation Sinking Phase");
    return runPhase<ObjectAllocationSinkingPhase>(graph);
}
2212
2213 } } // namespace JSC::DFG
2214
2215 #endif // ENABLE(DFG_JIT)