Remove unused code related to allocation sinking
Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp
/*
 * Copyright (C) 2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGObjectAllocationSinkingPhase.h"

#if ENABLE(DFG_JIT)

#include "DFGBlockMapInlines.h"
#include "DFGCombinedLiveness.h"
#include "DFGGraph.h"
#include "DFGInsertionSet.h"
#include "DFGLazyNode.h"
#include "DFGLivenessAnalysisPhase.h"
#include "DFGOSRAvailabilityAnalysisPhase.h"
#include "DFGPhase.h"
#include "DFGPromotedHeapLocation.h"
#include "DFGSSACalculator.h"
#include "DFGValidate.h"
#include "JSCInlines.h"

#include <list>

namespace JSC { namespace DFG {

namespace {

bool verbose = false;

// In order to sink object cycles, we use a points-to analysis coupled
// with an escape analysis. This analysis is actually similar to an
// abstract interpreter focused on local allocations and ignoring
// everything else.
//
// We represent the local heap using two mappings:
//
// - A set of the local allocations present in the function, where
//   each of those have a further mapping from
//   PromotedLocationDescriptor to local allocations they must point
//   to.
//
// - A "pointer" mapping from nodes to local allocations, if they must
//   be equal to said local allocation and are currently live. This
//   can be because the node is the actual node that created the
//   allocation, or any other node that must currently point to it -
//   we make no distinction.
//
// The following graph is a motivation for why we separate allocations
// from pointers:
//
// Block #0
//  0: NewObject({})
//  1: NewObject({})
//  -: PutByOffset(@0, @1, x)
//  -: PutStructure(@0, {x:0})
//  2: GetByOffset(@0, x)
//  -: Jump(#1)
//
// Block #1
//  -: Return(@2)
//
// Here, we need to remember in block #1 that @2 points to a local
// allocation with appropriate fields and structures information
// (because we should be able to place a materialization on top of
// block #1 here), even though @1 is dead. We *could* just keep @1
// artificially alive here, but there is no real reason to do it:
// after all, by the end of block #0, @1 and @2 should be completely
// interchangeable, and there is no reason for us to artificially make
// @1 more important.
//
// An important point to consider to understand this separation is
// that we should think of the local heap as follows: we have a
// bunch of nodes that are pointers to "allocations" that live
// someplace on the heap, and those allocations can have pointers in
// between themselves as well. We shouldn't care about whatever
// names we give to the allocations; what matters when
// comparing/merging two heaps is the isomorphism/comparison between
// the allocation graphs as seen by the nodes.
//
// For instance, in the following graph:
//
// Block #0
//  0: NewObject({})
//  -: Branch(#1, #2)
//
// Block #1
//  1: NewObject({})
//  -: PutByOffset(@0, @1, x)
//  -: PutStructure(@0, {x:0})
//  -: Jump(#3)
//
// Block #2
//  2: NewObject({})
//  -: PutByOffset(@2, undefined, x)
//  -: PutStructure(@2, {x:0})
//  -: PutByOffset(@0, @2, x)
//  -: PutStructure(@0, {x:0})
//  -: Jump(#3)
//
// Block #3
//  -: Return(@0)
//
// we should think of the heaps at tail of blocks #1 and #2 as being
// exactly the same, even though one has @0.x pointing to @1 and the
// other has @0.x pointing to @2, because in essence this should not
// be different from the graph where we hoisted @1 and @2 into a
// single allocation in block #0. We currently will not handle this
// case, because we merge allocations based on the node they are
// coming from, but this is only a technicality for the sake of
// simplicity that shouldn't hide the deeper idea outlined here.
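//
// As an illustration (not part of the algorithm, and in loose
// notation), the two mappings for the first example graph above look
// like this at the tail of block #0, before liveness pruning:
//
//   Allocations:
//     #@0: Object({x:0}, x => #@1)
//     #@1: Object({})
//   Pointers:
//     @0 => #@0
//     @1 => #@1
//     @2 => #@1
//
// Pruning by liveness then drops the dead pointers @0 and @1, and
// with them the now-unreachable allocation #@0; only #@1 survives
// into block #1, reachable through @2.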

class Allocation {
public:
    // We use Escaped as a special allocation kind because when we
    // decide to sink an allocation, we still need to keep track of it
    // once it is escaped if it still has pointers to it in order to
    // replace any use of those pointers by the corresponding
    // materialization.
    enum class Kind { Escaped, Object, Activation, Function, NewArrowFunction };

    explicit Allocation(Node* identifier = nullptr, Kind kind = Kind::Escaped)
        : m_identifier(identifier)
        , m_kind(kind)
    {
    }


    const HashMap<PromotedLocationDescriptor, Node*>& fields() const
    {
        return m_fields;
    }

    Node* get(PromotedLocationDescriptor descriptor)
    {
        return m_fields.get(descriptor);
    }

    Allocation& set(PromotedLocationDescriptor descriptor, Node* value)
    {
        // Pointing to anything other than an unescaped local
        // allocation is represented by simply not having the
        // field
        if (value)
            m_fields.set(descriptor, value);
        else
            m_fields.remove(descriptor);
        return *this;
    }

    void remove(PromotedLocationDescriptor descriptor)
    {
        set(descriptor, nullptr);
    }

    bool hasStructures() const
    {
        switch (kind()) {
        case Kind::Object:
            return true;

        default:
            return false;
        }
    }

    Allocation& setStructures(const StructureSet& structures)
    {
        ASSERT(hasStructures() && !structures.isEmpty());
        m_structures = structures;
        return *this;
    }

    Allocation& mergeStructures(const StructureSet& structures)
    {
        ASSERT(hasStructures() || structures.isEmpty());
        m_structures.merge(structures);
        return *this;
    }

    Allocation& filterStructures(const StructureSet& structures)
    {
        ASSERT(hasStructures());
        m_structures.filter(structures);
        return *this;
    }

    const StructureSet& structures() const
    {
        return m_structures;
    }

    Node* identifier() const { return m_identifier; }

    Kind kind() const { return m_kind; }

    bool isEscapedAllocation() const
    {
        return kind() == Kind::Escaped;
    }

    bool isObjectAllocation() const
    {
        return m_kind == Kind::Object;
    }

    bool isActivationAllocation() const
    {
        return m_kind == Kind::Activation;
    }

    bool isFunctionAllocation() const
    {
        return m_kind == Kind::Function || m_kind == Kind::NewArrowFunction;
    }

    bool isArrowFunctionAllocation() const
    {
        return m_kind == Kind::NewArrowFunction;
    }

    bool operator==(const Allocation& other) const
    {
        return m_identifier == other.m_identifier
            && m_kind == other.m_kind
            && m_fields == other.m_fields
            && m_structures == other.m_structures;
    }

    bool operator!=(const Allocation& other) const
    {
        return !(*this == other);
    }

    void dump(PrintStream& out) const
    {
        dumpInContext(out, nullptr);
    }

    void dumpInContext(PrintStream& out, DumpContext* context) const
    {
        switch (m_kind) {
        case Kind::Escaped:
            out.print("Escaped");
            break;

        case Kind::Object:
            out.print("Object");
            break;

        case Kind::Function:
            out.print("Function");
            break;

        case Kind::NewArrowFunction:
            out.print("NewArrowFunction");
            break;

        case Kind::Activation:
            out.print("Activation");
            break;
        }
        out.print("Allocation(");
        if (!m_structures.isEmpty())
            out.print(inContext(m_structures, context));
        if (!m_fields.isEmpty()) {
            if (!m_structures.isEmpty())
                out.print(", ");
            out.print(mapDump(m_fields, " => #", ", "));
        }
        out.print(")");
    }

private:
    Node* m_identifier; // This is the actual node that created the allocation
    Kind m_kind;
    HashMap<PromotedLocationDescriptor, Node*> m_fields;
    StructureSet m_structures;
};
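
// For illustration only, a minimal sketch of how an Allocation is
// populated (the nodes and descriptor here are hypothetical
// placeholders):
//
//   Allocation allocation(newObjectNode, Allocation::Kind::Object);
//   allocation.setStructures(structures);
//   allocation.set(fieldDescriptor, otherAllocationNode); // field must point there
//   allocation.set(fieldDescriptor, nullptr); // removes the field: it may now
//                                             // point to anything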

class LocalHeap {
public:
    Allocation& newAllocation(Node* node, Allocation::Kind kind)
    {
        ASSERT(!m_pointers.contains(node) && !isAllocation(node));
        m_pointers.add(node, node);
        return m_allocations.set(node, Allocation(node, kind)).iterator->value;
    }

    bool isAllocation(Node* identifier) const
    {
        return m_allocations.contains(identifier);
    }

    // Note that this is fundamentally different from
    // onlyLocalAllocation() below. getAllocation() takes as argument
    // a node-as-identifier, that is, an allocation node. This
    // allocation node doesn't have to be alive; it may only be
    // pointed to by other nodes or allocation fields.
    // For instance, in the following graph:
    //
    // Block #0
    //  0: NewObject({})
    //  1: NewObject({})
    //  -: PutByOffset(@0, @1, x)
    //  -: PutStructure(@0, {x:0})
    //  2: GetByOffset(@0, x)
    //  -: Jump(#1)
    //
    // Block #1
    //  -: Return(@2)
    //
    // At head of block #1, the only reachable allocation is #@1,
    // which can be reached through node @2. Thus, getAllocation(#@1)
    // contains the appropriate metadata for this allocation, but
    // onlyLocalAllocation(@1) is null, as @1 is no longer a pointer
    // to #@1 (since it is dead). Conversely, onlyLocalAllocation(@2)
    // is the same as getAllocation(#@1), while getAllocation(#@2)
    // does not make sense since @2 is not an allocation node.
    //
    // This is meant to be used when the node is already known to be
    // an identifier (i.e. an allocation) - probably because it was
    // found as value of a field or pointer in the current heap, or
    // was the result of a call to follow(). In any other case (such
    // as when doing anything while traversing the graph), the
    // appropriate function to call is probably onlyLocalAllocation.
    Allocation& getAllocation(Node* identifier)
    {
        auto iter = m_allocations.find(identifier);
        ASSERT(iter != m_allocations.end());
        return iter->value;
    }

    void newPointer(Node* node, Node* identifier)
    {
        ASSERT(!m_allocations.contains(node) && !m_pointers.contains(node));
        ASSERT(isAllocation(identifier));
        m_pointers.add(node, identifier);
    }

    // follow solves the points-to problem. Given a live node, which
    // may be either an allocation itself or a heap read (e.g. a
    // GetByOffset node), it returns the corresponding allocation
    // node, if there is one. If the argument node is neither an
    // allocation nor a heap read, or may point to different nodes,
    // nullptr will be returned. Note that a node that points to
    // different nodes can never point to an unescaped local
    // allocation.
    Node* follow(Node* node) const
    {
        auto iter = m_pointers.find(node);
        ASSERT(iter == m_pointers.end() || m_allocations.contains(iter->value));
        return iter == m_pointers.end() ? nullptr : iter->value;
    }

    Node* follow(PromotedHeapLocation location) const
    {
        const Allocation& base = m_allocations.find(location.base())->value;
        auto iter = base.fields().find(location.descriptor());

        if (iter == base.fields().end())
            return nullptr;

        return iter->value;
    }
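
    // For illustration, on the graph from the getAllocation() comment
    // above (loose notation):
    //
    //   follow(@0) == #@0 while @0 is a live pointer
    //   follow(@2) == #@1, since the GetByOffset must alias that allocation
    //   follow(PromotedHeapLocation(#@0, x)) == #@1
    //   follow(@someArithNode) == nullptr: neither an allocation nor a
    //   tracked heap read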

    // onlyLocalAllocation finds the corresponding allocation metadata
    // for any live node. onlyLocalAllocation(node) is essentially
    // getAllocation(follow(node)), with appropriate null handling.
    Allocation* onlyLocalAllocation(Node* node)
    {
        Node* identifier = follow(node);
        if (!identifier)
            return nullptr;

        return &getAllocation(identifier);
    }

    Allocation* onlyLocalAllocation(PromotedHeapLocation location)
    {
        Node* identifier = follow(location);
        if (!identifier)
            return nullptr;

        return &getAllocation(identifier);
    }

    // This allows us to store the escapees only when necessary. If
    // set, the current escapees can be retrieved at any time using
    // takeEscapees(), which will clear the cached set of escapees;
    // otherwise the heap won't remember escaping allocations.
    void setWantEscapees()
    {
        m_wantEscapees = true;
    }

    HashMap<Node*, Allocation> takeEscapees()
    {
        return WTF::move(m_escapees);
    }

    void escape(Node* node)
    {
        Node* identifier = follow(node);
        if (!identifier)
            return;

        escapeAllocation(identifier);
    }

    void merge(const LocalHeap& other)
    {
        assertIsValid();
        other.assertIsValid();
        ASSERT(!m_wantEscapees);

        if (!reached()) {
            ASSERT(other.reached());
            *this = other;
            return;
        }

        HashSet<Node*> toEscape;

        for (auto& allocationEntry : other.m_allocations)
            m_allocations.add(allocationEntry.key, allocationEntry.value);
        for (auto& allocationEntry : m_allocations) {
            auto allocationIter = other.m_allocations.find(allocationEntry.key);

            // If we have it and they don't, it died for them but we
            // are keeping it alive from another field somewhere.
            // There is nothing to do - we will be escaped
            // automatically when we handle that other field.
            // This will also happen for allocations that we have and
            // they don't, and all of those will get pruned.
            if (allocationIter == other.m_allocations.end())
                continue;

            if (allocationEntry.value.kind() != allocationIter->value.kind()) {
                toEscape.add(allocationEntry.key);
                for (const auto& fieldEntry : allocationIter->value.fields())
                    toEscape.add(fieldEntry.value);
            } else {
                mergePointerSets(
                    allocationEntry.value.fields(), allocationIter->value.fields(),
                    [&] (Node* identifier) {
                        toEscape.add(identifier);
                    },
                    [&] (PromotedLocationDescriptor field) {
                        allocationEntry.value.remove(field);
                    });
                allocationEntry.value.mergeStructures(allocationIter->value.structures());
            }
        }

        mergePointerSets(m_pointers, other.m_pointers,
            [&] (Node* identifier) {
                toEscape.add(identifier);
            },
            [&] (Node* field) {
                m_pointers.remove(field);
            });

        for (Node* identifier : toEscape)
            escapeAllocation(identifier);

        if (!ASSERT_DISABLED) {
            for (const auto& entry : m_allocations)
                ASSERT_UNUSED(entry, entry.value.isEscapedAllocation() || other.m_allocations.contains(entry.key));
        }

        // If there is no remaining pointer to an allocation, we can
        // remove it. This should only happen for escaped allocations,
        // because we only merge liveness-pruned heaps in the first
        // place.
        prune();

        assertIsValid();
    }
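
    // For illustration, merging the tails of blocks #1 and #2 from the
    // diamond example at the top of this file (loose notation):
    //
    //   Heap at tail of #1:                Heap at tail of #2:
    //     #@0: Object({x:0}, x => #@1)       #@0: Object({x:0}, x => #@2)
    //     #@1: Object({})                    #@2: Object({x:0})
    //
    // Since @0.x points to different allocations on the two sides,
    // both #@1 and #@2 escape, the x field is removed from the merged
    // #@0, and the now-unreachable #@1 and #@2 are pruned away.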

    void pruneByLiveness(const HashSet<Node*>& live)
    {
        Vector<Node*> toRemove;
        for (const auto& entry : m_pointers) {
            if (!live.contains(entry.key))
                toRemove.append(entry.key);
        }
        for (Node* node : toRemove)
            m_pointers.remove(node);

        prune();
    }

    void assertIsValid() const
    {
        if (ASSERT_DISABLED)
            return;

        // Pointers should point to an actual allocation
        for (const auto& entry : m_pointers) {
            ASSERT_UNUSED(entry, entry.value);
            ASSERT(m_allocations.contains(entry.value));
        }

        for (const auto& allocationEntry : m_allocations) {
            // Fields should point to an actual allocation
            for (const auto& fieldEntry : allocationEntry.value.fields()) {
                ASSERT_UNUSED(fieldEntry, fieldEntry.value);
                ASSERT(m_allocations.contains(fieldEntry.value));
            }
        }
    }

    bool operator==(const LocalHeap& other) const
    {
        assertIsValid();
        other.assertIsValid();
        return m_allocations == other.m_allocations
            && m_pointers == other.m_pointers;
    }

    bool operator!=(const LocalHeap& other) const
    {
        return !(*this == other);
    }

    const HashMap<Node*, Allocation>& allocations() const
    {
        return m_allocations;
    }

    const HashMap<Node*, Node*>& pointers() const
    {
        return m_pointers;
    }

    void dump(PrintStream& out) const
    {
        out.print("  Allocations:\n");
        for (const auto& entry : m_allocations)
            out.print("    #", entry.key, ": ", entry.value, "\n");
        out.print("  Pointers:\n");
        for (const auto& entry : m_pointers)
            out.print("    ", entry.key, " => #", entry.value, "\n");
    }

    bool reached() const
    {
        return m_reached;
    }

    void setReached()
    {
        m_reached = true;
    }

private:
    // When we merge two heaps, we escape all fields of allocations,
    // unless they point to the same thing in both heaps.
    // The reason for this is that it allows us not to do extra work
    // for diamond graphs where we would otherwise have to check
    // whether we have a single definition or not, which would be
    // cumbersome.
    //
    // Note that we should try to unify nodes even when they are not
    // from the same allocation; for instance we should be able to
    // completely eliminate all allocations from the following graph:
    //
    // Block #0
    //  0: NewObject({})
    //  -: Branch(#1, #2)
    //
    // Block #1
    //  1: NewObject({})
    //  -: PutByOffset(@1, "left", val)
    //  -: PutStructure(@1, {val:0})
    //  -: PutByOffset(@0, @1, x)
    //  -: PutStructure(@0, {x:0})
    //  -: Jump(#3)
    //
    // Block #2
    //  2: NewObject({})
    //  -: PutByOffset(@2, "right", val)
    //  -: PutStructure(@2, {val:0})
    //  -: PutByOffset(@0, @2, x)
    //  -: PutStructure(@0, {x:0})
    //  -: Jump(#3)
    //
    // Block #3:
    //  3: GetByOffset(@0, x)
    //  4: GetByOffset(@3, val)
    //  -: Return(@4)
    template<typename Key, typename EscapeFunctor, typename RemoveFunctor>
    void mergePointerSets(
        const HashMap<Key, Node*>& my, const HashMap<Key, Node*>& their,
        const EscapeFunctor& escape, const RemoveFunctor& remove)
    {
        Vector<Key> toRemove;
        for (const auto& entry : my) {
            auto iter = their.find(entry.key);
            if (iter == their.end()) {
                toRemove.append(entry.key);
                escape(entry.value);
            } else if (iter->value != entry.value) {
                toRemove.append(entry.key);
                escape(entry.value);
                escape(iter->value);
            }
        }
        for (const auto& entry : their) {
            if (my.contains(entry.key))
                continue;
            escape(entry.value);
        }
        for (Key key : toRemove)
            remove(key);
    }
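
    // For illustration, given
    //
    //   my    = { a => #X, b => #Y, d => #V }
    //   their = { a => #X, b => #Z, c => #W }
    //
    // mergePointerSets behaves as follows:
    //   - a: same target on both sides, kept as-is
    //   - b: different targets, so b is removed and both #Y and #Z escape
    //   - c: present only in their, so #W escapes
    //   - d: present only in my, so d is removed and #V escapes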

    void escapeAllocation(Node* identifier)
    {
        Allocation& allocation = getAllocation(identifier);
        if (allocation.isEscapedAllocation())
            return;

        Allocation unescaped = WTF::move(allocation);
        allocation = Allocation(unescaped.identifier(), Allocation::Kind::Escaped);

        for (const auto& entry : unescaped.fields())
            escapeAllocation(entry.value);

        if (m_wantEscapees)
            m_escapees.add(unescaped.identifier(), WTF::move(unescaped));
    }

    void prune()
    {
        HashSet<Node*> reachable;
        for (const auto& entry : m_pointers)
            reachable.add(entry.value);

        // Repeatedly mark as reachable allocations in fields of other
        // reachable allocations
        {
            Vector<Node*> worklist;
            worklist.appendRange(reachable.begin(), reachable.end());

            while (!worklist.isEmpty()) {
                Node* identifier = worklist.takeLast();
                Allocation& allocation = m_allocations.find(identifier)->value;
                for (const auto& entry : allocation.fields()) {
                    if (reachable.add(entry.value).isNewEntry)
                        worklist.append(entry.value);
                }
            }
        }

        // Remove unreachable allocations
        {
            Vector<Node*> toRemove;
            for (const auto& entry : m_allocations) {
                if (!reachable.contains(entry.key))
                    toRemove.append(entry.key);
            }
            for (Node* identifier : toRemove)
                m_allocations.remove(identifier);
        }
    }

    bool m_reached = false;
    HashMap<Node*, Node*> m_pointers;
    HashMap<Node*, Allocation> m_allocations;

    bool m_wantEscapees = false;
    HashMap<Node*, Allocation> m_escapees;
};
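
// A compact sketch of how the phase drives LocalHeap (illustration
// only; all node variables here are hypothetical placeholders):
//
//   LocalHeap heap;
//   heap.setReached();
//   heap.newAllocation(objectNode, Allocation::Kind::Object);
//   heap.newAllocation(valueNode, Allocation::Kind::Object);
//   heap.getAllocation(objectNode).set(fieldDescriptor, valueNode);
//   heap.newPointer(getByOffsetNode, valueNode); // a read that must alias valueNode
//   heap.escape(callArgumentNode);               // escapes whatever it points to
//   heap.pruneByLiveness(liveNodesAtTail);       // drop dead pointers, then
//                                                // unreachable allocations
//   heapAtHeadOfSuccessor.merge(heap);           // conservative join at a CFG merge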

class ObjectAllocationSinkingPhase : public Phase {
public:
    ObjectAllocationSinkingPhase(Graph& graph)
        : Phase(graph, "object allocation elimination")
        , m_pointerSSA(graph)
        , m_allocationSSA(graph)
        , m_insertionSet(graph)
    {
    }

    bool run()
    {
        ASSERT(m_graph.m_form == SSA);
        ASSERT(m_graph.m_fixpointState == FixpointNotConverged);

        if (!performSinking())
            return false;

        if (verbose) {
            dataLog("Graph after elimination:\n");
            m_graph.dump();
        }

        return true;
    }

private:
    bool performSinking()
    {
        m_graph.computeRefCounts();
        m_graph.initializeNodeOwners();
        performLivenessAnalysis(m_graph);
        performOSRAvailabilityAnalysis(m_graph);
        m_combinedLiveness = CombinedLiveness(m_graph);

        CString graphBeforeSinking;
        if (Options::verboseValidationFailure() && Options::validateGraphAtEachPhase()) {
            StringPrintStream out;
            m_graph.dump(out);
            graphBeforeSinking = out.toCString();
        }

        if (verbose) {
            dataLog("Graph before elimination:\n");
            m_graph.dump();
        }

        performAnalysis();

        if (!determineSinkCandidates())
            return false;

        if (verbose) {
            for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
                dataLog("Heap at head of ", *block, ": \n", m_heapAtHead[block]);
                dataLog("Heap at tail of ", *block, ": \n", m_heapAtTail[block]);
            }
        }

        promoteLocalHeap();

        if (Options::validateGraphAtEachPhase())
            validate(m_graph, DumpGraph, graphBeforeSinking);
        return true;
    }

    void performAnalysis()
    {
        m_heapAtHead = BlockMap<LocalHeap>(m_graph);
        m_heapAtTail = BlockMap<LocalHeap>(m_graph);

        bool changed;
        do {
            if (verbose)
                dataLog("Doing iteration of escape analysis.\n");
            changed = false;

            for (BasicBlock* block : m_graph.blocksInPreOrder()) {
                m_heapAtHead[block].setReached();
                m_heap = m_heapAtHead[block];

                for (Node* node : *block) {
                    handleNode(
                        node,
                        [] (PromotedHeapLocation, LazyNode) { },
                        [&] (PromotedHeapLocation) -> Node* {
                            return nullptr;
                        });
                }

                if (m_heap == m_heapAtTail[block])
                    continue;

                m_heapAtTail[block] = m_heap;
                changed = true;

                m_heap.assertIsValid();

                // We keep only pointers that are live, and only
                // allocations that are either live, pointed to by a
                // live pointer, or (recursively) stored in a field of
                // a live allocation.
                //
                // This means we can accidentally leak non-dominating
                // nodes into the successor. However, due to the
                // non-dominance property, we are guaranteed that the
                // successor has at least one predecessor that is not
                // dominated either: this means any reference to a
                // non-dominating allocation in the successor will
                // trigger an escape and get pruned during the merge.
                m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

                for (BasicBlock* successorBlock : block->successors())
                    m_heapAtHead[successorBlock].merge(m_heap);
            }
        } while (changed);
    }
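
    // performAnalysis is a classic forward dataflow fixpoint. In
    // pseudocode (illustration only):
    //
    //   repeat until no heap-at-tail changes:
    //       for each block in pre-order:
    //           heap = heapAtHead[block]
    //           abstractly interpret each node into heap (handleNode below)
    //           heapAtTail[block] = heap
    //           prune heap by liveness at the block's tail
    //           for each successor: heapAtHead[successor].merge(heap)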

    template<typename WriteFunctor, typename ResolveFunctor>
    void handleNode(
        Node* node,
        const WriteFunctor& heapWrite,
        const ResolveFunctor& heapResolve)
    {
        m_heap.assertIsValid();
        ASSERT(m_heap.takeEscapees().isEmpty());

        Allocation* target = nullptr;
        HashMap<PromotedLocationDescriptor, LazyNode> writes;
        PromotedLocationDescriptor exactRead;

        switch (node->op()) {
        case NewObject:
            target = &m_heap.newAllocation(node, Allocation::Kind::Object);
            target->setStructures(node->structure());
            writes.add(
                StructurePLoc, LazyNode(m_graph.freeze(node->structure())));
            break;

        case NewFunction:
        case NewArrowFunction: {
            bool isArrowFunction = node->op() == NewArrowFunction;
            if (node->castOperand<FunctionExecutable*>()->singletonFunction()->isStillValid()) {
                m_heap.escape(node->child1().node());
                break;
            }

            target = &m_heap.newAllocation(node, isArrowFunction ? Allocation::Kind::NewArrowFunction : Allocation::Kind::Function);
            writes.add(FunctionExecutablePLoc, LazyNode(node->cellOperand()));
            writes.add(FunctionActivationPLoc, LazyNode(node->child1().node()));
            if (isArrowFunction)
                writes.add(ArrowFunctionBoundThisPLoc, LazyNode(node->child2().node()));
            break;
        }

        case CreateActivation: {
            if (node->castOperand<SymbolTable*>()->singletonScope()->isStillValid()) {
                m_heap.escape(node->child1().node());
                break;
            }
            target = &m_heap.newAllocation(node, Allocation::Kind::Activation);
            writes.add(ActivationSymbolTablePLoc, LazyNode(node->cellOperand()));
            writes.add(ActivationScopePLoc, LazyNode(node->child1().node()));
            {
                SymbolTable* symbolTable = node->castOperand<SymbolTable*>();
                ConcurrentJITLocker locker(symbolTable->m_lock);
                LazyNode initialValue(m_graph.freeze(node->initializationValueForActivation()));
                for (auto iter = symbolTable->begin(locker), end = symbolTable->end(locker); iter != end; ++iter) {
                    writes.add(
                        PromotedLocationDescriptor(ClosureVarPLoc, iter->value.scopeOffset().offset()),
                        initialValue);
                }
            }
            break;
        }

        case PutStructure:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isObjectAllocation()) {
                writes.add(StructurePLoc, LazyNode(m_graph.freeze(JSValue(node->transition()->next))));
                target->setStructures(node->transition()->next);
            } else
                m_heap.escape(node->child1().node());
            break;

        case CheckStructure: {
            Allocation* allocation = m_heap.onlyLocalAllocation(node->child1().node());
            if (allocation && allocation->isObjectAllocation()) {
                allocation->filterStructures(node->structureSet());
                if (Node* value = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc)))
                    node->convertToCheckStructureImmediate(value);
            } else
                m_heap.escape(node->child1().node());
            break;
        }

        case GetByOffset:
        case GetGetterSetterByOffset:
            target = m_heap.onlyLocalAllocation(node->child2().node());
            if (target && target->isObjectAllocation()) {
                unsigned identifierNumber = node->storageAccessData().identifierNumber;
                exactRead = PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber);
            } else {
                m_heap.escape(node->child1().node());
                m_heap.escape(node->child2().node());
            }
            break;

        case MultiGetByOffset: {
            Allocation* allocation = m_heap.onlyLocalAllocation(node->child1().node());
            if (allocation && allocation->isObjectAllocation()) {
                MultiGetByOffsetData& data = node->multiGetByOffsetData();
                StructureSet validStructures;
                bool hasInvalidStructures = false;
                for (const auto& multiGetByOffsetCase : data.cases) {
                    if (!allocation->structures().overlaps(multiGetByOffsetCase.set()))
                        continue;

                    switch (multiGetByOffsetCase.method().kind()) {
                    case GetByOffsetMethod::LoadFromPrototype: // We need to escape those
                    case GetByOffsetMethod::Constant: // We don't really have a way of expressing this
                        hasInvalidStructures = true;
                        break;

                    case GetByOffsetMethod::Load: // We're good
                        validStructures.merge(multiGetByOffsetCase.set());
                        break;

                    default:
                        RELEASE_ASSERT_NOT_REACHED();
                    }
                }
                if (hasInvalidStructures) {
                    m_heap.escape(node->child1().node());
                    break;
                }
                unsigned identifierNumber = data.identifierNumber;
                PromotedHeapLocation location(NamedPropertyPLoc, allocation->identifier(), identifierNumber);
                if (Node* value = heapResolve(location)) {
                    if (allocation->structures().isSubsetOf(validStructures))
                        node->replaceWith(value);
                    else {
                        Node* structure = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc));
                        ASSERT(structure);
                        allocation->filterStructures(validStructures);
                        node->convertToCheckStructure(m_graph.addStructureSet(allocation->structures()));
                        node->convertToCheckStructureImmediate(structure);
                        node->setReplacement(value);
                    }
                } else if (!allocation->structures().isSubsetOf(validStructures)) {
                    // Even though we don't need the result here, we still need
                    // to make the call to tell our caller that we could need
                    // the StructurePLoc.
                    // The reason for this is that when we decide not to sink a
                    // node, we will still lower any read to its fields before
                    // it escapes (which are usually reads across a function
                    // call that DFGClobberize can't handle) - but we only do
                    // this for PromotedHeapLocations that we have seen read
                    // during the analysis!
                    heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc));
                    allocation->filterStructures(validStructures);
                }
                Node* identifier = allocation->get(location.descriptor());
                if (identifier)
                    m_heap.newPointer(node, identifier);
            } else
                m_heap.escape(node->child1().node());
            break;
        }

        case PutByOffset:
            target = m_heap.onlyLocalAllocation(node->child2().node());
            if (target && target->isObjectAllocation()) {
                unsigned identifierNumber = node->storageAccessData().identifierNumber;
                writes.add(
                    PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber),
                    LazyNode(node->child3().node()));
            } else {
                m_heap.escape(node->child1().node());
                m_heap.escape(node->child2().node());
                m_heap.escape(node->child3().node());
            }
            break;

        case GetClosureVar:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isActivationAllocation()) {
                exactRead =
                    PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset());
            } else
                m_heap.escape(node->child1().node());
            break;

        case PutClosureVar:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isActivationAllocation()) {
                writes.add(
                    PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset()),
                    LazyNode(node->child2().node()));
            } else {
                m_heap.escape(node->child1().node());
                m_heap.escape(node->child2().node());
            }
            break;

        case SkipScope:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isActivationAllocation())
                exactRead = ActivationScopePLoc;
            else
                m_heap.escape(node->child1().node());
            break;

        case GetExecutable:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isFunctionAllocation())
                exactRead = FunctionExecutablePLoc;
            else
                m_heap.escape(node->child1().node());
            break;

        case LoadArrowFunctionThis:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isArrowFunctionAllocation())
                exactRead = ArrowFunctionBoundThisPLoc;
            else
                m_heap.escape(node->child1().node());
            break;

        case GetScope:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isFunctionAllocation())
                exactRead = FunctionActivationPLoc;
            else
                m_heap.escape(node->child1().node());
            break;

        case Check:
            m_graph.doToChildren(
                node,
                [&] (Edge edge) {
                    if (edge.willNotHaveCheck())
                        return;

                    if (alreadyChecked(edge.useKind(), SpecObject))
                        return;

                    m_heap.escape(edge.node());
                });
            break;

        case MovHint:
        case PutHint:
            // Handled by OSR availability analysis
            break;

        default:
            m_graph.doToChildren(
                node,
                [&] (Edge edge) {
                    m_heap.escape(edge.node());
                });
            break;
        }

        if (exactRead) {
            ASSERT(target);
            ASSERT(writes.isEmpty());
            if (Node* value = heapResolve(PromotedHeapLocation(target->identifier(), exactRead))) {
                ASSERT(!value->replacement());
                node->replaceWith(value);
            }
            Node* identifier = target->get(exactRead);
            if (identifier)
                m_heap.newPointer(node, identifier);
        }

        for (auto entry : writes) {
            ASSERT(target);
            if (entry.value.isNode())
                target->set(entry.key, m_heap.follow(entry.value.asNode()));
            else
                target->remove(entry.key);
            heapWrite(PromotedHeapLocation(target->identifier(), entry.key), entry.value);
        }

        m_heap.assertIsValid();
    }

    bool determineSinkCandidates()
    {
        m_sinkCandidates.clear();
        m_materializationToEscapee.clear();
        m_materializationSiteToMaterializations.clear();
        m_materializationSiteToRecoveries.clear();

        // Logically we wish to consider every allocation and sink
        // it. However, it is probably not profitable to sink an
        // allocation that will always escape. So, we only sink an
        // allocation if one of the following is true:
        //
        // 1) There exists a basic block with only backwards outgoing
        //    edges (or no outgoing edges) in which the node wasn't
        //    materialized. This is meant to catch
        //    effectively-infinite loops in which we don't need to
        //    have allocated the object.
        //
        // 2) There exists a basic block at the tail of which the node
        //    is dead and not materialized.
        //
        // 3) The sum of execution counts of the materializations is
        //    less than the sum of execution counts of the original
        //    node.
        //
        // We currently implement only rule #2.
        // FIXME: Implement the two other rules.
        // https://bugs.webkit.org/show_bug.cgi?id=137073 (rule #1)
        // https://bugs.webkit.org/show_bug.cgi?id=137074 (rule #3)
        //
        // However, these rules allow for a sunk object to be put into
        // a non-sunk one, which we don't support. We could solve this
        // by supporting PutHints on local allocations, making these
        // objects only partially correct, and we would need to adapt
        // the OSR availability analysis and OSR exit to handle
        // this. This would be totally doable, but would create a
        // super rare, and thus bug-prone, code path.
        // So, instead, we need to implement one of the following
        // closure rules:
        //
        // 1) If we put a sink candidate into a local allocation that
        //    is not a sink candidate, change our minds and don't
        //    actually sink the sink candidate.
        //
        // 2) If we put a sink candidate into a local allocation, that
        //    allocation becomes a sink candidate as well.
        //
        // We currently choose to implement closure rule #2.
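        //
        // For example (illustration only): if @1 is a sink candidate
        // and the analysis sees
        //
        //  -: PutByOffset(@0, @1, x)
        //
        // while @0 is still an unescaped local allocation, then @0 is
        // added to the candidate set as well, so that we never end up
        // with a sunk @1 stored into a materialized @0. This is what
        // the dependencies map and worklist below implement.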
        HashMap<Node*, Vector<Node*>> dependencies;
        bool hasUnescapedReads = false;
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            m_heap = m_heapAtHead[block];

            for (Node* node : *block) {
                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        if (!value.isNode())
                            return;

                        Allocation* allocation = m_heap.onlyLocalAllocation(value.asNode());
                        if (allocation && !allocation->isEscapedAllocation())
                            dependencies.add(allocation->identifier(), Vector<Node*>()).iterator->value.append(location.base());
                    },
                    [&] (PromotedHeapLocation) -> Node* {
                        hasUnescapedReads = true;
                        return nullptr;
                    });
            }

            // The sink candidates are initially the unescaped
            // allocations dying at tail of blocks
            HashSet<Node*> allocations;
            for (const auto& entry : m_heap.allocations()) {
                if (!entry.value.isEscapedAllocation())
                    allocations.add(entry.key);
            }

            m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

            for (Node* identifier : allocations) {
                if (!m_heap.isAllocation(identifier))
                    m_sinkCandidates.add(identifier);
            }
        }

        // Ensure that the set of sink candidates is closed for put operations
        Vector<Node*> worklist;
        worklist.appendRange(m_sinkCandidates.begin(), m_sinkCandidates.end());

        while (!worklist.isEmpty()) {
            for (Node* identifier : dependencies.get(worklist.takeLast())) {
                if (m_sinkCandidates.add(identifier).isNewEntry)
                    worklist.append(identifier);
            }
        }

        if (m_sinkCandidates.isEmpty())
            return hasUnescapedReads;

        if (verbose)
            dataLog("Candidates: ", listDump(m_sinkCandidates), "\n");

        // Create the materialization nodes
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            m_heap = m_heapAtHead[block];
            m_heap.setWantEscapees();

            for (Node* node : *block) {
                handleNode(
                    node,
                    [] (PromotedHeapLocation, LazyNode) { },
                    [] (PromotedHeapLocation) -> Node* {
                        return nullptr;
                    });
                auto escapees = m_heap.takeEscapees();
                if (!escapees.isEmpty())
                    placeMaterializations(escapees, node);
            }

            m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

            {
                HashMap<Node*, Allocation> escapingOnEdge;
                for (const auto& entry : m_heap.allocations()) {
                    if (entry.value.isEscapedAllocation())
                        continue;

                    bool mustEscape = false;
                    for (BasicBlock* successorBlock : block->successors()) {
                        if (!m_heapAtHead[successorBlock].isAllocation(entry.key)
                            || m_heapAtHead[successorBlock].getAllocation(entry.key).isEscapedAllocation())
                            mustEscape = true;
                    }

                    if (mustEscape)
                        escapingOnEdge.add(entry.key, entry.value);
                }
                placeMaterializations(WTF::move(escapingOnEdge), block->terminal());
            }
        }

        return hasUnescapedReads || !m_sinkCandidates.isEmpty();
    }

    void placeMaterializations(HashMap<Node*, Allocation> escapees, Node* where)
    {
        // We don't create materializations if the escapee is not a
        // sink candidate
        Vector<Node*> toRemove;
        for (const auto& entry : escapees) {
            if (!m_sinkCandidates.contains(entry.key))
                toRemove.append(entry.key);
        }
        for (Node* identifier : toRemove)
            escapees.remove(identifier);

        if (escapees.isEmpty())
            return;

        // First collect the hints that will be needed when the node
        // we materialize is still stored into other unescaped sink candidates
        Vector<PromotedHeapLocation> hints;
        for (const auto& entry : m_heap.allocations()) {
            if (escapees.contains(entry.key))
                continue;

            for (const auto& field : entry.value.fields()) {
                ASSERT(m_sinkCandidates.contains(entry.key) || !escapees.contains(field.value));
                if (escapees.contains(field.value) && !field.key.neededForMaterialization())
                    hints.append(PromotedHeapLocation(entry.key, field.key));
            }
        }

        // Now we need to order the materialization. Any order is
        // valid (as long as we materialize a node first if it is
        // needed for the materialization of another node, e.g. a
        // function's activation must be materialized before the
        // function itself), but we want to try minimizing the number
        // of times we have to place Puts to close cycles after a
        // materialization. In other words, we are trying to find the
        // minimum number of materializations to remove from the
        // materialization graph to make it a DAG, known as the
        // (vertex) feedback set problem. Unfortunately, this is an
        // NP-hard problem, which we don't want to solve exactly.
        //
        // Instead, we use a simple greedy procedure that proceeds as
        // follows:
        //  - While there is at least one node with no outgoing edge
        //    amongst the remaining materializations, materialize it
        //    first
        //
        //  - Similarly, while there is at least one node with no
        //    incoming edge amongst the remaining materializations,
        //    materialize it last.
        //
        //  - When both previous conditions are false, we have an
        //    actual cycle, and we need to pick a node to
        //    materialize. We try greedily to remove the "pressure" on
        //    the remaining nodes by choosing the node with maximum
        //    |incoming edges| * |outgoing edges| as a measure of how
        //    "central" to the graph it is. We materialize it first,
        //    so that all the recoveries will be Puts of things into
        //    it (rather than Puts of the materialization into other
        //    objects), which means we will have a single
        //    StoreBarrier.
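        //
        // A sketch of the cycle case (illustration only): if escapees
        // #@0 and #@1 each store the other in a field, neither has an
        // empty incoming or outgoing edge set. We then pick one, say
        // #@0 (the |in| * |out| products are equal here), materialize
        // it first with that field still unset, materialize #@1
        // pointing at the new #@0, and finally emit a recovery (e.g. a
        // PutByOffset) storing #@1 into #@0 to close the cycle.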
1293
1294
1295         // Compute dependencies between materializations
1296         HashMap<Node*, HashSet<Node*>> dependencies;
1297         HashMap<Node*, HashSet<Node*>> reverseDependencies;
1298         HashMap<Node*, HashSet<Node*>> forMaterialization;
1299         for (const auto& entry : escapees) {
1300             auto& myDependencies = dependencies.add(entry.key, HashSet<Node*>()).iterator->value;
1301             auto& myDependenciesForMaterialization = forMaterialization.add(entry.key, HashSet<Node*>()).iterator->value;
1302             reverseDependencies.add(entry.key, HashSet<Node*>());
1303             for (const auto& field : entry.value.fields()) {
1304                 if (escapees.contains(field.value) && field.value != entry.key) {
1305                     myDependencies.add(field.value);
1306                     reverseDependencies.add(field.value, HashSet<Node*>()).iterator->value.add(entry.key);
1307                     if (field.key.neededForMaterialization())
1308                         myDependenciesForMaterialization.add(field.value);
1309                 }
1310             }
1311         }
1312
1313         // Helper function to update the materialized set and the
1314         // dependencies
1315         HashSet<Node*> materialized;
1316         auto materialize = [&] (Node* identifier) {
1317             materialized.add(identifier);
1318             for (Node* dep : dependencies.get(identifier))
1319                 reverseDependencies.find(dep)->value.remove(identifier);
1320             for (Node* rdep : reverseDependencies.get(identifier)) {
1321                 dependencies.find(rdep)->value.remove(identifier);
1322                 forMaterialization.find(rdep)->value.remove(identifier);
1323             }
1324             dependencies.remove(identifier);
1325             reverseDependencies.remove(identifier);
1326             forMaterialization.remove(identifier);
1327         };
1328
1329         // Nodes without remaining unmaterialized fields will be
1330         // materialized first - amongst the remaining unmaterialized
1331         // nodes
1332         std::list<Allocation> toMaterialize;
1333         auto firstPos = toMaterialize.begin();
1334         auto materializeFirst = [&] (Allocation&& allocation) {
1335             materialize(allocation.identifier());
1336             // We need to insert *after* the current position
1337             if (firstPos != toMaterialize.end())
1338                 ++firstPos;
1339             firstPos = toMaterialize.insert(firstPos, WTF::move(allocation));
1340         };
1341
1342         // Nodes that no other unmaterialized node points to will be
1343         // materialized last - amongst the remaining unmaterialized
1344         // nodes
1345         auto lastPos = toMaterialize.end();
1346         auto materializeLast = [&] (Allocation&& allocation) {
1347             materialize(allocation.identifier());
1348             lastPos = toMaterialize.insert(lastPos, WTF::move(allocation));
1349         };
1350
1351         // These are the promoted locations that contains some of the
1352         // allocations we are currently escaping. If they are a location on
1353         // some other allocation we are currently materializing, we will need
1354         // to "recover" their value with a real put once the corresponding
1355         // allocation is materialized; if they are a location on some other
1356         // not-yet-materialized allocation, we will need a PutHint.
1357         Vector<PromotedHeapLocation> toRecover;
1358
1359         // This loop does the actual cycle breaking
1360         while (!escapees.isEmpty()) {
1361             materialized.clear();
1362
1363             // Materialize nodes that won't require recoveries if we can
1364             for (auto& entry : escapees) {
1365                 if (!forMaterialization.find(entry.key)->value.isEmpty())
1366                     continue;
1367
1368                 if (dependencies.find(entry.key)->value.isEmpty()) {
1369                     materializeFirst(WTF::move(entry.value));
1370                     continue;
1371                 }
1372
1373                 if (reverseDependencies.find(entry.key)->value.isEmpty()) {
1374                     materializeLast(WTF::move(entry.value));
1375                     continue;
1376                 }
1377             }
1378
1379             // We reach this only if there is an actual cycle that needs
1380             // breaking. Because we do not want to solve a NP-hard problem
1381             // here, we just heuristically pick a node and materialize it
1382             // first.
1383             if (materialized.isEmpty()) {
1384                 uint64_t maxEvaluation = 0;
1385                 Allocation* bestAllocation;
                for (auto& entry : escapees) {
                    if (!forMaterialization.find(entry.key)->value.isEmpty())
                        continue;

                    uint64_t evaluation =
                        static_cast<uint64_t>(dependencies.get(entry.key).size()) * reverseDependencies.get(entry.key).size();
                    if (evaluation > maxEvaluation) {
                        maxEvaluation = evaluation;
                        bestAllocation = &entry.value;
                    }
                }
                RELEASE_ASSERT(maxEvaluation > 0);

                materializeFirst(WTF::move(*bestAllocation));
            }
            RELEASE_ASSERT(!materialized.isEmpty());

            for (Node* identifier : materialized)
                escapees.remove(identifier);
        }

        materialized.clear();

        HashSet<Node*> escaped;
        for (const Allocation& allocation : toMaterialize)
            escaped.add(allocation.identifier());
        for (const Allocation& allocation : toMaterialize) {
            for (const auto& field : allocation.fields()) {
                if (escaped.contains(field.value) && !materialized.contains(field.value))
                    toRecover.append(PromotedHeapLocation(allocation.identifier(), field.key));
            }
            materialized.add(allocation.identifier());
        }
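
        // In the @0/@1 example above, with materialization order [@0, @1],
        // only @0.x needs recovery: by the time @1 is materialized, @0's
        // materialization already exists and can be referenced directly.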

        Vector<Node*>& materializations = m_materializationSiteToMaterializations.add(
            where, Vector<Node*>()).iterator->value;

        for (const Allocation& allocation : toMaterialize) {
            Node* materialization = createMaterialization(allocation, where);
            materializations.append(materialization);
            m_materializationToEscapee.add(materialization, allocation.identifier());
        }

        if (!toRecover.isEmpty()) {
            m_materializationSiteToRecoveries.add(
                where, Vector<PromotedHeapLocation>()).iterator->value.appendVector(toRecover);
        }

        // The hints need to come after the "real" recoveries so that we
        // don't hint not-yet-complete objects.
        if (!hints.isEmpty()) {
            m_materializationSiteToRecoveries.add(
                where, Vector<PromotedHeapLocation>()).iterator->value.appendVector(hints);
        }
    }

    Node* createMaterialization(const Allocation& allocation, Node* where)
    {
        // FIXME: This is the only place where we actually use the
        // fact that an allocation's identifier is indeed the node
        // that created the allocation.
        switch (allocation.kind()) {
        case Allocation::Kind::Object: {
            ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
            StructureSet* set = m_graph.addStructureSet(allocation.structures());

            return m_graph.addNode(
                allocation.identifier()->prediction(), Node::VarArg, MaterializeNewObject,
                where->origin.withSemantic(allocation.identifier()->origin.semantic),
                OpInfo(set), OpInfo(data), 0, 0);
        }

        case Allocation::Kind::NewArrowFunction:
        case Allocation::Kind::Function: {
            FrozenValue* executable = allocation.identifier()->cellOperand();

            NodeType nodeType = allocation.kind() == Allocation::Kind::NewArrowFunction ? NewArrowFunction : NewFunction;

            return m_graph.addNode(
                allocation.identifier()->prediction(), nodeType,
                where->origin.withSemantic(
                    allocation.identifier()->origin.semantic),
                OpInfo(executable));
        }

        case Allocation::Kind::Activation: {
            ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
            FrozenValue* symbolTable = allocation.identifier()->cellOperand();

            return m_graph.addNode(
                allocation.identifier()->prediction(), Node::VarArg, MaterializeCreateActivation,
                where->origin.withSemantic(
                    allocation.identifier()->origin.semantic),
                OpInfo(symbolTable), OpInfo(data), 0, 0);
        }

        default:
            DFG_CRASH(m_graph, allocation.identifier(), "Bad allocation kind");
        }
    }

    void promoteLocalHeap()
    {
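        // Overview of what follows:
        //  (1) collect the promoted heap locations we will operate on;
        //  (2) assign SSA variables to those locations ("pointers") and to
        //      the sink candidates themselves ("allocations");
        //  (3) create SSA defs for stores, materializations, and bottoms;
        //  (4) compute Phis for both SSA calculators;
        //  (5) walk the blocks, placing Phis, hints, materializations,
        //      recoveries, and Upsilons, and lowering sunk allocations to
        //      their phantom forms.
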
        // Collect the set of heap locations that we will be operating
        // over.
        HashSet<PromotedHeapLocation> locations;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            m_heap = m_heapAtHead[block];

            for (Node* node : *block) {
                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode) {
                        // A location that is not on a sink candidate only
                        // matters if it is read, and reads are collected by
                        // the second lambda below; so record writes only
                        // when their base is a sink candidate.
                        if (m_sinkCandidates.contains(location.base()))
                            locations.add(location);
                    },
                    [&] (PromotedHeapLocation location) -> Node* {
                        locations.add(location);
                        return nullptr;
                    });
            }
        }

        // Figure out which locations belong to which allocations.
        m_locationsForAllocation.clear();
        for (PromotedHeapLocation location : locations) {
            auto result = m_locationsForAllocation.add(
                location.base(),
                Vector<PromotedHeapLocation>());
            ASSERT(!result.iterator->value.contains(location));
            result.iterator->value.append(location);
        }

        m_pointerSSA.reset();
        m_allocationSSA.reset();

        // Collect the set of "variables" that we will be sinking.
        m_locationToVariable.clear();
        m_nodeToVariable.clear();
        Vector<Node*> indexToNode;
        Vector<PromotedHeapLocation> indexToLocation;

        for (Node* candidate : m_sinkCandidates) {
            SSACalculator::Variable* variable = m_allocationSSA.newVariable();
            m_nodeToVariable.add(candidate, variable);
            ASSERT(indexToNode.size() == variable->index());
            indexToNode.append(candidate);
        }

        for (PromotedHeapLocation location : locations) {
            SSACalculator::Variable* variable = m_pointerSSA.newVariable();
            m_locationToVariable.add(location, variable);
            ASSERT(indexToLocation.size() == variable->index());
            indexToLocation.append(location);
        }

        // We insert all required constants at the top of block 0 so that
        // they are inserted only once and we don't clutter the graph with
        // useless constants everywhere. The bottom constant's value (1927)
        // is arbitrary; it is only a placeholder.
        HashMap<FrozenValue*, Node*> lazyMapping;
        if (!m_bottom)
            m_bottom = m_insertionSet.insertConstant(0, NodeOrigin(), jsNumber(1927));
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            m_heap = m_heapAtHead[block];

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                // Some named properties can be added conditionally, so we
                // pre-seed their SSA variables with the bottom constant at
                // the allocation site, ensuring that every path sees a
                // definition.
                for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
                    if (location.kind() != NamedPropertyPLoc)
                        continue;

                    SSACalculator::Variable* variable = m_locationToVariable.get(location);
                    m_pointerSSA.newDef(variable, block, m_bottom);
                }

                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialization);
                    m_allocationSSA.newDef(m_nodeToVariable.get(escapee), block, materialization);
                }

                if (m_sinkCandidates.contains(node))
                    m_allocationSSA.newDef(m_nodeToVariable.get(node), block, node);

                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        if (!locations.contains(location))
                            return;

                        Node* nodeValue;
                        if (value.isNode())
                            nodeValue = value.asNode();
                        else {
                            auto iter = lazyMapping.find(value.asValue());
                            if (iter != lazyMapping.end())
                                nodeValue = iter->value;
                            else {
                                nodeValue = value.ensureIsNode(
                                    m_insertionSet, m_graph.block(0), 0);
                                lazyMapping.add(value.asValue(), nodeValue);
                            }
                        }

                        SSACalculator::Variable* variable = m_locationToVariable.get(location);
                        m_pointerSSA.newDef(variable, block, nodeValue);
                    },
                    [] (PromotedHeapLocation) -> Node* {
                        return nullptr;
                    });
            }
        }
        m_insertionSet.execute(m_graph.block(0));

        // Run the SSA calculators to create Phis.
        m_pointerSSA.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                PromotedHeapLocation location = indexToLocation[variable->index()];

                // Don't create Phi nodes for fields of dead allocations.
                if (!m_heapAtHead[block].isAllocation(location.base()))
                    return nullptr;

                // Don't create Phi nodes once the allocation has escaped.
                if (m_heapAtHead[block].getAllocation(location.base()).isEscapedAllocation())
                    return nullptr;

                // If we point to a single allocation, we will
                // directly use its materialization.
                if (m_heapAtHead[block].follow(location))
                    return nullptr;

                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, NodeOrigin());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });

        m_allocationSSA.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                Node* identifier = indexToNode[variable->index()];

                // Don't create Phi nodes for dead allocations.
                if (!m_heapAtHead[block].isAllocation(identifier))
                    return nullptr;

                // Don't create Phi nodes until the allocation has escaped.
                if (!m_heapAtHead[block].getAllocation(identifier).isEscapedAllocation())
                    return nullptr;

                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, NodeOrigin());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });
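
        // Note the duality: pointer Phis exist only while the base
        // allocation has *not* escaped (until then the local heap tracks
        // its fields), whereas allocation Phis exist only *after* it has
        // escaped (only then does a materialization flow along the CFG).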

        // Place Phis in the right places, replace all uses of any load with the appropriate
        // value, and create the materialization nodes.
        LocalOSRAvailabilityCalculator availabilityCalculator;
        m_graph.clearReplacements();
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            m_heap = m_heapAtHead[block];
            availabilityCalculator.beginBlock(block);

            // These mapping tables are intended to be lazy. If
            // something is omitted from the table, it means that
            // there haven't been any local stores to the promoted
            // heap location (or any local materialization).
            m_localMapping.clear();
            m_escapeeToMaterialization.clear();

            // Insert the Phi functions that we had previously
            // created.
            for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(block)) {
                SSACalculator::Variable* variable = phiDef->variable();
                m_insertionSet.insert(0, phiDef->value());

                PromotedHeapLocation location = indexToLocation[variable->index()];
                m_localMapping.set(location, phiDef->value());

                if (m_sinkCandidates.contains(location.base())) {
                    m_insertionSet.insert(
                        0, location.createHint(m_graph, NodeOrigin(), phiDef->value()));
                }
            }

            for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(block)) {
                SSACalculator::Variable* variable = phiDef->variable();
                m_insertionSet.insert(0, phiDef->value());

                Node* identifier = indexToNode[variable->index()];
                m_escapeeToMaterialization.add(identifier, phiDef->value());
                insertOSRHintsForUpdate(0, NodeOrigin(), availabilityCalculator.m_availability, identifier, phiDef->value());
            }

            if (verbose) {
                dataLog("Local mapping at ", pointerDump(block), ": ", mapDump(m_localMapping), "\n");
                dataLog("Local materializations at ", pointerDump(block), ": ", mapDump(m_escapeeToMaterialization), "\n");
            }

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
                    if (location.kind() != NamedPropertyPLoc)
                        continue;

                    m_localMapping.set(location, m_bottom);

                    if (m_sinkCandidates.contains(node)) {
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            location.createHint(m_graph, node->origin, m_bottom));
                    }
                }

                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialization);
                    populateMaterialization(block, materialization, escapee);
                    m_escapeeToMaterialization.set(escapee, materialization);
                    m_insertionSet.insert(nodeIndex, materialization);
                    if (verbose)
                        dataLog("Materializing ", escapee, " => ", materialization, " at ", node, "\n");
                }

                for (PromotedHeapLocation location : m_materializationSiteToRecoveries.get(node))
                    m_insertionSet.insert(nodeIndex, createRecovery(block, location, node));

                // We need to put the OSR hints after the recoveries,
                // because we only want the hints once the object is
                // complete.
                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialization);
                    insertOSRHintsForUpdate(
                        nodeIndex, node->origin,
                        availabilityCalculator.m_availability, escapee, materialization);
                }

                if (m_sinkCandidates.contains(node))
                    m_escapeeToMaterialization.set(node, node);

                availabilityCalculator.executeNode(node);

                bool doLower = false;
                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        if (!locations.contains(location))
                            return;

                        Node* nodeValue;
                        if (value.isNode())
                            nodeValue = value.asNode();
                        else
                            nodeValue = lazyMapping.get(value.asValue());

                        nodeValue = resolve(block, nodeValue);

                        m_localMapping.set(location, nodeValue);

                        if (!m_sinkCandidates.contains(location.base()))
                            return;

                        doLower = true;

                        m_insertionSet.insert(nodeIndex + 1,
                            location.createHint(m_graph, node->origin, nodeValue));
                    },
                    [&] (PromotedHeapLocation location) -> Node* {
                        return resolve(block, location);
                    });

                if (m_sinkCandidates.contains(node) || doLower) {
                    switch (node->op()) {
                    case NewObject:
                        node->convertToPhantomNewObject();
                        break;

                    case NewArrowFunction:
                    case NewFunction:
                        node->convertToPhantomNewFunction();
                        break;

                    case CreateActivation:
                        node->convertToPhantomCreateActivation();
                        break;

                    default:
                        node->remove();
                        break;
                    }
                }

                m_graph.doToChildren(
                    node,
                    [&] (Edge& edge) {
                        edge.setNode(resolve(block, edge.node()));
                    });
            }

            // Gotta drop some Upsilons: each Phi in a successor block needs
            // an Upsilon for its incoming value, inserted just before this
            // block's terminal.
            NodeAndIndex terminal = block->findTerminal();
            size_t upsilonInsertionPoint = terminal.index;
            NodeOrigin upsilonOrigin = terminal.node->origin;
            for (BasicBlock* successorBlock : block->successors()) {
                for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    PromotedHeapLocation location = indexToLocation[variable->index()];
                    Node* incoming = resolve(block, location);

                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }

                for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    Node* incoming = getMaterialization(block, indexToNode[variable->index()]);

                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }
            }

            m_insertionSet.execute(block);
        }
    }

    Node* resolve(BasicBlock* block, PromotedHeapLocation location)
    {
        // If we are currently pointing to a single local allocation,
        // simply return the associated materialization.
        if (Node* identifier = m_heap.follow(location))
            return getMaterialization(block, identifier);

        if (Node* result = m_localMapping.get(location))
            return result;

        // This implies that there is no local mapping. Find a non-local mapping.
        SSACalculator::Def* def = m_pointerSSA.nonLocalReachingDef(
            block, m_locationToVariable.get(location));
        ASSERT(def);
        ASSERT(def->value());

        Node* result = def->value();

        ASSERT(!result->replacement());

        m_localMapping.add(location, result);
        return result;
    }

    Node* resolve(BasicBlock* block, Node* node)
    {
        // If we are currently pointing to a single local allocation,
        // simply return the associated materialization.
        if (Node* identifier = m_heap.follow(node))
            return getMaterialization(block, identifier);

        if (node->replacement())
            node = node->replacement();
        ASSERT(!node->replacement());

        return node;
    }

    Node* getMaterialization(BasicBlock* block, Node* identifier)
    {
        ASSERT(m_heap.isAllocation(identifier));
        if (!m_sinkCandidates.contains(identifier))
            return identifier;

        if (Node* materialization = m_escapeeToMaterialization.get(identifier))
            return materialization;

        SSACalculator::Def* def = m_allocationSSA.nonLocalReachingDef(
            block, m_nodeToVariable.get(identifier));
        ASSERT(def && def->value());
        m_escapeeToMaterialization.add(identifier, def->value());
        ASSERT(!def->value()->replacement());
        return def->value();
    }

    void insertOSRHintsForUpdate(unsigned nodeIndex, NodeOrigin origin, AvailabilityMap& availability, Node* escapee, Node* materialization)
    {
        // We need to follow() the value in the heap.
        // Consider the following graph:
        //
        // Block #0
        //   0: NewObject({})
        //   1: NewObject({})
        //   -: PutByOffset(@0, @1, x:0)
        //   -: PutStructure(@0, {x:0})
        //   2: GetByOffset(@0, x:0)
        //   -: MovHint(@2, loc1)
        //   -: Branch(#1, #2)
        //
        // Block #1
        //   3: Call(f, @1)
        //   4: Return(@0)
        //
        // Block #2
        //   -: Return(undefined)
        //
        // We need to materialize @1 at @3, and when doing so we need
        // to insert a MovHint for the materialization into loc1 as
        // well.
        // In order to do this, we say that we need to insert an
        // update hint for any availability whose node resolve()s to
        // the materialization.
        for (auto entry : availability.m_heap) {
            if (!entry.value.hasNode())
                continue;
            if (m_heap.follow(entry.value.node()) != escapee)
                continue;

            m_insertionSet.insert(
                nodeIndex, entry.key.createHint(m_graph, origin, materialization));
        }

        for (unsigned i = availability.m_locals.size(); i--;) {
            if (!availability.m_locals[i].hasNode())
                continue;
            if (m_heap.follow(availability.m_locals[i].node()) != escapee)
                continue;

            int operand = availability.m_locals.operandForIndex(i);
            m_insertionSet.insertNode(
                nodeIndex, SpecNone, MovHint, origin, OpInfo(operand),
                materialization->defaultEdge());
        }
    }

    void populateMaterialization(BasicBlock* block, Node* node, Node* escapee)
    {
        Allocation& allocation = m_heap.getAllocation(escapee);
        switch (node->op()) {
        case MaterializeNewObject: {
            ObjectMaterializationData& data = node->objectMaterializationData();
            unsigned firstChild = m_graph.m_varArgChildren.size();

            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);

            PromotedHeapLocation structure(StructurePLoc, allocation.identifier());
            ASSERT(locations.contains(structure));

            m_graph.m_varArgChildren.append(Edge(resolve(block, structure), KnownCellUse));

            for (PromotedHeapLocation location : locations) {
                switch (location.kind()) {
                case StructurePLoc:
                    ASSERT(location == structure);
                    break;

                case NamedPropertyPLoc: {
                    ASSERT(location.base() == allocation.identifier());
                    data.m_properties.append(PhantomPropertyValue(location.info()));
                    Node* value = resolve(block, location);
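                    // If the resolved value is itself a sink candidate, it
                    // has no materialization yet at this point; feed the
                    // bottom placeholder instead and rely on the recoveries
                    // emitted at the materialization site to store the real
                    // value once it exists.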
                    if (m_sinkCandidates.contains(value))
                        m_graph.m_varArgChildren.append(m_bottom);
                    else
                        m_graph.m_varArgChildren.append(value);
                    break;
                }

                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }

            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }

        case MaterializeCreateActivation: {
            ObjectMaterializationData& data = node->objectMaterializationData();

            unsigned firstChild = m_graph.m_varArgChildren.size();

            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);

            PromotedHeapLocation symbolTable(ActivationSymbolTablePLoc, allocation.identifier());
            ASSERT(locations.contains(symbolTable));
            ASSERT(node->cellOperand() == resolve(block, symbolTable)->constant());
            m_graph.m_varArgChildren.append(Edge(resolve(block, symbolTable), KnownCellUse));

            PromotedHeapLocation scope(ActivationScopePLoc, allocation.identifier());
            ASSERT(locations.contains(scope));
            m_graph.m_varArgChildren.append(Edge(resolve(block, scope), KnownCellUse));

            for (PromotedHeapLocation location : locations) {
                switch (location.kind()) {
                case ActivationScopePLoc: {
                    ASSERT(location == scope);
                    break;
                }

                case ActivationSymbolTablePLoc: {
                    ASSERT(location == symbolTable);
                    break;
                }

                case ClosureVarPLoc: {
                    ASSERT(location.base() == allocation.identifier());
                    data.m_properties.append(PhantomPropertyValue(location.info()));
                    Node* value = resolve(block, location);
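                    // Same bottom-placeholder treatment as for named
                    // properties above.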
                    if (m_sinkCandidates.contains(value))
                        m_graph.m_varArgChildren.append(m_bottom);
                    else
                        m_graph.m_varArgChildren.append(value);
                    break;
                }

                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }

            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }

        case NewFunction:
        case NewArrowFunction: {
            bool isArrowFunction = node->op() == NewArrowFunction;
            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
            ASSERT(locations.size() == (isArrowFunction ? 3 : 2));

            PromotedHeapLocation executable(FunctionExecutablePLoc, allocation.identifier());
            ASSERT_UNUSED(executable, locations.contains(executable));

            PromotedHeapLocation activation(FunctionActivationPLoc, allocation.identifier());
            ASSERT(locations.contains(activation));

            node->child1() = Edge(resolve(block, activation), KnownCellUse);

            if (isArrowFunction) {
                PromotedHeapLocation boundThis(ArrowFunctionBoundThisPLoc, allocation.identifier());
                ASSERT(locations.contains(boundThis));
                node->child2() = Edge(resolve(block, boundThis), CellUse);
            }

            break;
        }

        default:
            DFG_CRASH(m_graph, node, "Bad materialize op");
        }
    }

    Node* createRecovery(BasicBlock* block, PromotedHeapLocation location, Node* where)
    {
        if (verbose)
            dataLog("Recovering ", location, " at ", where, "\n");
        ASSERT(location.base()->isPhantomAllocation());
        Node* base = getMaterialization(block, location.base());
        Node* value = resolve(block, location);

        if (verbose)
            dataLog("Base is ", base, " and value is ", value, "\n");

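        // If the base is itself still a phantom allocation at this point,
        // no store can be emitted yet; a PutHint suffices to keep OSR exit
        // informed of the field's value.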
        if (base->isPhantomAllocation()) {
            return PromotedHeapLocation(base, location.descriptor()).createHint(
                m_graph, where->origin.withSemantic(base->origin.semantic), value);
        }

        switch (location.kind()) {
        case NamedPropertyPLoc: {
            Allocation& allocation = m_heap.getAllocation(location.base());

            Vector<Structure*> structures;
            structures.appendRange(allocation.structures().begin(), allocation.structures().end());
            unsigned identifierNumber = location.info();
            UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];

            std::sort(
                structures.begin(),
                structures.end(),
                [uid] (Structure* a, Structure* b) -> bool {
                    return a->getConcurrently(uid) < b->getConcurrently(uid);
                });

            PropertyOffset firstOffset = structures[0]->getConcurrently(uid);

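            // All the structures agree on the offset, so a single
            // PutByOffset suffices.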
            if (firstOffset == structures.last()->getConcurrently(uid)) {
                Node* storage = base;
                // FIXME: When we decide to sink objects with a
                // property storage, we should handle non-inline offsets.
                RELEASE_ASSERT(isInlineOffset(firstOffset));

                StorageAccessData* data = m_graph.m_storageAccessData.add();
                data->offset = firstOffset;
                data->identifierNumber = identifierNumber;

                return m_graph.addNode(
                    SpecNone,
                    PutByOffset,
                    where->origin,
                    OpInfo(data),
                    Edge(storage, KnownCellUse),
                    Edge(base, KnownCellUse),
                    value->defaultEdge());
            }

            MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
            data->identifierNumber = identifierNumber;

            {
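                // The structures were sorted by offset above, so structures
                // sharing an offset form contiguous runs; emit one
                // replace-variant per run.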
                PropertyOffset currentOffset = firstOffset;
                StructureSet currentSet;
                for (Structure* structure : structures) {
                    PropertyOffset offset = structure->getConcurrently(uid);
                    if (offset != currentOffset) {
                        data->variants.append(
                            PutByIdVariant::replace(currentSet, currentOffset));
                        currentOffset = offset;
                        currentSet.clear();
                    }
                    currentSet.add(structure);
                }
                data->variants.append(PutByIdVariant::replace(currentSet, currentOffset));
            }

            return m_graph.addNode(
                SpecNone,
                MultiPutByOffset,
                where->origin.withSemantic(base->origin.semantic),
                OpInfo(data),
                Edge(base, KnownCellUse),
                value->defaultEdge());
        }

        case ClosureVarPLoc: {
            return m_graph.addNode(
                SpecNone,
                PutClosureVar,
                where->origin.withSemantic(base->origin.semantic),
                OpInfo(location.info()),
                Edge(base, KnownCellUse),
                value->defaultEdge());
        }

        default:
            DFG_CRASH(m_graph, base, "Bad location kind");
            break;
        }
    }

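    // Note on the maps below: m_materializationToEscapee maps each
    // materialization node back to the allocation it materializes;
    // m_materializationSiteToMaterializations and
    // m_materializationSiteToRecoveries record, for each site node, the
    // materializations and recoveries to insert there; and
    // m_locationsForAllocation lists the promoted locations that live on
    // each allocation.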
    SSACalculator m_pointerSSA;
    SSACalculator m_allocationSSA;
    HashSet<Node*> m_sinkCandidates;
    HashMap<PromotedHeapLocation, SSACalculator::Variable*> m_locationToVariable;
    HashMap<Node*, SSACalculator::Variable*> m_nodeToVariable;
    HashMap<PromotedHeapLocation, Node*> m_localMapping;
    HashMap<Node*, Node*> m_escapeeToMaterialization;
    InsertionSet m_insertionSet;
    CombinedLiveness m_combinedLiveness;

    HashMap<Node*, Node*> m_materializationToEscapee;
    HashMap<Node*, Vector<Node*>> m_materializationSiteToMaterializations;
    HashMap<Node*, Vector<PromotedHeapLocation>> m_materializationSiteToRecoveries;

    HashMap<Node*, Vector<PromotedHeapLocation>> m_locationsForAllocation;

    BlockMap<LocalHeap> m_heapAtHead;
    BlockMap<LocalHeap> m_heapAtTail;
    LocalHeap m_heap;

    Node* m_bottom = nullptr;
};

} // anonymous namespace

bool performObjectAllocationSinking(Graph& graph)
{
    SamplingRegion samplingRegion("DFG Object Allocation Sinking Phase");
    return runPhase<ObjectAllocationSinkingPhase>(graph);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)