/*
 * Copyright (C) 2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGObjectAllocationSinkingPhase.h"

#if ENABLE(DFG_JIT)

#include "DFGBlockMapInlines.h"
#include "DFGClobbersExitState.h"
#include "DFGCombinedLiveness.h"
#include "DFGGraph.h"
#include "DFGInsertionSet.h"
#include "DFGLazyNode.h"
#include "DFGLivenessAnalysisPhase.h"
#include "DFGOSRAvailabilityAnalysisPhase.h"
#include "DFGPhase.h"
#include "DFGPromotedHeapLocation.h"
#include "DFGSSACalculator.h"
#include "DFGValidate.h"
#include "JSCInlines.h"

#include <list>

namespace JSC { namespace DFG {

namespace {

bool verbose = false;

// In order to sink object cycles, we use a points-to analysis coupled
// with an escape analysis. This analysis is actually similar to an
// abstract interpreter focused on local allocations and ignoring
// everything else.
//
// We represent the local heap using two mappings:
//
// - A set of the local allocations present in the function, where
//   each of those has a further mapping from
//   PromotedLocationDescriptor to the local allocation it must point
//   to.
//
// - A "pointer" mapping from nodes to local allocations, if they must
//   be equal to said local allocation and are currently live. This
//   can be because the node is the actual node that created the
//   allocation, or any other node that must currently point to it -
//   we don't make a distinction.
//
// The following graph is a motivation for why we separate allocations
// from pointers:
//
// Block #0
//  0: NewObject({})
//  1: NewObject({})
//  -: PutByOffset(@0, @1, x)
//  -: PutStructure(@0, {x:0})
//  2: GetByOffset(@0, x)
//  -: Jump(#1)
//
// Block #1
//  -: Return(@2)
//
// Here, we need to remember in block #1 that @2 points to a local
// allocation with appropriate fields and structures information
// (because we should be able to place a materialization on top of
// block #1 here), even though @1 is dead. We *could* just keep @1
// artificially alive here, but there is no real reason to do it:
// after all, by the end of block #0, @1 and @2 should be completely
// interchangeable, and there is no reason for us to artificially make
// @1 more important.
//
// An important point to consider to understand this separation is
// that we should think of the local heap as follows: we have a
// bunch of nodes that are pointers to "allocations" that live
// someplace on the heap, and those allocations can have pointers in
// between themselves as well. We shouldn't care about whatever
// names we give to the allocations; what matters when
// comparing/merging two heaps is the isomorphism/comparison between
// the allocation graphs as seen by the nodes.
//
// For instance, in the following graph:
//
// Block #0
//  0: NewObject({})
//  -: Branch(#1, #2)
//
// Block #1
//  1: NewObject({})
//  -: PutByOffset(@0, @1, x)
//  -: PutStructure(@0, {x:0})
//  -: Jump(#3)
//
// Block #2
//  2: NewObject({})
//  -: PutByOffset(@2, undefined, x)
//  -: PutStructure(@2, {x:0})
//  -: PutByOffset(@0, @2, x)
//  -: PutStructure(@0, {x:0})
//  -: Jump(#3)
//
// Block #3
//  -: Return(@0)
//
// we should think of the heaps at the tails of blocks #1 and #2 as being
// exactly the same, even though one has @0.x pointing to @1 and the
// other has @0.x pointing to @2, because in essence this should not
// be different from the graph where we hoisted @1 and @2 into a
// single allocation in block #0. We currently will not handle this
// case, because we merge allocations based on the node they are
// coming from, but this is only a technicality for the sake of
// simplicity that shouldn't hide the deeper idea outlined here.

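// As a concrete illustration of these two mappings, here is a sketch
// (hypothetical state, written in the dump format LocalHeap uses
// below) of the local heap at the tail of block #0 in the first
// example above:
//
//   Allocations:
//     #@0: ObjectAllocation({x:0}, x => #@1)
//     #@1: ObjectAllocation({})
//   Pointers:
//     @0 => #@0
//     @1 => #@1
//     @2 => #@1
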
class Allocation {
public:
    // We use Escaped as a special allocation kind because when we
    // decide to sink an allocation, we still need to keep track of it
    // once it has escaped, as long as there are still pointers to it,
    // in order to replace any use of those pointers by the
    // corresponding materialization.
    enum class Kind { Escaped, Object, Activation, Function, GeneratorFunction };

    explicit Allocation(Node* identifier = nullptr, Kind kind = Kind::Escaped)
        : m_identifier(identifier)
        , m_kind(kind)
    {
    }

    const HashMap<PromotedLocationDescriptor, Node*>& fields() const
    {
        return m_fields;
    }

    Node* get(PromotedLocationDescriptor descriptor)
    {
        return m_fields.get(descriptor);
    }

    Allocation& set(PromotedLocationDescriptor descriptor, Node* value)
    {
        // Pointing to anything other than an unescaped local
        // allocation is represented by simply not having the
        // field.
        if (value)
            m_fields.set(descriptor, value);
        else
            m_fields.remove(descriptor);
        return *this;
    }

    void remove(PromotedLocationDescriptor descriptor)
    {
        set(descriptor, nullptr);
    }

    bool hasStructures() const
    {
        switch (kind()) {
        case Kind::Object:
            return true;

        default:
            return false;
        }
    }

    Allocation& setStructures(const StructureSet& structures)
    {
        ASSERT(hasStructures() && !structures.isEmpty());
        m_structures = structures;
        return *this;
    }

    Allocation& mergeStructures(const StructureSet& structures)
    {
        ASSERT(hasStructures() || structures.isEmpty());
        m_structures.merge(structures);
        return *this;
    }

    Allocation& filterStructures(const StructureSet& structures)
    {
        ASSERT(hasStructures());
        m_structures.filter(structures);
        return *this;
    }

    const StructureSet& structures() const
    {
        return m_structures;
    }

    Node* identifier() const { return m_identifier; }

    Kind kind() const { return m_kind; }

    bool isEscapedAllocation() const
    {
        return kind() == Kind::Escaped;
    }

    bool isObjectAllocation() const
    {
        return m_kind == Kind::Object;
    }

    bool isActivationAllocation() const
    {
        return m_kind == Kind::Activation;
    }

    bool isFunctionAllocation() const
    {
        return m_kind == Kind::Function || m_kind == Kind::GeneratorFunction;
    }

    bool operator==(const Allocation& other) const
    {
        return m_identifier == other.m_identifier
            && m_kind == other.m_kind
            && m_fields == other.m_fields
            && m_structures == other.m_structures;
    }

    bool operator!=(const Allocation& other) const
    {
        return !(*this == other);
    }

    void dump(PrintStream& out) const
    {
        dumpInContext(out, nullptr);
    }

    void dumpInContext(PrintStream& out, DumpContext* context) const
    {
        switch (m_kind) {
        case Kind::Escaped:
            out.print("Escaped");
            break;

        case Kind::Object:
            out.print("Object");
            break;

        case Kind::Function:
            out.print("Function");
            break;

        case Kind::GeneratorFunction:
            out.print("GeneratorFunction");
            break;

        case Kind::Activation:
            out.print("Activation");
            break;
        }
        out.print("Allocation(");
        if (!m_structures.isEmpty())
            out.print(inContext(m_structures, context));
        if (!m_fields.isEmpty()) {
            if (!m_structures.isEmpty())
                out.print(", ");
            out.print(mapDump(m_fields, " => #", ", "));
        }
        out.print(")");
    }

private:
    Node* m_identifier; // This is the actual node that created the allocation.
    Kind m_kind;
    HashMap<PromotedLocationDescriptor, Node*> m_fields;
    StructureSet m_structures;
};
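
// Usage sketch (illustrative only; node0, node1, xDescriptor and
// structureWithX are hypothetical stand-ins): recording the effect of
//
//  0: NewObject({})
//  1: NewObject({})
//  -: PutByOffset(@0, @1, x)
//  -: PutStructure(@0, {x:0})
//
// on @0's metadata amounts to something like:
//
//   Allocation object0(node0, Allocation::Kind::Object);
//   object0.setStructures(structureWithX); // PutStructure
//   object0.set(xDescriptor, node1);       // PutByOffset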

class LocalHeap {
public:
    Allocation& newAllocation(Node* node, Allocation::Kind kind)
    {
        ASSERT(!m_pointers.contains(node) && !isAllocation(node));
        m_pointers.add(node, node);
        return m_allocations.set(node, Allocation(node, kind)).iterator->value;
    }

    bool isAllocation(Node* identifier) const
    {
        return m_allocations.contains(identifier);
    }

    // Note that this is fundamentally different from
    // onlyLocalAllocation() below. getAllocation() takes as argument
    // a node-as-identifier, that is, an allocation node. This
    // allocation node doesn't have to be alive; it may only be
    // pointed to by other nodes or allocation fields.
    // For instance, in the following graph:
    //
    // Block #0
    //  0: NewObject({})
    //  1: NewObject({})
    //  -: PutByOffset(@0, @1, x)
    //  -: PutStructure(@0, {x:0})
    //  2: GetByOffset(@0, x)
    //  -: Jump(#1)
    //
    // Block #1
    //  -: Return(@2)
    //
    // At the head of block #1, the only reachable allocation is #@1,
    // which can be reached through node @2. Thus, getAllocation(#@1)
    // contains the appropriate metadata for this allocation, but
    // onlyLocalAllocation(@1) is null, as @1 is no longer a pointer
    // to #@1 (since it is dead). Conversely, onlyLocalAllocation(@2)
    // is the same as getAllocation(#@1), while getAllocation(#@2)
    // does not make sense since @2 is not an allocation node.
    //
    // This is meant to be used when the node is already known to be
    // an identifier (i.e. an allocation) - probably because it was
    // found as the value of a field or pointer in the current heap, or
    // was the result of a call to follow(). In any other case (such
    // as when doing anything while traversing the graph), the
    // appropriate function to call is probably onlyLocalAllocation.
    Allocation& getAllocation(Node* identifier)
    {
        auto iter = m_allocations.find(identifier);
        ASSERT(iter != m_allocations.end());
        return iter->value;
    }

    void newPointer(Node* node, Node* identifier)
    {
        ASSERT(!m_allocations.contains(node) && !m_pointers.contains(node));
        ASSERT(isAllocation(identifier));
        m_pointers.add(node, identifier);
    }

    // follow() solves the points-to problem. Given a live node, which
    // may be either an allocation itself or a heap read (e.g. a
    // GetByOffset node), it returns the corresponding allocation
    // node, if there is one. If the argument node is neither an
    // allocation nor a heap read, or may point to different nodes,
    // nullptr will be returned. Note that a node that points to
    // different nodes can never point to an unescaped local
    // allocation.
    Node* follow(Node* node) const
    {
        auto iter = m_pointers.find(node);
        ASSERT(iter == m_pointers.end() || m_allocations.contains(iter->value));
        return iter == m_pointers.end() ? nullptr : iter->value;
    }

    Node* follow(PromotedHeapLocation location) const
    {
        const Allocation& base = m_allocations.find(location.base())->value;
        auto iter = base.fields().find(location.descriptor());

        if (iter == base.fields().end())
            return nullptr;

        return iter->value;
    }

    // onlyLocalAllocation() finds the corresponding allocation metadata
    // for any live node. onlyLocalAllocation(node) is essentially
    // getAllocation(follow(node)), with appropriate null handling.
    Allocation* onlyLocalAllocation(Node* node)
    {
        Node* identifier = follow(node);
        if (!identifier)
            return nullptr;

        return &getAllocation(identifier);
    }

    Allocation* onlyLocalAllocation(PromotedHeapLocation location)
    {
        Node* identifier = follow(location);
        if (!identifier)
            return nullptr;

        return &getAllocation(identifier);
    }
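
    // Putting the two lookups together (informal sketch; getByOffsetNode
    // stands for node @2 in the example above):
    //
    //   if (Node* identifier = heap.follow(getByOffsetNode))
    //       Allocation& metadata = heap.getAllocation(identifier);
    //
    // which is exactly what onlyLocalAllocation(getByOffsetNode) does,
    // returning nullptr when follow() fails.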

    // This allows us to store the escapees only when necessary. If
    // set, the current escapees can be retrieved at any time using
    // takeEscapees(), which will clear the cached set of escapees;
    // otherwise the heap won't remember escaping allocations.
    void setWantEscapees()
    {
        m_wantEscapees = true;
    }

    HashMap<Node*, Allocation> takeEscapees()
    {
        return WTFMove(m_escapees);
    }

    void escape(Node* node)
    {
        Node* identifier = follow(node);
        if (!identifier)
            return;

        escapeAllocation(identifier);
    }

    void merge(const LocalHeap& other)
    {
        assertIsValid();
        other.assertIsValid();
        ASSERT(!m_wantEscapees);

        if (!reached()) {
            ASSERT(other.reached());
            *this = other;
            return;
        }

        HashSet<Node*> toEscape;

        for (auto& allocationEntry : other.m_allocations)
            m_allocations.add(allocationEntry.key, allocationEntry.value);
        for (auto& allocationEntry : m_allocations) {
            auto allocationIter = other.m_allocations.find(allocationEntry.key);

            // If we have it and they don't, it died for them but we
            // are keeping it alive through another field somewhere.
            // There is nothing to do - it will be escaped
            // automatically when we handle that other field. Any such
            // allocation that ends up unreferenced will get pruned
            // below.
            if (allocationIter == other.m_allocations.end())
                continue;

            if (allocationEntry.value.kind() != allocationIter->value.kind()) {
                toEscape.add(allocationEntry.key);
                for (const auto& fieldEntry : allocationIter->value.fields())
                    toEscape.add(fieldEntry.value);
            } else {
                mergePointerSets(
                    allocationEntry.value.fields(), allocationIter->value.fields(),
                    [&] (Node* identifier) {
                        toEscape.add(identifier);
                    },
                    [&] (PromotedLocationDescriptor field) {
                        allocationEntry.value.remove(field);
                    });
                allocationEntry.value.mergeStructures(allocationIter->value.structures());
            }
        }

        mergePointerSets(m_pointers, other.m_pointers,
            [&] (Node* identifier) {
                toEscape.add(identifier);
            },
            [&] (Node* field) {
                m_pointers.remove(field);
            });

        for (Node* identifier : toEscape)
            escapeAllocation(identifier);

        if (!ASSERT_DISABLED) {
            for (const auto& entry : m_allocations)
                ASSERT_UNUSED(entry, entry.value.isEscapedAllocation() || other.m_allocations.contains(entry.key));
        }

        // If there is no remaining pointer to an allocation, we can
        // remove it. This should only happen for escaped allocations,
        // because we only merge liveness-pruned heaps in the first
        // place.
        prune();

        assertIsValid();
    }

    void pruneByLiveness(const HashSet<Node*>& live)
    {
        Vector<Node*> toRemove;
        for (const auto& entry : m_pointers) {
            if (!live.contains(entry.key))
                toRemove.append(entry.key);
        }
        for (Node* node : toRemove)
            m_pointers.remove(node);

        prune();
    }

    void assertIsValid() const
    {
        if (ASSERT_DISABLED)
            return;

        // Pointers should point to an actual allocation.
        for (const auto& entry : m_pointers) {
            ASSERT_UNUSED(entry, entry.value);
            ASSERT(m_allocations.contains(entry.value));
        }

        for (const auto& allocationEntry : m_allocations) {
            // Fields should point to an actual allocation.
            for (const auto& fieldEntry : allocationEntry.value.fields()) {
                ASSERT_UNUSED(fieldEntry, fieldEntry.value);
                ASSERT(m_allocations.contains(fieldEntry.value));
            }
        }
    }

    bool operator==(const LocalHeap& other) const
    {
        assertIsValid();
        other.assertIsValid();
        return m_allocations == other.m_allocations
            && m_pointers == other.m_pointers;
    }

    bool operator!=(const LocalHeap& other) const
    {
        return !(*this == other);
    }

    const HashMap<Node*, Allocation>& allocations() const
    {
        return m_allocations;
    }

    const HashMap<Node*, Node*>& pointers() const
    {
        return m_pointers;
    }

    void dump(PrintStream& out) const
    {
        out.print("  Allocations:\n");
        for (const auto& entry : m_allocations)
            out.print("    #", entry.key, ": ", entry.value, "\n");
        out.print("  Pointers:\n");
        for (const auto& entry : m_pointers)
            out.print("    ", entry.key, " => #", entry.value, "\n");
    }

    bool reached() const
    {
        return m_reached;
    }

    void setReached()
    {
        m_reached = true;
    }

private:
    // When we merge two heaps, we escape all fields of allocations,
    // unless they point to the same thing in both heaps.
    // The reason for this is that it allows us not to do extra work
    // for diamond graphs where we would otherwise have to check
    // whether we have a single definition or not, which would be
    // cumbersome.
    //
    // Note that we should try to unify nodes even when they are not
    // from the same allocation; for instance we should be able to
    // completely eliminate all allocations from the following graph:
    //
    // Block #0
    //  0: NewObject({})
    //  -: Branch(#1, #2)
    //
    // Block #1
    //  1: NewObject({})
    //  -: PutByOffset(@1, "left", val)
    //  -: PutStructure(@1, {val:0})
    //  -: PutByOffset(@0, @1, x)
    //  -: PutStructure(@0, {x:0})
    //  -: Jump(#3)
    //
    // Block #2
    //  2: NewObject({})
    //  -: PutByOffset(@2, "right", val)
    //  -: PutStructure(@2, {val:0})
    //  -: PutByOffset(@0, @2, x)
    //  -: PutStructure(@0, {x:0})
    //  -: Jump(#3)
    //
    // Block #3
    //  3: GetByOffset(@0, x)
    //  4: GetByOffset(@3, val)
    //  -: Return(@4)
    template<typename Key, typename EscapeFunctor, typename RemoveFunctor>
    void mergePointerSets(
        const HashMap<Key, Node*>& my, const HashMap<Key, Node*>& their,
        const EscapeFunctor& escape, const RemoveFunctor& remove)
    {
        Vector<Key> toRemove;
        for (const auto& entry : my) {
            auto iter = their.find(entry.key);
            if (iter == their.end()) {
                toRemove.append(entry.key);
                escape(entry.value);
            } else if (iter->value != entry.value) {
                toRemove.append(entry.key);
                escape(entry.value);
                escape(iter->value);
            }
        }
        for (const auto& entry : their) {
            if (my.contains(entry.key))
                continue;
            escape(entry.value);
        }
        for (Key key : toRemove)
            remove(key);
    }
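
    // In the diamond example above, the two heaps flowing into block
    // #3 disagree on the value of @0.x (#@1 vs. #@2), so the merge
    // escapes both #@1 and #@2 and drops the field; #@0 itself
    // survives as a local allocation. Sketch of the merged heap at
    // the head of block #3 (hypothetical state):
    //
    //   Allocations:
    //     #@0: ObjectAllocation({x:0})
    //   Pointers:
    //     @0 => #@0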

    void escapeAllocation(Node* identifier)
    {
        Allocation& allocation = getAllocation(identifier);
        if (allocation.isEscapedAllocation())
            return;

        Allocation unescaped = WTFMove(allocation);
        allocation = Allocation(unescaped.identifier(), Allocation::Kind::Escaped);

        for (const auto& entry : unescaped.fields())
            escapeAllocation(entry.value);

        if (m_wantEscapees)
            m_escapees.add(unescaped.identifier(), WTFMove(unescaped));
    }

    void prune()
    {
        HashSet<Node*> reachable;
        for (const auto& entry : m_pointers)
            reachable.add(entry.value);

        // Repeatedly mark as reachable the allocations stored in
        // fields of other reachable allocations.
        {
            Vector<Node*> worklist;
            worklist.appendRange(reachable.begin(), reachable.end());

            while (!worklist.isEmpty()) {
                Node* identifier = worklist.takeLast();
                Allocation& allocation = m_allocations.find(identifier)->value;
                for (const auto& entry : allocation.fields()) {
                    if (reachable.add(entry.value).isNewEntry)
                        worklist.append(entry.value);
                }
            }
        }

        // Remove unreachable allocations.
        {
            Vector<Node*> toRemove;
            for (const auto& entry : m_allocations) {
                if (!reachable.contains(entry.key))
                    toRemove.append(entry.key);
            }
            for (Node* identifier : toRemove)
                m_allocations.remove(identifier);
        }
    }

    bool m_reached = false;
    HashMap<Node*, Node*> m_pointers;
    HashMap<Node*, Allocation> m_allocations;

    bool m_wantEscapees = false;
    HashMap<Node*, Allocation> m_escapees;
};
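
// Usage sketch for LocalHeap (illustrative only; node0, node1,
// getNode and xField are hypothetical stand-ins for actual graph
// nodes and promoted-location descriptors):
//
//   LocalHeap heap;
//   heap.newAllocation(node0, Allocation::Kind::Object); // 0: NewObject({})
//   heap.newAllocation(node1, Allocation::Kind::Object); // 1: NewObject({})
//   // -: PutByOffset(@0, @1, x)
//   heap.onlyLocalAllocation(node0)->set(xField, heap.follow(node1));
//   // 2: GetByOffset(@0, x)
//   if (Node* identifier = heap.follow(PromotedHeapLocation(node0, xField)))
//       heap.newPointer(getNode, identifier);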

class ObjectAllocationSinkingPhase : public Phase {
public:
    ObjectAllocationSinkingPhase(Graph& graph)
        : Phase(graph, "object allocation elimination")
        , m_pointerSSA(graph)
        , m_allocationSSA(graph)
        , m_insertionSet(graph)
    {
    }

    bool run()
    {
        ASSERT(m_graph.m_form == SSA);
        ASSERT(m_graph.m_fixpointState == FixpointNotConverged);

        if (!performSinking())
            return false;

        if (verbose) {
            dataLog("Graph after elimination:\n");
            m_graph.dump();
        }

        return true;
    }

private:
    bool performSinking()
    {
        m_graph.computeRefCounts();
        m_graph.initializeNodeOwners();
        m_graph.ensureDominators();
        performLivenessAnalysis(m_graph);
        performOSRAvailabilityAnalysis(m_graph);
        m_combinedLiveness = CombinedLiveness(m_graph);

        CString graphBeforeSinking;
        if (Options::verboseValidationFailure() && Options::validateGraphAtEachPhase()) {
            StringPrintStream out;
            m_graph.dump(out);
            graphBeforeSinking = out.toCString();
        }

        if (verbose) {
            dataLog("Graph before elimination:\n");
            m_graph.dump();
        }

        performAnalysis();

        if (!determineSinkCandidates())
            return false;

        if (verbose) {
            for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
                dataLog("Heap at head of ", *block, ": \n", m_heapAtHead[block]);
                dataLog("Heap at tail of ", *block, ": \n", m_heapAtTail[block]);
            }
        }

        promoteLocalHeap();

        if (Options::validateGraphAtEachPhase())
            DFG::validate(m_graph, DumpGraph, graphBeforeSinking);
        return true;
    }

    void performAnalysis()
    {
        m_heapAtHead = BlockMap<LocalHeap>(m_graph);
        m_heapAtTail = BlockMap<LocalHeap>(m_graph);

        bool changed;
        do {
            if (verbose)
                dataLog("Doing iteration of escape analysis.\n");
            changed = false;

            for (BasicBlock* block : m_graph.blocksInPreOrder()) {
                m_heapAtHead[block].setReached();
                m_heap = m_heapAtHead[block];

                for (Node* node : *block) {
                    handleNode(
                        node,
                        [] (PromotedHeapLocation, LazyNode) { },
                        [&] (PromotedHeapLocation) -> Node* {
                            return nullptr;
                        });
                }

                if (m_heap == m_heapAtTail[block])
                    continue;

                m_heapAtTail[block] = m_heap;
                changed = true;

                m_heap.assertIsValid();

                // We keep only pointers that are live, and only
                // allocations that are either live, pointed to by a
                // live pointer, or (recursively) stored in a field of
                // a live allocation.
                //
                // This means we can accidentally leak non-dominating
                // nodes into the successor. However, due to the
                // non-dominance property, we are guaranteed that the
                // successor has at least one predecessor that is not
                // dominated either: this means any reference to a
                // non-dominating allocation in the successor will
                // trigger an escape and get pruned during the merge.
                m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

                for (BasicBlock* successorBlock : block->successors())
                    m_heapAtHead[successorBlock].merge(m_heap);
            }
        } while (changed);
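
        // (Informal note: merging at a block head only ever makes the
        // recorded heap less precise - more escaped allocations, fewer
        // agreed-upon pointers and fields - over the finite set of
        // graph nodes, which is why the iteration above converges.)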
    }

    template<typename WriteFunctor, typename ResolveFunctor>
    void handleNode(
        Node* node,
        const WriteFunctor& heapWrite,
        const ResolveFunctor& heapResolve)
    {
        m_heap.assertIsValid();
        ASSERT(m_heap.takeEscapees().isEmpty());

        Allocation* target = nullptr;
        HashMap<PromotedLocationDescriptor, LazyNode> writes;
        PromotedLocationDescriptor exactRead;

        switch (node->op()) {
        case NewObject:
            target = &m_heap.newAllocation(node, Allocation::Kind::Object);
            target->setStructures(node->structure());
            writes.add(
                StructurePLoc, LazyNode(m_graph.freeze(node->structure())));
            break;

        case NewFunction:
        case NewGeneratorFunction: {
            if (isStillValid(node->castOperand<FunctionExecutable*>()->singletonFunction())) {
                m_heap.escape(node->child1().node());
                break;
            }

            if (node->op() == NewGeneratorFunction)
                target = &m_heap.newAllocation(node, Allocation::Kind::GeneratorFunction);
            else
                target = &m_heap.newAllocation(node, Allocation::Kind::Function);
            writes.add(FunctionExecutablePLoc, LazyNode(node->cellOperand()));
            writes.add(FunctionActivationPLoc, LazyNode(node->child1().node()));
            break;
        }

        case CreateActivation: {
            if (isStillValid(node->castOperand<SymbolTable*>()->singletonScope())) {
                m_heap.escape(node->child1().node());
                break;
            }
            target = &m_heap.newAllocation(node, Allocation::Kind::Activation);
            writes.add(ActivationSymbolTablePLoc, LazyNode(node->cellOperand()));
            writes.add(ActivationScopePLoc, LazyNode(node->child1().node()));
            {
                SymbolTable* symbolTable = node->castOperand<SymbolTable*>();
                ConcurrentJITLocker locker(symbolTable->m_lock);
                LazyNode initialValue(m_graph.freeze(node->initializationValueForActivation()));
                for (auto iter = symbolTable->begin(locker), end = symbolTable->end(locker); iter != end; ++iter) {
                    writes.add(
                        PromotedLocationDescriptor(ClosureVarPLoc, iter->value.scopeOffset().offset()),
                        initialValue);
                }
            }
            break;
        }

        case PutStructure:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isObjectAllocation()) {
                writes.add(StructurePLoc, LazyNode(m_graph.freeze(JSValue(node->transition()->next))));
                target->setStructures(node->transition()->next);
            } else
                m_heap.escape(node->child1().node());
            break;

        case CheckStructure: {
            Allocation* allocation = m_heap.onlyLocalAllocation(node->child1().node());
            if (allocation && allocation->isObjectAllocation()) {
                allocation->filterStructures(node->structureSet());
                if (Node* value = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc)))
                    node->convertToCheckStructureImmediate(value);
            } else
                m_heap.escape(node->child1().node());
            break;
        }

        case GetByOffset:
        case GetGetterSetterByOffset:
            target = m_heap.onlyLocalAllocation(node->child2().node());
            if (target && target->isObjectAllocation()) {
                unsigned identifierNumber = node->storageAccessData().identifierNumber;
                exactRead = PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber);
            } else {
                m_heap.escape(node->child1().node());
                m_heap.escape(node->child2().node());
            }
            break;

        case MultiGetByOffset: {
            Allocation* allocation = m_heap.onlyLocalAllocation(node->child1().node());
            if (allocation && allocation->isObjectAllocation()) {
                MultiGetByOffsetData& data = node->multiGetByOffsetData();
                StructureSet validStructures;
                bool hasInvalidStructures = false;
                for (const auto& multiGetByOffsetCase : data.cases) {
                    if (!allocation->structures().overlaps(multiGetByOffsetCase.set()))
                        continue;

                    switch (multiGetByOffsetCase.method().kind()) {
                    case GetByOffsetMethod::LoadFromPrototype: // We need to escape those.
                    case GetByOffsetMethod::Constant: // We don't really have a way of expressing this.
                        hasInvalidStructures = true;
                        break;

                    case GetByOffsetMethod::Load: // We're good.
                        validStructures.merge(multiGetByOffsetCase.set());
                        break;

                    default:
                        RELEASE_ASSERT_NOT_REACHED();
                    }
                }
                if (hasInvalidStructures) {
                    m_heap.escape(node->child1().node());
                    break;
                }
                unsigned identifierNumber = data.identifierNumber;
                PromotedHeapLocation location(NamedPropertyPLoc, allocation->identifier(), identifierNumber);
                if (Node* value = heapResolve(location)) {
                    if (allocation->structures().isSubsetOf(validStructures))
                        node->replaceWith(value);
                    else {
                        Node* structure = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc));
                        ASSERT(structure);
                        allocation->filterStructures(validStructures);
                        node->convertToCheckStructure(m_graph.addStructureSet(allocation->structures()));
                        node->convertToCheckStructureImmediate(structure);
                        node->setReplacement(value);
                    }
                } else if (!allocation->structures().isSubsetOf(validStructures)) {
                    // Even though we don't need the result here, we still need
                    // to make the call to tell our caller that we could need
                    // the StructurePLoc.
                    // The reason for this is that when we decide not to sink a
                    // node, we will still lower any read of its fields before
                    // it escapes (those are usually reads across a function
                    // call that DFGClobberize can't handle) - but we only do
                    // this for PromotedHeapLocations that we have seen read
                    // during the analysis!
                    heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc));
                    allocation->filterStructures(validStructures);
                }
                Node* identifier = allocation->get(location.descriptor());
                if (identifier)
                    m_heap.newPointer(node, identifier);
            } else
                m_heap.escape(node->child1().node());
            break;
        }

        case PutByOffset:
            target = m_heap.onlyLocalAllocation(node->child2().node());
            if (target && target->isObjectAllocation()) {
                unsigned identifierNumber = node->storageAccessData().identifierNumber;
                writes.add(
                    PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber),
                    LazyNode(node->child3().node()));
            } else {
                m_heap.escape(node->child1().node());
                m_heap.escape(node->child2().node());
                m_heap.escape(node->child3().node());
            }
            break;

        case GetClosureVar:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isActivationAllocation()) {
                exactRead =
                    PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset());
            } else
                m_heap.escape(node->child1().node());
            break;

        case PutClosureVar:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isActivationAllocation()) {
                writes.add(
                    PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset()),
                    LazyNode(node->child2().node()));
            } else {
                m_heap.escape(node->child1().node());
                m_heap.escape(node->child2().node());
            }
            break;

        case SkipScope:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isActivationAllocation())
                exactRead = ActivationScopePLoc;
            else
                m_heap.escape(node->child1().node());
            break;

        case GetExecutable:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isFunctionAllocation())
                exactRead = FunctionExecutablePLoc;
            else
                m_heap.escape(node->child1().node());
            break;

        case GetScope:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isFunctionAllocation())
                exactRead = FunctionActivationPLoc;
            else
                m_heap.escape(node->child1().node());
            break;

        case Check:
            m_graph.doToChildren(
                node,
                [&] (Edge edge) {
                    if (edge.willNotHaveCheck())
                        return;

                    if (alreadyChecked(edge.useKind(), SpecObject))
                        return;

                    m_heap.escape(edge.node());
                });
            break;

        case MovHint:
        case PutHint:
            // Handled by OSR availability analysis.
            break;

        default:
            m_graph.doToChildren(
                node,
                [&] (Edge edge) {
                    m_heap.escape(edge.node());
                });
            break;
        }

        if (exactRead) {
            ASSERT(target);
            ASSERT(writes.isEmpty());
            if (Node* value = heapResolve(PromotedHeapLocation(target->identifier(), exactRead))) {
                ASSERT(!value->replacement());
                node->replaceWith(value);
            }
            Node* identifier = target->get(exactRead);
            if (identifier)
                m_heap.newPointer(node, identifier);
        }

        for (auto entry : writes) {
            ASSERT(target);
            if (entry.value.isNode())
                target->set(entry.key, m_heap.follow(entry.value.asNode()));
            else
                target->remove(entry.key);
            heapWrite(PromotedHeapLocation(target->identifier(), entry.key), entry.value);
        }

        m_heap.assertIsValid();
    }
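
    // handleNode() is parameterized over what to do about heap writes
    // and reads: performAnalysis() above passes no-op functors, since it
    // only needs the resulting heap shape; determineSinkCandidates()
    // below uses the functors to record write dependencies and to note
    // whether any read could be resolved against a local allocation;
    // and promoteLocalHeap() (later in this file) is expected to pass
    // functors that actually lower the recorded writes and reads.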

    bool determineSinkCandidates()
    {
        m_sinkCandidates.clear();
        m_materializationToEscapee.clear();
        m_materializationSiteToMaterializations.clear();
        m_materializationSiteToRecoveries.clear();

        // Logically we wish to consider every allocation and sink
        // it. However, it is probably not profitable to sink an
        // allocation that will always escape. So, we only sink an
        // allocation if one of the following is true:
        //
        // 1) There exists a basic block with only backwards outgoing
        //    edges (or no outgoing edges) in which the node wasn't
        //    materialized. This is meant to catch
        //    effectively-infinite loops in which we don't need to
        //    have allocated the object.
        //
        // 2) There exists a basic block at the tail of which the node
        //    is dead and not materialized.
        //
        // 3) The sum of execution counts of the materializations is
        //    less than the sum of execution counts of the original
        //    node.
        //
        // We currently implement only rule #2.
        // FIXME: Implement the two other rules.
        // https://bugs.webkit.org/show_bug.cgi?id=137073 (rule #1)
        // https://bugs.webkit.org/show_bug.cgi?id=137074 (rule #3)
        //
        // However, these rules allow for a sunk object to be put into
        // a non-sunk one, which we don't support. We could solve this
        // by supporting PutHints on local allocations, making these
        // objects only partially correct, and we would need to adapt
        // the OSR availability analysis and OSR exit to handle
        // this. This would be totally doable, but would create a
        // super rare, and thus bug-prone, code path.
        // So, instead, we need to implement one of the following
        // closure rules:
        //
        // 1) If we put a sink candidate into a local allocation that
        //    is not a sink candidate, change our minds and don't
        //    actually sink the sink candidate.
        //
        // 2) If we put a sink candidate into a local allocation, that
        //    allocation becomes a sink candidate as well.
        //
        // We currently choose to implement closure rule #2.
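        //
        // For instance (hypothetical graph), if @1 is a sink candidate
        // and we see
        //
        //  -: PutByOffset(@0, @1, x)
        //
        // where @0 is an unescaped local allocation that is not yet a
        // candidate, rule #2 pulls @0 into the candidate set as well,
        // so that we never have to store a not-yet-materialized @1
        // into a real object.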
        HashMap<Node*, Vector<Node*>> dependencies;
        bool hasUnescapedReads = false;
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            m_heap = m_heapAtHead[block];

            for (Node* node : *block) {
                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        if (!value.isNode())
                            return;

                        Allocation* allocation = m_heap.onlyLocalAllocation(value.asNode());
                        if (allocation && !allocation->isEscapedAllocation())
                            dependencies.add(allocation->identifier(), Vector<Node*>()).iterator->value.append(location.base());
                    },
                    [&] (PromotedHeapLocation) -> Node* {
                        hasUnescapedReads = true;
                        return nullptr;
                    });
            }

            // The sink candidates are initially the unescaped
            // allocations dying at the tail of blocks.
            HashSet<Node*> allocations;
            for (const auto& entry : m_heap.allocations()) {
                if (!entry.value.isEscapedAllocation())
                    allocations.add(entry.key);
            }

            m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

            for (Node* identifier : allocations) {
                if (!m_heap.isAllocation(identifier))
                    m_sinkCandidates.add(identifier);
            }
        }

        // Ensure that the set of sink candidates is closed under put
        // operations.
        Vector<Node*> worklist;
        worklist.appendRange(m_sinkCandidates.begin(), m_sinkCandidates.end());

        while (!worklist.isEmpty()) {
            for (Node* identifier : dependencies.get(worklist.takeLast())) {
                if (m_sinkCandidates.add(identifier).isNewEntry)
                    worklist.append(identifier);
            }
        }

        if (m_sinkCandidates.isEmpty())
            return hasUnescapedReads;

        if (verbose)
            dataLog("Candidates: ", listDump(m_sinkCandidates), "\n");

        // Create the materialization nodes.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            m_heap = m_heapAtHead[block];
            m_heap.setWantEscapees();

            for (Node* node : *block) {
                handleNode(
                    node,
                    [] (PromotedHeapLocation, LazyNode) { },
                    [] (PromotedHeapLocation) -> Node* {
                        return nullptr;
                    });
                auto escapees = m_heap.takeEscapees();
                if (!escapees.isEmpty())
                    placeMaterializations(escapees, node);
            }

            m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

            {
                HashMap<Node*, Allocation> escapingOnEdge;
                for (const auto& entry : m_heap.allocations()) {
                    if (entry.value.isEscapedAllocation())
                        continue;

                    bool mustEscape = false;
                    for (BasicBlock* successorBlock : block->successors()) {
                        if (!m_heapAtHead[successorBlock].isAllocation(entry.key)
                            || m_heapAtHead[successorBlock].getAllocation(entry.key).isEscapedAllocation())
                            mustEscape = true;
                    }

                    if (mustEscape)
                        escapingOnEdge.add(entry.key, entry.value);
                }
                placeMaterializations(WTFMove(escapingOnEdge), block->terminal());
            }
        }

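        // Note that even when no allocation gets sunk, we still report
        // success if some reads could be resolved against local
        // allocations (hasUnescapedReads): lowering those reads in the
        // later promotion pass is profitable on its own.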
        return hasUnescapedReads || !m_sinkCandidates.isEmpty();
    }

    void placeMaterializations(HashMap<Node*, Allocation> escapees, Node* where)
    {
        // We don't create materializations if the escapee is not a
        // sink candidate.
        Vector<Node*> toRemove;
        for (const auto& entry : escapees) {
            if (!m_sinkCandidates.contains(entry.key))
                toRemove.append(entry.key);
        }
        for (Node* identifier : toRemove)
            escapees.remove(identifier);

        if (escapees.isEmpty())
            return;

        // First collect the hints that will be needed when the node
        // we materialize is still stored into other unescaped sink
        // candidates.
        Vector<PromotedHeapLocation> hints;
        for (const auto& entry : m_heap.allocations()) {
            if (escapees.contains(entry.key))
                continue;

            for (const auto& field : entry.value.fields()) {
                ASSERT(m_sinkCandidates.contains(entry.key) || !escapees.contains(field.value));
                if (escapees.contains(field.value) && !field.key.neededForMaterialization())
                    hints.append(PromotedHeapLocation(entry.key, field.key));
            }
        }

        // Now we need to order the materializations. Any order is
        // valid (as long as we materialize a node first if it is
        // needed for the materialization of another node, e.g. a
        // function's activation must be materialized before the
        // function itself), but we want to try minimizing the number
        // of times we have to place Puts to close cycles after a
        // materialization. In other words, we are trying to find the
        // minimum number of materializations to remove from the
        // materialization graph to make it a DAG, known as the
        // feedback vertex set problem. Unfortunately, this is an
        // NP-hard problem, which we don't want to solve exactly.
        //
        // Instead, we use a simple greedy procedure that proceeds as
        // follows:
        //  - While there is at least one node with no outgoing edge
        //    amongst the remaining materializations, materialize it
        //    first.
        //
        //  - Similarly, while there is at least one node with no
        //    incoming edge amongst the remaining materializations,
        //    materialize it last.
        //
        //  - When both previous conditions are false, we have an
        //    actual cycle, and we need to pick a node to
        //    materialize. We try greedily to remove the "pressure" on
        //    the remaining nodes by choosing the node with maximum
        //    |incoming edges| * |outgoing edges| as a measure of how
        //    "central" to the graph it is. We materialize it first,
        //    so that all the recoveries will be Puts of things into
        //    it (rather than Puts of the materialization into other
        //    objects), which means we will have a single
        //    StoreBarrier.
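        //
        // Tiny illustration of the cyclic case (hypothetical): two
        // escaping objects where @0.f points to @1 and @1.g points
        // back to @0. Named property fields are not needed for
        // materialization, so either node can be picked; both have
        // |incoming edges| * |outgoing edges| == 1. If @0 is
        // materialized first and @1 second, the dangling @0.f is
        // recorded as a recovery and becomes a real PutByOffset once
        // @1 has been materialized.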

        // Compute dependencies between materializations.
        HashMap<Node*, HashSet<Node*>> dependencies;
        HashMap<Node*, HashSet<Node*>> reverseDependencies;
        HashMap<Node*, HashSet<Node*>> forMaterialization;
        for (const auto& entry : escapees) {
            auto& myDependencies = dependencies.add(entry.key, HashSet<Node*>()).iterator->value;
            auto& myDependenciesForMaterialization = forMaterialization.add(entry.key, HashSet<Node*>()).iterator->value;
            reverseDependencies.add(entry.key, HashSet<Node*>());
            for (const auto& field : entry.value.fields()) {
                if (escapees.contains(field.value) && field.value != entry.key) {
                    myDependencies.add(field.value);
                    reverseDependencies.add(field.value, HashSet<Node*>()).iterator->value.add(entry.key);
                    if (field.key.neededForMaterialization())
                        myDependenciesForMaterialization.add(field.value);
                }
            }
        }

        // Helper function to update the materialized set and the
        // dependencies.
        HashSet<Node*> materialized;
        auto materialize = [&] (Node* identifier) {
            materialized.add(identifier);
            for (Node* dep : dependencies.get(identifier))
                reverseDependencies.find(dep)->value.remove(identifier);
            for (Node* rdep : reverseDependencies.get(identifier)) {
                dependencies.find(rdep)->value.remove(identifier);
                forMaterialization.find(rdep)->value.remove(identifier);
            }
            dependencies.remove(identifier);
            reverseDependencies.remove(identifier);
            forMaterialization.remove(identifier);
        };

        // Nodes without remaining unmaterialized fields will be
        // materialized first - amongst the remaining unmaterialized
        // nodes.
        std::list<Allocation> toMaterialize;
        auto firstPos = toMaterialize.begin();
        auto materializeFirst = [&] (Allocation&& allocation) {
            materialize(allocation.identifier());
            // We need to insert *after* the current position.
            if (firstPos != toMaterialize.end())
                ++firstPos;
            firstPos = toMaterialize.insert(firstPos, WTFMove(allocation));
        };

        // Nodes that no other unmaterialized node points to will be
        // materialized last - amongst the remaining unmaterialized
        // nodes.
        auto lastPos = toMaterialize.end();
        auto materializeLast = [&] (Allocation&& allocation) {
            materialize(allocation.identifier());
            lastPos = toMaterialize.insert(lastPos, WTFMove(allocation));
        };
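
        // (Illustrative trace: materializeFirst(A), materializeFirst(B),
        // materializeLast(C), materializeLast(D) produces the order
        // A, B, D, C. "First" insertions keep their selection order,
        // while "last" insertions come out reversed - which is what we
        // want, since C, selected before D, may still point to D and
        // must therefore be materialized after it.)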
1339
1340         // These are the promoted locations that contains some of the
1341         // allocations we are currently escaping. If they are a location on
1342         // some other allocation we are currently materializing, we will need
1343         // to "recover" their value with a real put once the corresponding
1344         // allocation is materialized; if they are a location on some other
1345         // not-yet-materialized allocation, we will need a PutHint.
1346         Vector<PromotedHeapLocation> toRecover;
1347
1348         // This loop does the actual cycle breaking
1349         while (!escapees.isEmpty()) {
1350             materialized.clear();
1351
1352             // Materialize nodes that won't require recoveries if we can
1353             for (auto& entry : escapees) {
1354                 if (!forMaterialization.find(entry.key)->value.isEmpty())
1355                     continue;
1356
1357                 if (dependencies.find(entry.key)->value.isEmpty()) {
1358                     materializeFirst(WTFMove(entry.value));
1359                     continue;
1360                 }
1361
1362                 if (reverseDependencies.find(entry.key)->value.isEmpty()) {
1363                     materializeLast(WTFMove(entry.value));
1364                     continue;
1365                 }
1366             }
1367
1368             // We reach this only if there is an actual cycle that needs
1369             // breaking. Because we do not want to solve a NP-hard problem
1370             // here, we just heuristically pick a node and materialize it
1371             // first.
1372             if (materialized.isEmpty()) {
1373                 uint64_t maxEvaluation = 0;
1374                 Allocation* bestAllocation = nullptr;
1375                 for (auto& entry : escapees) {
1376                     if (!forMaterialization.find(entry.key)->value.isEmpty())
1377                         continue;
1378
1379                     uint64_t evaluation =
1380                         static_cast<uint64_t>(dependencies.get(entry.key).size()) * reverseDependencies.get(entry.key).size();
1381                     if (evaluation > maxEvaluation) {
1382                         maxEvaluation = evaluation;
1383                         bestAllocation = &entry.value;
1384                     }
1385                 }
1386                 RELEASE_ASSERT(maxEvaluation > 0);
1387
1388                 materializeFirst(WTFMove(*bestAllocation));
1389             }
1390             RELEASE_ASSERT(!materialized.isEmpty());
1391
1392             for (Node* identifier : materialized)
1393                 escapees.remove(identifier);
1394         }
1395
1396         materialized.clear();
1397
1398         HashSet<Node*> escaped;
1399         for (const Allocation& allocation : toMaterialize)
1400             escaped.add(allocation.identifier());
1401         for (const Allocation& allocation : toMaterialize) {
1402             for (const auto& field : allocation.fields()) {
1403                 if (escaped.contains(field.value) && !materialized.contains(field.value))
1404                     toRecover.append(PromotedHeapLocation(allocation.identifier(), field.key));
1405             }
1406             materialized.add(allocation.identifier());
1407         }
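        // Only "forward" references (including self-references) need
        // recovery: a field whose target appears earlier in
        // toMaterialize can reference that materialization directly
        // when we populate this one.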
1408
1409         Vector<Node*>& materializations = m_materializationSiteToMaterializations.add(
1410             where, Vector<Node*>()).iterator->value;
1411
1412         for (const Allocation& allocation : toMaterialize) {
1413             Node* materialization = createMaterialization(allocation, where);
1414             materializations.append(materialization);
1415             m_materializationToEscapee.add(materialization, allocation.identifier());
1416         }
1417
1418         if (!toRecover.isEmpty()) {
1419             m_materializationSiteToRecoveries.add(
1420                 where, Vector<PromotedHeapLocation>()).iterator->value.appendVector(toRecover);
1421         }
1422
1423         // The hints need to be after the "real" recoveries so that we
1424         // don't hint not-yet-complete objects
1425         if (!hints.isEmpty()) {
1426             m_materializationSiteToRecoveries.add(
1427                 where, Vector<PromotedHeapLocation>()).iterator->value.appendVector(hints);
1428         }
1429     }
1430
1431     Node* createMaterialization(const Allocation& allocation, Node* where)
1432     {
1433         // FIXME: This is the only place where we actually use the
1434         // fact that an allocation's identifier is indeed the node
1435         // that created the allocation.
1436         switch (allocation.kind()) {
1437         case Allocation::Kind::Object: {
1438             ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
1439             StructureSet* set = m_graph.addStructureSet(allocation.structures());
1440
1441             return m_graph.addNode(
1442                 allocation.identifier()->prediction(), Node::VarArg, MaterializeNewObject,
1443                 where->origin.withSemantic(allocation.identifier()->origin.semantic),
1444                 OpInfo(set), OpInfo(data), 0, 0);
1445         }
1446
1447         case Allocation::Kind::GeneratorFunction:
1448         case Allocation::Kind::Function: {
1449             FrozenValue* executable = allocation.identifier()->cellOperand();
1450             
1451             NodeType nodeType =
1452                 allocation.kind() == Allocation::Kind::GeneratorFunction ? NewGeneratorFunction : NewFunction;
1453             
1454             return m_graph.addNode(
1455                 allocation.identifier()->prediction(), nodeType,
1456                 where->origin.withSemantic(
1457                     allocation.identifier()->origin.semantic),
1458                 OpInfo(executable));
1460         }
1461
1462         case Allocation::Kind::Activation: {
1463             ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
1464             FrozenValue* symbolTable = allocation.identifier()->cellOperand();
1465
1466             return m_graph.addNode(
1467                 allocation.identifier()->prediction(), Node::VarArg, MaterializeCreateActivation,
1468                 where->origin.withSemantic(
1469                     allocation.identifier()->origin.semantic),
1470                 OpInfo(symbolTable), OpInfo(data), 0, 0);
1471         }
1472
1473         default:
1474             DFG_CRASH(m_graph, allocation.identifier(), "Bad allocation kind");
1475         }
1476     }
1477
1478     void promoteLocalHeap()
1479     {
1480         // Collect the set of heap locations that we will be operating
1481         // over.
1482         HashSet<PromotedHeapLocation> locations;
1483         for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
1484             m_heap = m_heapAtHead[block];
1485
1486             for (Node* node : *block) {
1487                 handleNode(
1488                     node,
1489                     [&] (PromotedHeapLocation location, LazyNode) {
1490                         // If the location is not on a sink candidate,
1491                         // we only track it if it is read
1492                         if (m_sinkCandidates.contains(location.base()))
1493                             locations.add(location);
1494                     },
1495                     [&] (PromotedHeapLocation location) -> Node* {
1496                         locations.add(location);
1497                         return nullptr;
1498                     });
1499             }
1500         }
1501
1502         // Figure out which locations belong to which allocations.
1503         m_locationsForAllocation.clear();
1504         for (PromotedHeapLocation location : locations) {
1505             auto result = m_locationsForAllocation.add(
1506                 location.base(),
1507                 Vector<PromotedHeapLocation>());
1508             ASSERT(!result.iterator->value.contains(location));
1509             result.iterator->value.append(location);
1510         }
1511
1512         m_pointerSSA.reset();
1513         m_allocationSSA.reset();
1514
1515         // Collect the set of "variables" that we will be sinking.
1516         m_locationToVariable.clear();
1517         m_nodeToVariable.clear();
1518         Vector<Node*> indexToNode;
1519         Vector<PromotedHeapLocation> indexToLocation;
1520
1521         for (Node* index : m_sinkCandidates) {
1522             SSACalculator::Variable* variable = m_allocationSSA.newVariable();
1523             m_nodeToVariable.add(index, variable);
1524             ASSERT(indexToNode.size() == variable->index());
1525             indexToNode.append(index);
1526         }
1527
1528         for (PromotedHeapLocation location : locations) {
1529             SSACalculator::Variable* variable = m_pointerSSA.newVariable();
1530             m_locationToVariable.add(location, variable);
1531             ASSERT(indexToLocation.size() == variable->index());
1532             indexToLocation.append(location);
1533         }
1534
1535         // We insert all required constants at the top of block 0 so
1536         // that they are inserted only once and we don't clutter the
1537         // graph with useless constants everywhere.
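        // Among these constants is m_bottom, a placeholder value for
        // promoted locations (e.g. named properties that are only added
        // on some paths) that have not been written yet. It should
        // never be observable: at OSR exit, the structure actually
        // available on a given path determines which properties are
        // read back. The specific constant appears to be an arbitrary,
        // recognizable number.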
1538         HashMap<FrozenValue*, Node*> lazyMapping;
1539         if (!m_bottom)
1540             m_bottom = m_insertionSet.insertConstant(0, m_graph.block(0)->at(0)->origin, jsNumber(1927));
1541         for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
1542             m_heap = m_heapAtHead[block];
1543
1544             for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
1545                 Node* node = block->at(nodeIndex);
1546
1547                 // Some named properties can be added conditionally,
1548                 // and that would necessitate bottoms
1549                 for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
1550                     if (location.kind() != NamedPropertyPLoc)
1551                         continue;
1552
1553                     SSACalculator::Variable* variable = m_locationToVariable.get(location);
1554                     m_pointerSSA.newDef(variable, block, m_bottom);
1555                 }
1556
1557                 for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
1558                     Node* escapee = m_materializationToEscapee.get(materialization);
1559                     m_allocationSSA.newDef(m_nodeToVariable.get(escapee), block, materialization);
1560                 }
1561
1562                 if (m_sinkCandidates.contains(node))
1563                     m_allocationSSA.newDef(m_nodeToVariable.get(node), block, node);
1564
1565                 handleNode(
1566                     node,
1567                     [&] (PromotedHeapLocation location, LazyNode value) {
1568                         if (!locations.contains(location))
1569                             return;
1570
1571                         Node* nodeValue;
1572                         if (value.isNode())
1573                             nodeValue = value.asNode();
1574                         else {
1575                             auto iter = lazyMapping.find(value.asValue());
1576                             if (iter != lazyMapping.end())
1577                                 nodeValue = iter->value;
1578                             else {
1579                                 nodeValue = value.ensureIsNode(
1580                                     m_insertionSet, m_graph.block(0), 0);
1581                                 lazyMapping.add(value.asValue(), nodeValue);
1582                             }
1583                         }
1584
1585                         SSACalculator::Variable* variable = m_locationToVariable.get(location);
1586                         m_pointerSSA.newDef(variable, block, nodeValue);
1587                     },
1588                     [] (PromotedHeapLocation) -> Node* {
1589                         return nullptr;
1590                     });
1591             }
1592         }
1593         m_insertionSet.execute(m_graph.block(0));
1594
1595         // Run the SSA calculators to create Phis
1596         m_pointerSSA.computePhis(
1597             [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
1598                 PromotedHeapLocation location = indexToLocation[variable->index()];
1599
1600                 // Don't create Phi nodes for fields of dead allocations
1601                 if (!m_heapAtHead[block].isAllocation(location.base()))
1602                     return nullptr;
1603
1604                 // Don't create Phi nodes once we are escaped
1605                 if (m_heapAtHead[block].getAllocation(location.base()).isEscapedAllocation())
1606                     return nullptr;
1607
1608                 // If we point to a single allocation, we will
1609                 // directly use its materialization
1610                 if (m_heapAtHead[block].follow(location))
1611                     return nullptr;
1612
1613                 Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, block->at(0)->origin.withInvalidExit());
1614                 phiNode->mergeFlags(NodeResultJS);
1615                 return phiNode;
1616             });
1617
1618         m_allocationSSA.computePhis(
1619             [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
1620                 Node* identifier = indexToNode[variable->index()];
1621
1622                 // Don't create Phi nodes for dead allocations
1623                 if (!m_heapAtHead[block].isAllocation(identifier))
1624                     return nullptr;
1625
1626                 // Don't create Phi nodes until we are escaped
1627                 if (!m_heapAtHead[block].getAllocation(identifier).isEscapedAllocation())
1628                     return nullptr;
1629
1630                 Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, block->at(0)->origin.withInvalidExit());
1631                 phiNode->mergeFlags(NodeResultJS);
1632                 return phiNode;
1633             });
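        // Note the symmetry between the two filters above: m_pointerSSA
        // tracks the values of promoted fields while an allocation is
        // still local, so it stops at escape points, whereas
        // m_allocationSSA tracks materializations of the allocation
        // itself, which only exist once it has escaped. Hence the
        // opposite isEscapedAllocation() checks.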
1634
1635         // Place Phis in the right places, replace all uses of any load with the appropriate
1636         // value, and create the materialization nodes.
1637         LocalOSRAvailabilityCalculator availabilityCalculator;
1638         m_graph.clearReplacements();
1639         for (BasicBlock* block : m_graph.blocksInPreOrder()) {
1640             m_heap = m_heapAtHead[block];
1641             availabilityCalculator.beginBlock(block);
1642
1643             // These mapping tables are intended to be lazy. If
1644             // something is omitted from the table, it means that
1645             // there haven't been any local stores to the promoted
1646             // heap location (or any local materialization).
1647             m_localMapping.clear();
1648             m_escapeeToMaterialization.clear();
1649
1650             // Insert the Phi functions that we had previously
1651             // created.
1652             for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(block)) {
1653                 SSACalculator::Variable* variable = phiDef->variable();
1654                 m_insertionSet.insert(0, phiDef->value());
1655
1656                 PromotedHeapLocation location = indexToLocation[variable->index()];
1657                 m_localMapping.set(location, phiDef->value());
1658
1659                 if (m_sinkCandidates.contains(location.base())) {
1660                     m_insertionSet.insert(
1661                         0,
1662                         location.createHint(
1663                             m_graph, block->at(0)->origin.withInvalidExit(), phiDef->value()));
1664                 }
1665             }
1666
1667             for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(block)) {
1668                 SSACalculator::Variable* variable = phiDef->variable();
1669                 m_insertionSet.insert(0, phiDef->value());
1670
1671                 Node* identifier = indexToNode[variable->index()];
1672                 m_escapeeToMaterialization.add(identifier, phiDef->value());
1673                 bool canExit = false;
1674                 insertOSRHintsForUpdate(
1675                     0, block->at(0)->origin, canExit,
1676                     availabilityCalculator.m_availability, identifier, phiDef->value());
1677             }
1678
1679             if (verbose) {
1680                 dataLog("Local mapping at ", pointerDump(block), ": ", mapDump(m_localMapping), "\n");
1681                 dataLog("Local materializations at ", pointerDump(block), ": ", mapDump(m_escapeeToMaterialization), "\n");
1682             }
1683
1684             for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
1685                 Node* node = block->at(nodeIndex);
1686                 bool canExit = true;
1687                 bool nextCanExit = node->origin.exitOK;
1688                 for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
1689                     if (location.kind() != NamedPropertyPLoc)
1690                         continue;
1691
1692                     m_localMapping.set(location, m_bottom);
1693
1694                     if (m_sinkCandidates.contains(node)) {
1695                         m_insertionSet.insert(
1696                             nodeIndex + 1,
1697                             location.createHint(
1698                                 m_graph, node->origin.takeValidExit(nextCanExit), m_bottom));
1699                     }
1700                 }
1701
1702                 for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
1703                     materialization->origin.exitOK &= canExit;
1704                     Node* escapee = m_materializationToEscapee.get(materialization);
1705                     populateMaterialization(block, materialization, escapee);
1706                     m_escapeeToMaterialization.set(escapee, materialization);
1707                     m_insertionSet.insert(nodeIndex, materialization);
1708                     if (verbose)
1709                         dataLog("Materializing ", escapee, " => ", materialization, " at ", node, "\n");
1710                 }
1711
1712                 for (PromotedHeapLocation location : m_materializationSiteToRecoveries.get(node))
1713                     m_insertionSet.insert(nodeIndex, createRecovery(block, location, node, canExit));
1714
1715                 // We need to put the OSR hints after the recoveries,
1716                 // because we only want the hints once the object is
1717                 // complete
1718                 for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
1719                     Node* escapee = m_materializationToEscapee.get(materialization);
1720                     insertOSRHintsForUpdate(
1721                         nodeIndex, node->origin, canExit,
1722                         availabilityCalculator.m_availability, escapee, materialization);
1723                 }
1724
1725                 if (node->origin.exitOK && !canExit) {
1726                     // We indicate that the exit state is fine now. It is OK because we updated the
1727                     // state above. We need to indicate this manually because the validation doesn't
1728                     // have enough information to infer that the exit state is fine.
1729                     m_insertionSet.insertNode(nodeIndex, SpecNone, ExitOK, node->origin);
1730                 }
1731
1732                 if (m_sinkCandidates.contains(node))
1733                     m_escapeeToMaterialization.set(node, node);
1734
1735                 availabilityCalculator.executeNode(node);
1736
1737                 bool desiredNextExitOK = node->origin.exitOK && !clobbersExitState(m_graph, node);
1738
1739                 bool doLower = false;
1740                 handleNode(
1741                     node,
1742                     [&] (PromotedHeapLocation location, LazyNode value) {
1743                         if (!locations.contains(location))
1744                             return;
1745
1746                         Node* nodeValue;
1747                         if (value.isNode())
1748                             nodeValue = value.asNode();
1749                         else
1750                             nodeValue = lazyMapping.get(value.asValue());
1751
1752                         nodeValue = resolve(block, nodeValue);
1753
1754                         m_localMapping.set(location, nodeValue);
1755
1756                         if (!m_sinkCandidates.contains(location.base()))
1757                             return;
1758
1759                         doLower = true;
1760
1761                         m_insertionSet.insert(
1762                             nodeIndex + 1,
1763                             location.createHint(
1764                                 m_graph, node->origin.takeValidExit(nextCanExit), nodeValue));
1765                     },
1766                     [&] (PromotedHeapLocation location) -> Node* {
1767                         return resolve(block, location);
1768                     });
1769
1770                 if (!nextCanExit && desiredNextExitOK) {
1771                     // We indicate that the exit state is fine now. We need to do this because we
1772                     // emitted hints that appear to invalidate the exit state.
1773                     m_insertionSet.insertNode(nodeIndex + 1, SpecNone, ExitOK, node->origin);
1774                 }
1775
1776                 if (m_sinkCandidates.contains(node) || doLower) {
1777                     switch (node->op()) {
1778                     case NewObject:
1779                         node->convertToPhantomNewObject();
1780                         break;
1781
1782                     case NewFunction:
1783                         node->convertToPhantomNewFunction();
1784                         break;
1785
1786                     case NewGeneratorFunction:
1787                         node->convertToPhantomNewGeneratorFunction();
1788                         break;
1789
1790                     case CreateActivation:
1791                         node->convertToPhantomCreateActivation();
1792                         break;
1793
1794                     default:
1795                         node->remove();
1796                         break;
1797                     }
1798                 }
1799
1800                 m_graph.doToChildren(
1801                     node,
1802                     [&] (Edge& edge) {
1803                         edge.setNode(resolve(block, edge.node()));
1804                     });
1805             }
1806
1807             // Gotta drop some Upsilons.
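            // In DFG SSA form, a Phi's inputs are provided by Upsilon
            // nodes in the predecessor blocks. For each Phi we created
            // in a successor, we insert an Upsilon before this block's
            // terminal, carrying the resolved incoming value.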
1808             NodeAndIndex terminal = block->findTerminal();
1809             size_t upsilonInsertionPoint = terminal.index;
1810             NodeOrigin upsilonOrigin = terminal.node->origin;
1811             for (BasicBlock* successorBlock : block->successors()) {
1812                 for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(successorBlock)) {
1813                     Node* phiNode = phiDef->value();
1814                     SSACalculator::Variable* variable = phiDef->variable();
1815                     PromotedHeapLocation location = indexToLocation[variable->index()];
1816                     Node* incoming = resolve(block, location);
1817
1818                     m_insertionSet.insertNode(
1819                         upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
1820                         OpInfo(phiNode), incoming->defaultEdge());
1821                 }
1822
1823                 for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(successorBlock)) {
1824                     Node* phiNode = phiDef->value();
1825                     SSACalculator::Variable* variable = phiDef->variable();
1826                     Node* incoming = getMaterialization(block, indexToNode[variable->index()]);
1827
1828                     m_insertionSet.insertNode(
1829                         upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
1830                         OpInfo(phiNode), incoming->defaultEdge());
1831                 }
1832             }
1833
1834             m_insertionSet.execute(block);
1835         }
1836     }
1837
1838     Node* resolve(BasicBlock* block, PromotedHeapLocation location)
1839     {
1840         // If we are currently pointing to a single local allocation,
1841         // simply return the associated materialization.
1842         if (Node* identifier = m_heap.follow(location))
1843             return getMaterialization(block, identifier);
1844
1845         if (Node* result = m_localMapping.get(location))
1846             return result;
1847
1848         // This implies that there is no local mapping. Find a non-local mapping.
1849         SSACalculator::Def* def = m_pointerSSA.nonLocalReachingDef(
1850             block, m_locationToVariable.get(location));
1851         ASSERT(def);
1852         ASSERT(def->value());
1853
1854         Node* result = def->value();
1855         if (result->replacement())
1856             result = result->replacement();
1857         ASSERT(!result->replacement());
1858
1859         m_localMapping.add(location, result);
1860         return result;
1861     }
1862
1863     Node* resolve(BasicBlock* block, Node* node)
1864     {
1865         // If we are currently pointing to a single local allocation,
1866         // simply return the associated materialization.
1867         if (Node* identifier = m_heap.follow(node))
1868             return getMaterialization(block, identifier);
1869
1870         if (node->replacement())
1871             node = node->replacement();
1872         ASSERT(!node->replacement());
1873
1874         return node;
1875     }
1876
1877     Node* getMaterialization(BasicBlock* block, Node* identifier)
1878     {
1879         ASSERT(m_heap.isAllocation(identifier));
1880         if (!m_sinkCandidates.contains(identifier))
1881             return identifier;
1882
1883         if (Node* materialization = m_escapeeToMaterialization.get(identifier))
1884             return materialization;
1885
1886         SSACalculator::Def* def = m_allocationSSA.nonLocalReachingDef(
1887             block, m_nodeToVariable.get(identifier));
1888         ASSERT(def && def->value());
1889         m_escapeeToMaterialization.add(identifier, def->value());
1890         ASSERT(!def->value()->replacement());
1891         return def->value();
1892     }
1893
1894     void insertOSRHintsForUpdate(unsigned nodeIndex, NodeOrigin origin, bool& canExit, AvailabilityMap& availability, Node* escapee, Node* materialization)
1895     {
1896         // We need to follow() the value in the heap.
1897         // Consider the following graph:
1898         //
1899         // Block #0
1900         //   0: NewObject({})
1901         //   1: NewObject({})
1902         //   -: PutByOffset(@0, @1, x:0)
1903         //   -: PutStructure(@0, {x:0})
1904         //   2: GetByOffset(@0, x:0)
1905         //   -: MovHint(@2, loc1)
1906         //   -: Branch(#1, #2)
1907         //
1908         // Block #1
1909         //   3: Call(f, @1)
1910         //   4: Return(@0)
1911         //
1912         // Block #2
1913         //   -: Return(undefined)
1914         //
1915         // We need to materialize @1 at @3, and when doing so we need
1916         // to insert a MovHint for the materialization into loc1 as
1917         // well.
1918         // In order to do this, we say that we need to insert an
1919         // update hint for any availability whose node resolve()s to
1920         // the materialization.
1921         for (auto entry : availability.m_heap) {
1922             if (!entry.value.hasNode())
1923                 continue;
1924             if (m_heap.follow(entry.value.node()) != escapee)
1925                 continue;
1926
1927             m_insertionSet.insert(
1928                 nodeIndex,
1929                 entry.key.createHint(m_graph, origin.takeValidExit(canExit), materialization));
1930         }
1931
1932         for (unsigned i = availability.m_locals.size(); i--;) {
1933             if (!availability.m_locals[i].hasNode())
1934                 continue;
1935             if (m_heap.follow(availability.m_locals[i].node()) != escapee)
1936                 continue;
1937
1938             int operand = availability.m_locals.operandForIndex(i);
1939             m_insertionSet.insertNode(
1940                 nodeIndex, SpecNone, MovHint, origin.takeValidExit(canExit), OpInfo(operand),
1941                 materialization->defaultEdge());
1942         }
1943     }
1944
1945     void populateMaterialization(BasicBlock* block, Node* node, Node* escapee)
1946     {
1947         Allocation& allocation = m_heap.getAllocation(escapee);
1948         switch (node->op()) {
1949         case MaterializeNewObject: {
1950             ObjectMaterializationData& data = node->objectMaterializationData();
1951             unsigned firstChild = m_graph.m_varArgChildren.size();
1952
1953             Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
1954
1955             PromotedHeapLocation structure(StructurePLoc, allocation.identifier());
1956             ASSERT(locations.contains(structure));
1957
1958             m_graph.m_varArgChildren.append(Edge(resolve(block, structure), KnownCellUse));
1959
1960             for (PromotedHeapLocation location : locations) {
1961                 switch (location.kind()) {
1962                 case StructurePLoc:
1963                     ASSERT(location == structure);
1964                     break;
1965
1966                 case NamedPropertyPLoc: {
1967                     ASSERT(location.base() == allocation.identifier());
1968                     data.m_properties.append(location.descriptor());
1969                     Node* value = resolve(block, location);
1970                     if (m_sinkCandidates.contains(value))
1971                         m_graph.m_varArgChildren.append(m_bottom);
1972                     else
1973                         m_graph.m_varArgChildren.append(value);
1974                     break;
1975                 }
1976
1977                 default:
1978                     DFG_CRASH(m_graph, node, "Bad location kind");
1979                 }
1980             }
1981
1982             node->children = AdjacencyList(
1983                 AdjacencyList::Variable,
1984                 firstChild, m_graph.m_varArgChildren.size() - firstChild);
1985             break;
1986         }
1987
1988         case MaterializeCreateActivation: {
1989             ObjectMaterializationData& data = node->objectMaterializationData();
1990
1991             unsigned firstChild = m_graph.m_varArgChildren.size();
1992
1993             Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
1994
1995             PromotedHeapLocation symbolTable(ActivationSymbolTablePLoc, allocation.identifier());
1996             ASSERT(locations.contains(symbolTable));
1997             ASSERT(node->cellOperand() == resolve(block, symbolTable)->constant());
1998             m_graph.m_varArgChildren.append(Edge(resolve(block, symbolTable), KnownCellUse));
1999
2000             PromotedHeapLocation scope(ActivationScopePLoc, allocation.identifier());
2001             ASSERT(locations.contains(scope));
2002             m_graph.m_varArgChildren.append(Edge(resolve(block, scope), KnownCellUse));
2003
2004             for (PromotedHeapLocation location : locations) {
2005                 switch (location.kind()) {
2006                 case ActivationScopePLoc: {
2007                     ASSERT(location == scope);
2008                     break;
2009                 }
2010
2011                 case ActivationSymbolTablePLoc: {
2012                     ASSERT(location == symbolTable);
2013                     break;
2014                 }
2015
2016                 case ClosureVarPLoc: {
2017                     ASSERT(location.base() == allocation.identifier());
2018                     data.m_properties.append(location.descriptor());
2019                     Node* value = resolve(block, location);
2020                     if (m_sinkCandidates.contains(value))
2021                         m_graph.m_varArgChildren.append(m_bottom);
2022                     else
2023                         m_graph.m_varArgChildren.append(value);
2024                     break;
2025                 }
2026
2027                 default:
2028                     DFG_CRASH(m_graph, node, "Bad location kind");
2029                 }
2030             }
2031
2032             node->children = AdjacencyList(
2033                 AdjacencyList::Variable,
2034                 firstChild, m_graph.m_varArgChildren.size() - firstChild);
2035             break;
2036         }
2037         
2038         case NewFunction:
2039         case NewGeneratorFunction: {
2040             Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
2041             ASSERT(locations.size() == 2);
2042                 
2043             PromotedHeapLocation executable(FunctionExecutablePLoc, allocation.identifier());
2044             ASSERT_UNUSED(executable, locations.contains(executable));
2045                 
2046             PromotedHeapLocation activation(FunctionActivationPLoc, allocation.identifier());
2047             ASSERT(locations.contains(activation));
2048
2049             node->child1() = Edge(resolve(block, activation), KnownCellUse);
2050             break;
2051         }
2052
2053         default:
2054             DFG_CRASH(m_graph, node, "Bad materialize op");
2055         }
2056     }
2057
2058     Node* createRecovery(BasicBlock* block, PromotedHeapLocation location, Node* where, bool& canExit)
2059     {
2060         if (verbose)
2061             dataLog("Recovering ", location, " at ", where, "\n");
2062         ASSERT(location.base()->isPhantomAllocation());
2063         Node* base = getMaterialization(block, location.base());
2064         Node* value = resolve(block, location);
2065
2066         NodeOrigin origin = where->origin.withSemantic(base->origin.semantic);
2067
2068         if (verbose)
2069             dataLog("Base is ", base, " and value is ", value, "\n");
2070
2071         if (base->isPhantomAllocation()) {
2072             return PromotedHeapLocation(base, location.descriptor()).createHint(
2073                 m_graph, origin.takeValidExit(canExit), value);
2074         }
2075
2076         switch (location.kind()) {
2077         case NamedPropertyPLoc: {
2078             Allocation& allocation = m_heap.getAllocation(location.base());
2079
2080             Vector<Structure*> structures;
2081             structures.appendRange(allocation.structures().begin(), allocation.structures().end());
2082             unsigned identifierNumber = location.info();
2083             UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
2084
2085             std::sort(
2086                 structures.begin(),
2087                 structures.end(),
2088                 [uid] (Structure* a, Structure* b) -> bool {
2089                     return a->getConcurrently(uid) < b->getConcurrently(uid);
2090                 });
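            // Sorting by offset makes structures that store uid at the
            // same offset adjacent, which lets the fast path below
            // detect the single-offset case and lets the grouping loop
            // emit one PutByIdVariant per distinct offset.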
2091
2092             PropertyOffset firstOffset = structures[0]->getConcurrently(uid);
2093
2094             if (firstOffset == structures.last()->getConcurrently(uid)) {
2095                 Node* storage = base;
2096                 // FIXME: When we decide to sink objects with out-of-line
2097                 // property storage, we should handle non-inline offsets.
2098                 RELEASE_ASSERT(isInlineOffset(firstOffset));
2099
2100                 StorageAccessData* data = m_graph.m_storageAccessData.add();
2101                 data->offset = firstOffset;
2102                 data->identifierNumber = identifierNumber;
2103
2104                 return m_graph.addNode(
2105                     SpecNone,
2106                     PutByOffset,
2107                     origin.takeValidExit(canExit),
2108                     OpInfo(data),
2109                     Edge(storage, KnownCellUse),
2110                     Edge(base, KnownCellUse),
2111                     value->defaultEdge());
2112             }
2113
2114             MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
2115             data->identifierNumber = identifierNumber;
2116
2117             {
2118                 PropertyOffset currentOffset = firstOffset;
2119                 StructureSet currentSet;
2120                 for (Structure* structure : structures) {
2121                     PropertyOffset offset = structure->getConcurrently(uid);
2122                     if (offset != currentOffset) {
2123                         // Because our analysis treats MultiPutByOffset like an escape, we only have to
2124                         // deal with storing results that would have been previously stored by PutByOffset
2125                         // nodes. Those nodes were guarded by the appropriate type checks. This means that
2126                         // at this point, we can simply trust that the incoming value has the right type
2127                         // for whatever structure we are using.
2128                         data->variants.append(
2129                             PutByIdVariant::replace(currentSet, currentOffset, InferredType::Top));
2130                         currentOffset = offset;
2131                         currentSet.clear();
2132                     }
2133                     currentSet.add(structure);
2134                 }
2135                 data->variants.append(
2136                     PutByIdVariant::replace(currentSet, currentOffset, InferredType::Top));
2137             }
2138
2139             return m_graph.addNode(
2140                 SpecNone,
2141                 MultiPutByOffset,
2142                 origin.takeValidExit(canExit),
2143                 OpInfo(data),
2144                 Edge(base, KnownCellUse),
2145                 value->defaultEdge());
2147         }
2148
2149         case ClosureVarPLoc: {
2150             return m_graph.addNode(
2151                 SpecNone,
2152                 PutClosureVar,
2153                 origin.takeValidExit(canExit),
2154                 OpInfo(location.info()),
2155                 Edge(base, KnownCellUse),
2156                 value->defaultEdge());
2158         }
2159
2160         default:
2161             DFG_CRASH(m_graph, base, "Bad location kind");
2162             break;
2163         }
2164     }
2165
2166     // This is a great way of asking value->isStillValid() without having to worry about getting
2167     // different answers. It turns out that this analysis works OK regardless of what this
2168     // returns but breaks badly if this changes its mind for any particular InferredValue. This
2169     // method protects us from that.
2170     bool isStillValid(InferredValue* value)
2171     {
2172         return m_validInferredValues.add(value, value->isStillValid()).iterator->value;
2173     }
2174
2175     SSACalculator m_pointerSSA;
2176     SSACalculator m_allocationSSA;
2177     HashSet<Node*> m_sinkCandidates;
2178     HashMap<PromotedHeapLocation, SSACalculator::Variable*> m_locationToVariable;
2179     HashMap<Node*, SSACalculator::Variable*> m_nodeToVariable;
2180     HashMap<PromotedHeapLocation, Node*> m_localMapping;
2181     HashMap<Node*, Node*> m_escapeeToMaterialization;
2182     InsertionSet m_insertionSet;
2183     CombinedLiveness m_combinedLiveness;
2184
2185     HashMap<InferredValue*, bool> m_validInferredValues;
2186
2187     HashMap<Node*, Node*> m_materializationToEscapee;
2188     HashMap<Node*, Vector<Node*>> m_materializationSiteToMaterializations;
2189     HashMap<Node*, Vector<PromotedHeapLocation>> m_materializationSiteToRecoveries;
2190
2191     HashMap<Node*, Vector<PromotedHeapLocation>> m_locationsForAllocation;
2192
2193     BlockMap<LocalHeap> m_heapAtHead;
2194     BlockMap<LocalHeap> m_heapAtTail;
2195     LocalHeap m_heap;
2196
2197     Node* m_bottom = nullptr;
2198 };
2199
2200 }
2201
2202 bool performObjectAllocationSinking(Graph& graph)
2203 {
2204     return runPhase<ObjectAllocationSinkingPhase>(graph);
2205 }
2206
2207 } } // namespace JSC::DFG
2208
2209 #endif // ENABLE(DFG_JIT)