Object cycles should not prevent allocation elimination/sinking
Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp
1 /*
2  * Copyright (C) 2015 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24  */
25
26 #include "config.h"
27 #include "DFGObjectAllocationSinkingPhase.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "DFGBlockMapInlines.h"
32 #include "DFGCombinedLiveness.h"
33 #include "DFGGraph.h"
34 #include "DFGInsertionSet.h"
35 #include "DFGLazyNode.h"
36 #include "DFGLivenessAnalysisPhase.h"
37 #include "DFGOSRAvailabilityAnalysisPhase.h"
38 #include "DFGPhase.h"
39 #include "DFGPromotedHeapLocation.h"
40 #include "DFGSSACalculator.h"
41 #include "DFGValidate.h"
42 #include "JSCInlines.h"
43
44 #include <list>
45
46 namespace JSC { namespace DFG {
47
48 namespace {
49
50 bool verbose = false;
51
52 // In order to sink object cycles, we use a points-to analysis coupled
53 // with an escape analysis. This analysis is actually similar to an
54 // abstract interpreter focused on local allocations and ignoring
55 // everything else.
56 //
57 // We represent the local heap using two mappings:
58 //
59 // - A set of the local allocations present in the function, where
60 //   each of those have a further mapping from
61 //   PromotedLocationDescriptor to local allocations they must point
62 //   to.
63 //
64 // - A "pointer" mapping from nodes to local allocations, if they must
65 //   be equal to said local allocation and are currently live. This
66 //   can be because the node is the actual node that created the
67 //   allocation, or any other node that must currently point to it;
68 //   we make no distinction between the two.
69 //
70 // The following graph is a motivation for why we separate allocations
71 // from pointers:
72 //
73 // Block #0
74 //  0: NewObject({})
75 //  1: NewObject({})
76 //  -: PutByOffset(@0, @1, x)
77 //  -: PutStructure(@0, {x:0})
78 //  2: GetByOffset(@0, x)
79 //  -: Jump(#1)
80 //
81 // Block #1
82 //  -: Return(@2)
83 //
84 // Here, we need to remember in block #1 that @2 points to a local
85 // allocation with appropriate fields and structures information
86 // (because we should be able to place a materialization on top of
87 // block #1 here), even though @1 is dead. We *could* just keep @1
88 // artificially alive here, but there is no real reason to do it:
89 // after all, by the end of block #0, @1 and @2 should be completely
90 // interchangeable, and there is no reason for us to artificially make
91 // @1 more important.
92 //
93 // An important point to consider to understand this separation is
94 // that we should think of the local heap as follows: we have a
95 // bunch of nodes that are pointers to "allocations" that live
96 // someplace on the heap, and those allocations can have pointers
97 // between themselves as well. We shouldn't care about whatever
98 // names we give to the allocations; what matters when
99 // comparing/merging two heaps is the isomorphism/comparison between
100 // the allocation graphs as seen by the nodes.
101 //
102 // For instance, in the following graph:
103 //
104 // Block #0
105 //  0: NewObject({})
106 //  -: Branch(#1, #2)
107 //
108 // Block #1
109 //  1: NewObject({})
110 //  -: PutByOffset(@0, @1, x)
111 //  -: PutStructure(@0, {x:0})
112 //  -: Jump(#3)
113 //
114 // Block #2
115 //  2: NewObject({})
116 //  -: PutByOffset(@2, undefined, x)
117 //  -: PutStructure(@2, {x:0})
118 //  -: PutByOffset(@0, @2, x)
119 //  -: PutStructure(@0, {x:0})
120 //  -: Jump(#3)
121 //
122 // Block #3
123 //  -: Return(@0)
124 //
125 // we should think of the heaps at tail of blocks #1 and #2 as being
126 // exactly the same, even though one has @0.x pointing to @1 and the
127 // other has @0.x pointing to @2, because in essence this should not
128 // be different from the graph where we hoisted @1 and @2 into a
129 // single allocation in block #0. We currently will not handle this
130 // case, because we merge allocations based on the node they are
131 // coming from, but this is only a technicality for the sake of
132 // simplicity that shouldn't hide the deeper idea outlined here.
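//
// To make the two mappings concrete, here is a sketch of the local
// heap just after the GetByOffset in the first example graph above,
// before any liveness pruning, in roughly the format printed by
// LocalHeap::dump() below:
//
//   Allocations:
//     #@0: ObjectAllocation({x:0}, x => #@1)
//     #@1: ObjectAllocation({})
//   Pointers:
//     @0 => #@0
//     @1 => #@1
//     @2 => #@1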
133
134 class Allocation {
135 public:
136     // We use Escaped as a special allocation kind because when we
137     // decide to sink an allocation, we still need to keep track of it
138     // once it escapes, if there are still pointers to it, in order
139     // to replace any use of those pointers with the corresponding
140     // materialization.
141     enum class Kind { Escaped, Object, Activation, Function };
142
143     explicit Allocation(Node* identifier = nullptr, Kind kind = Kind::Escaped)
144         : m_identifier(identifier)
145         , m_kind(kind)
146     {
147     }
148
149
150     const HashMap<PromotedLocationDescriptor, Node*>& fields() const
151     {
152         return m_fields;
153     }
154
155     Node* get(PromotedLocationDescriptor descriptor)
156     {
157         return m_fields.get(descriptor);
158     }
159
160     Allocation& set(PromotedLocationDescriptor descriptor, Node* value)
161     {
162         // Pointing to anything other than an unescaped local
163         // allocation is represented by simply not having the
164         // field.
165         if (value)
166             m_fields.set(descriptor, value);
167         else
168             m_fields.remove(descriptor);
169         return *this;
170     }
171
172     void remove(PromotedLocationDescriptor descriptor)
173     {
174         set(descriptor, nullptr);
175     }
176
177     bool hasStructures() const
178     {
179         switch (kind()) {
180         case Kind::Object:
181             return true;
182
183         default:
184             return false;
185         }
186     }
187
188     Allocation& setStructures(const StructureSet& structures)
189     {
190         ASSERT(hasStructures() && !structures.isEmpty());
191         m_structures = structures;
192         return *this;
193     }
194
195     Allocation& mergeStructures(const StructureSet& structures)
196     {
197         ASSERT(hasStructures() || structures.isEmpty());
198         m_structures.merge(structures);
199         return *this;
200     }
201
202     Allocation& filterStructures(const StructureSet& structures)
203     {
204         ASSERT(hasStructures());
205         m_structures.filter(structures);
206         return *this;
207     }
208
209     const StructureSet& structures() const
210     {
211         return m_structures;
212     }
213
214     Node* identifier() const { return m_identifier; }
215
216     Kind kind() const { return m_kind; }
217
218     bool isEscapedAllocation() const
219     {
220         return kind() == Kind::Escaped;
221     }
222
223     bool isObjectAllocation() const
224     {
225         return m_kind == Kind::Object;
226     }
227
228     bool isActivationAllocation() const
229     {
230         return m_kind == Kind::Activation;
231     }
232
233     bool isFunctionAllocation() const
234     {
235         return m_kind == Kind::Function;
236     }
237
238     bool operator==(const Allocation& other) const
239     {
240         return m_identifier == other.m_identifier
241             && m_kind == other.m_kind
242             && m_fields == other.m_fields
243             && m_structures == other.m_structures;
244     }
245
246     bool operator!=(const Allocation& other) const
247     {
248         return !(*this == other);
249     }
250
251     void dump(PrintStream& out) const
252     {
253         dumpInContext(out, nullptr);
254     }
255
256     void dumpInContext(PrintStream& out, DumpContext* context) const
257     {
258         switch (m_kind) {
259         case Kind::Escaped:
260             out.print("Escaped");
261             break;
262
263         case Kind::Object:
264             out.print("Object");
265             break;
266
267         case Kind::Function:
268             out.print("Function");
269             break;
270
271         case Kind::Activation:
272             out.print("Activation");
273             break;
274         }
275         out.print("Allocation(");
276         if (!m_structures.isEmpty())
277             out.print(inContext(m_structures, context));
278         if (!m_fields.isEmpty()) {
279             if (!m_structures.isEmpty())
280                 out.print(", ");
281             out.print(mapDump(m_fields, " => #", ", "));
282         }
283         out.print(")");
284     }
285
286 private:
287     Node* m_identifier; // This is the actual node that created the allocation
288     Kind m_kind;
289     HashMap<PromotedLocationDescriptor, Node*> m_fields;
290     StructureSet m_structures;
291 };
292
293 class LocalHeap {
294 public:
295     Allocation& newAllocation(Node* node, Allocation::Kind kind)
296     {
297         ASSERT(!m_pointers.contains(node) && !isAllocation(node));
298         m_pointers.add(node, node);
299         return m_allocations.set(node, Allocation(node, kind)).iterator->value;
300     }
301
302     bool isAllocation(Node* identifier) const
303     {
304         return m_allocations.contains(identifier);
305     }
306
307     // Note that this is fundamentally different from
308     // onlyLocalAllocation() below. getAllocation() takes as argument
309     // a node-as-identifier, that is, an allocation node. This
310     // allocation node doesn't have to be alive; it may only be
311     // pointed to by other nodes or allocation fields.
312     // For instance, in the following graph:
313     //
314     // Block #0
315     //  0: NewObject({})
316     //  1: NewObject({})
317     //  -: PutByOffset(@0, @1, x)
318     //  -: PutStructure(@0, {x:0})
319     //  2: GetByOffset(@0, x)
320     //  -: Jump(#1)
321     //
322     // Block #1
323     //  -: Return(@2)
324     //
325     // At head of block #1, the only reachable allocation is #@1,
326     // which can be reached through node @2. Thus, getAllocation(#@1)
327     // contains the appropriate metadata for this allocation, but
328     // onlyLocalAllocation(@1) is null, as @1 is no longer a pointer
329     // to #@1 (since it is dead). Conversely, onlyLocalAllocation(@2)
330     // is the same as getAllocation(#@1), while getAllocation(#@2)
331     // does not make sense since @2 is not an allocation node.
332     //
333     // This is meant to be used when the node is already known to be
334     // an identifier (i.e. an allocation) - probably because it was
335     // found as value of a field or pointer in the current heap, or
336     // was the result of a call to follow(). In all other cases (such
337     // as when doing anything while traversing the graph), the
338     // appropriate function to call is probably onlyLocalAllocation.
339     Allocation& getAllocation(Node* identifier)
340     {
341         auto iter = m_allocations.find(identifier);
342         ASSERT(iter != m_allocations.end());
343         return iter->value;
344     }
345
346     void newPointer(Node* node, Node* identifier)
347     {
348         ASSERT(!m_allocations.contains(node) && !m_pointers.contains(node));
349         ASSERT(isAllocation(identifier));
350         m_pointers.add(node, identifier);
351     }
352
353     // follow solves the points-to problem. Given a live node, which
354     // may be either an allocation itself or a heap read (e.g. a
355     // GetByOffset node), it returns the corresponding allocation
356     // node, if there is one. If the argument node is neither an
357     // allocation nor a heap read, or may point to different nodes,
358     // nullptr will be returned. Note that a node that points to
359     // different nodes can never point to an unescaped local
360     // allocation.
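    //
    // For instance, in the first example graph at the top of this
    // file, follow(@0) is @0, follow(@1) is @1, and follow(@2) is @1
    // as well, since the GetByOffset is known to read a field holding
    // #@1; for a node with no known points-to target, follow()
    // returns nullptr.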
361     Node* follow(Node* node) const
362     {
363         auto iter = m_pointers.find(node);
364         ASSERT(iter == m_pointers.end() || m_allocations.contains(iter->value));
365         return iter == m_pointers.end() ? nullptr : iter->value;
366     }
367
368     Node* follow(PromotedHeapLocation location) const
369     {
370         const Allocation& base = m_allocations.find(location.base())->value;
371         auto iter = base.fields().find(location.descriptor());
372
373         if (iter == base.fields().end())
374             return nullptr;
375
376         return iter->value;
377     }
378
379     // onlyLocalAllocation finds the corresponding allocation metadata
380     // for any live node. onlyLocalAllocation(node) is essentially
381     // getAllocation(follow(node)), with appropriate null handling.
382     Allocation* onlyLocalAllocation(Node* node)
383     {
384         Node* identifier = follow(node);
385         if (!identifier)
386             return nullptr;
387
388         return &getAllocation(identifier);
389     }
390
391     Allocation* onlyLocalAllocation(PromotedHeapLocation location)
392     {
393         Node* identifier = follow(location);
394         if (!identifier)
395             return nullptr;
396
397         return &getAllocation(identifier);
398     }
399
400     // This allows us to store the escapees only when necessary. If
401     // set, the current escapees can be retrieved at any time using
402     // takeEscapees(), which will clear the cached set of escapees;
403     // otherwise the heap won't remember escaping allocations.
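    //
    // A typical use, as a sketch: call setWantEscapees() once at the
    // start of a block, then call takeEscapees() after abstractly
    // interpreting each node to collect the allocations that the node
    // caused to escape - this is what determineSinkCandidates() does
    // when deciding where to place materializations.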
404     void setWantEscapees()
405     {
406         m_wantEscapees = true;
407     }
408
409     HashMap<Node*, Allocation> takeEscapees()
410     {
411         return WTF::move(m_escapees);
412     }
413
414     void escape(Node* node)
415     {
416         Node* identifier = follow(node);
417         if (!identifier)
418             return;
419
420         escapeAllocation(identifier);
421     }
422
423     void merge(const LocalHeap& other)
424     {
425         assertIsValid();
426         other.assertIsValid();
427         ASSERT(!m_wantEscapees);
428
429         if (!reached()) {
430             ASSERT(other.reached());
431             *this = other;
432             return;
433         }
434
435         HashSet<Node*> toEscape;
436
437         for (auto& allocationEntry : other.m_allocations)
438             m_allocations.add(allocationEntry.key, allocationEntry.value);
439         for (auto& allocationEntry : m_allocations) {
440             auto allocationIter = other.m_allocations.find(allocationEntry.key);
441
442             // If we have it and they don't, it died for them but we
443             // are keeping it alive from another field somewhere.
444             // There is nothing to do - it will be escaped
445             // automatically when we handle that other field.
446             // This will also happen for allocations that we have and
447             // they don't, and all of those will get pruned.
448             if (allocationIter == other.m_allocations.end())
449                 continue;
450
451             if (allocationEntry.value.kind() != allocationIter->value.kind()) {
452                 toEscape.add(allocationEntry.key);
453                 for (const auto& fieldEntry : allocationIter->value.fields())
454                     toEscape.add(fieldEntry.value);
455             } else {
456                 mergePointerSets(
457                     allocationEntry.value.fields(), allocationIter->value.fields(),
458                     [&] (Node* identifier) {
459                         toEscape.add(identifier);
460                     },
461                     [&] (PromotedLocationDescriptor field) {
462                         allocationEntry.value.remove(field);
463                     });
464                 allocationEntry.value.mergeStructures(allocationIter->value.structures());
465             }
466         }
467
468         mergePointerSets(m_pointers, other.m_pointers,
469             [&] (Node* identifier) {
470                 toEscape.add(identifier);
471             },
472             [&] (Node* field) {
473                 m_pointers.remove(field);
474             });
475
476         for (Node* identifier : toEscape)
477             escapeAllocation(identifier);
478
479         if (!ASSERT_DISABLED) {
480             for (const auto& entry : m_allocations)
481                 ASSERT_UNUSED(entry, entry.value.isEscapedAllocation() || other.m_allocations.contains(entry.key));
482         }
483
484         // If there is no remaining pointer to an allocation, we can
485         // remove it. This should only happen for escaped allocations,
486         // because we only merge liveness-pruned heaps in the first
487         // place.
488         prune();
489
490         assertIsValid();
491     }
492
493     void pruneByLiveness(const HashSet<Node*>& live)
494     {
495         Vector<Node*> toRemove;
496         for (const auto& entry : m_pointers) {
497             if (!live.contains(entry.key))
498                 toRemove.append(entry.key);
499         }
500         for (Node* node : toRemove)
501             m_pointers.remove(node);
502
503         prune();
504     }
505
506     void assertIsValid() const
507     {
508         if (ASSERT_DISABLED)
509             return;
510
511         // Pointers should point to an actual allocation
512         for (const auto& entry : m_pointers) {
513             ASSERT_UNUSED(entry, entry.value);
514             ASSERT(m_allocations.contains(entry.value));
515         }
516
517         for (const auto& allocationEntry : m_allocations) {
518             // Fields should point to an actual allocation
519             for (const auto& fieldEntry : allocationEntry.value.fields()) {
520                 ASSERT_UNUSED(fieldEntry, fieldEntry.value);
521                 ASSERT(m_allocations.contains(fieldEntry.value));
522             }
523         }
524     }
525
526     bool operator==(const LocalHeap& other) const
527     {
528         assertIsValid();
529         other.assertIsValid();
530         return m_allocations == other.m_allocations
531             && m_pointers == other.m_pointers;
532     }
533
534     bool operator!=(const LocalHeap& other) const
535     {
536         return !(*this == other);
537     }
538
539     const HashMap<Node*, Allocation>& allocations() const
540     {
541         return m_allocations;
542     }
543
544     const HashMap<Node*, Node*>& pointers() const
545     {
546         return m_pointers;
547     }
548
549     void dump(PrintStream& out) const
550     {
551         out.print("  Allocations:\n");
552         for (const auto& entry : m_allocations)
553             out.print("    #", entry.key, ": ", entry.value, "\n");
554         out.print("  Pointers:\n");
555         for (const auto& entry : m_pointers)
556             out.print("    ", entry.key, " => #", entry.value, "\n");
557     }
558
559     bool reached() const
560     {
561         return m_reached;
562     }
563
564     void setReached()
565     {
566         m_reached = true;
567     }
568
569 private:
570     // When we merge two heaps, we escape all fields of allocations,
571     // unless they point to the same thing in both heaps.
572     // The reason for this is that it allows us not to do extra work
573     // for diamond graphs where we would otherwise have to check
574     // whether we have a single definition or not, which would be
575     // cumbersome.
576     //
577     // Note that we should try to unify nodes even when they are not
578     // from the same allocation; for instance we should be able to
579     // completely eliminate all allocations from the following graph:
580     //
581     // Block #0
582     //  0: NewObject({})
583     //  -: Branch(#1, #2)
584     //
585     // Block #1
586     //  1: NewObject({})
587     //  -: PutByOffset(@1, "left", val)
588     //  -: PutStructure(@1, {val:0})
589     //  -: PutByOffset(@0, @1, x)
590     //  -: PutStructure(@0, {x:0})
591     //  -: Jump(#3)
592     //
593     // Block #2
594     //  2: NewObject({})
595     //  -: PutByOffset(@2, "right", val)
596     //  -: PutStructure(@2, {val:0})
597     //  -: PutByOffset(@0, @2, x)
598     //  -: PutStructure(@0, {x:0})
599     //  -: Jump(#3)
600     //
601     // Block #3:
602     //  3: GetByOffset(@0, x)
603     //  4: GetByOffset(@3, val)
604     //  -: Return(@4)
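    //
    // As a concrete sketch of the rule implemented below (with
    // hypothetical node names): if our pointer set is
    // {@a => #@1, @b => #@2} and theirs is {@a => #@1, @b => #@3},
    // the entry for @a survives the merge unchanged, while the entry
    // for @b is removed and both #@2 and #@3 get escaped.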
605     template<typename Key, typename EscapeFunctor, typename RemoveFunctor>
606     void mergePointerSets(
607         const HashMap<Key, Node*>& my, const HashMap<Key, Node*>& their,
608         const EscapeFunctor& escape, const RemoveFunctor& remove)
609     {
610         Vector<Key> toRemove;
611         for (const auto& entry : my) {
612             auto iter = their.find(entry.key);
613             if (iter == their.end()) {
614                 toRemove.append(entry.key);
615                 escape(entry.value);
616             } else if (iter->value != entry.value) {
617                 toRemove.append(entry.key);
618                 escape(entry.value);
619                 escape(iter->value);
620             }
621         }
622         for (const auto& entry : their) {
623             if (my.contains(entry.key))
624                 continue;
625             escape(entry.value);
626         }
627         for (Key key : toRemove)
628             remove(key);
629     }
630
631     void escapeAllocation(Node* identifier)
632     {
633         Allocation& allocation = getAllocation(identifier);
634         if (allocation.isEscapedAllocation())
635             return;
636
637         Allocation unescaped = WTF::move(allocation);
638         allocation = Allocation(unescaped.identifier(), Allocation::Kind::Escaped);
639
640         for (const auto& entry : unescaped.fields())
641             escapeAllocation(entry.value);
642
643         if (m_wantEscapees)
644             m_escapees.add(unescaped.identifier(), WTF::move(unescaped));
645     }
646
647     void prune()
648     {
649         HashSet<Node*> reachable;
650         for (const auto& entry : m_pointers)
651             reachable.add(entry.value);
652
653         // Repeatedly mark allocations stored in fields of other
654         // reachable allocations as reachable
655         {
656             Vector<Node*> worklist;
657             worklist.appendRange(reachable.begin(), reachable.end());
658
659             while (!worklist.isEmpty()) {
660                 Node* identifier = worklist.takeLast();
661                 Allocation& allocation = m_allocations.find(identifier)->value;
662                 for (const auto& entry : allocation.fields()) {
663                     if (reachable.add(entry.value).isNewEntry)
664                         worklist.append(entry.value);
665                 }
666             }
667         }
668
669         // Remove unreachable allocations
670         {
671             Vector<Node*> toRemove;
672             for (const auto& entry : m_allocations) {
673                 if (!reachable.contains(entry.key))
674                     toRemove.append(entry.key);
675             }
676             for (Node* identifier : toRemove)
677                 m_allocations.remove(identifier);
678         }
679     }
680
681     bool m_reached = false;
682     HashMap<Node*, Node*> m_pointers;
683     HashMap<Node*, Allocation> m_allocations;
684
685     bool m_wantEscapees = false;
686     HashMap<Node*, Allocation> m_escapees;
687 };
688
689 class ObjectAllocationSinkingPhase : public Phase {
690 public:
691     ObjectAllocationSinkingPhase(Graph& graph)
692         : Phase(graph, "object allocation elimination")
693         , m_pointerSSA(graph)
694         , m_allocationSSA(graph)
695         , m_insertionSet(graph)
696     {
697     }
698
699     bool run()
700     {
701         ASSERT(m_graph.m_form == SSA);
702         ASSERT(m_graph.m_fixpointState == FixpointNotConverged);
703
704         if (!performSinking())
705             return false;
706
707         if (verbose) {
708             dataLog("Graph after elimination:\n");
709             m_graph.dump();
710         }
711
712         return true;
713     }
714
715 private:
716     bool performSinking()
717     {
718         m_graph.computeRefCounts();
719         m_graph.initializeNodeOwners();
720         performLivenessAnalysis(m_graph);
721         performOSRAvailabilityAnalysis(m_graph);
722         m_combinedLiveness = CombinedLiveness(m_graph);
723
724         CString graphBeforeSinking;
725         if (Options::verboseValidationFailure() && Options::validateGraphAtEachPhase()) {
726             StringPrintStream out;
727             m_graph.dump(out);
728             graphBeforeSinking = out.toCString();
729         }
730
731         if (verbose) {
732             dataLog("Graph before elimination:\n");
733             m_graph.dump();
734         }
735
736         performAnalysis();
737
738         if (!determineSinkCandidates())
739             return false;
740
741         if (verbose) {
742             for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
743                 dataLog("Heap at head of ", *block, ": \n", m_heapAtHead[block]);
744                 dataLog("Heap at tail of ", *block, ": \n", m_heapAtTail[block]);
745             }
746         }
747
748         promoteLocalHeap();
749
750         if (Options::validateGraphAtEachPhase())
751             validate(m_graph, DumpGraph, graphBeforeSinking);
752         return true;
753     }
754
755     void performAnalysis()
756     {
757         m_heapAtHead = BlockMap<LocalHeap>(m_graph);
758         m_heapAtTail = BlockMap<LocalHeap>(m_graph);
759
760         bool changed;
761         do {
762             if (verbose)
763                 dataLog("Doing iteration of escape analysis.\n");
764             changed = false;
765
766             for (BasicBlock* block : m_graph.blocksInPreOrder()) {
767                 m_heapAtHead[block].setReached();
768                 m_heap = m_heapAtHead[block];
769
770                 for (Node* node : *block) {
771                     handleNode(
772                         node,
773                         [] (PromotedHeapLocation, LazyNode) { },
774                         [&] (PromotedHeapLocation) -> Node* {
775                             return nullptr;
776                         });
777                 }
778
779                 if (m_heap == m_heapAtTail[block])
780                     continue;
781
782                 m_heapAtTail[block] = m_heap;
783                 changed = true;
784
785                 m_heap.assertIsValid();
786
787                 // We keep only pointers that are live, and only
788                 // allocations that are either live, pointed to by a
789                 // live pointer, or (recursively) stored in a field of
790                 // a live allocation.
791                 //
792                 // This means we can accidentally leak non-dominating
793                 // nodes into the successor. However, due to the
794                 // non-dominance property, we are guaranteed that the
795                 // successor has at least one predecessor that is not
796                 // dominated either: this means any reference to a
797                 // non-dominating allocation in the successor will
798                 // trigger an escape and get pruned during the merge.
799                 m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);
800
801                 for (BasicBlock* successorBlock : block->successors())
802                     m_heapAtHead[successorBlock].merge(m_heap);
803             }
804         } while (changed);
805     }
806
807     template<typename WriteFunctor, typename ResolveFunctor>
808     void handleNode(
809         Node* node,
810         const WriteFunctor& heapWrite,
811         const ResolveFunctor& heapResolve)
812     {
813         m_heap.assertIsValid();
814         ASSERT(m_heap.takeEscapees().isEmpty());
815
816         Allocation* target = nullptr;
817         HashMap<PromotedLocationDescriptor, LazyNode> writes;
818         PromotedLocationDescriptor exactRead;
819
820         switch (node->op()) {
821         case NewObject:
822             target = &m_heap.newAllocation(node, Allocation::Kind::Object);
823             target->setStructures(node->structure());
824             writes.add(
825                 StructurePLoc, LazyNode(m_graph.freeze(node->structure())));
826             break;
827
828         case MaterializeNewObject: {
829             target = &m_heap.newAllocation(node, Allocation::Kind::Object);
830             target->setStructures(node->structureSet());
831             writes.add(
832                 StructurePLoc, LazyNode(m_graph.varArgChild(node, 0).node()));
833             for (unsigned i = 0; i < node->objectMaterializationData().m_properties.size(); ++i) {
834                 writes.add(
835                     PromotedLocationDescriptor(
836                         NamedPropertyPLoc,
837                         node->objectMaterializationData().m_properties[i].m_identifierNumber),
838                     LazyNode(m_graph.varArgChild(node, i + 1).node()));
839             }
840             break;
841         }
842
843         case NewFunction: {
844             if (node->castOperand<FunctionExecutable*>()->singletonFunction()->isStillValid()) {
845                 m_heap.escape(node->child1().node());
846                 break;
847             }
848             target = &m_heap.newAllocation(node, Allocation::Kind::Function);
849             writes.add(FunctionExecutablePLoc, LazyNode(node->cellOperand()));
850             writes.add(FunctionActivationPLoc, LazyNode(node->child1().node()));
851             break;
852         }
853
854         case CreateActivation: {
855             if (node->castOperand<SymbolTable*>()->singletonScope()->isStillValid()) {
856                 m_heap.escape(node->child1().node());
857                 break;
858             }
859             target = &m_heap.newAllocation(node, Allocation::Kind::Activation);
860             writes.add(ActivationSymbolTablePLoc, LazyNode(node->cellOperand()));
861             writes.add(ActivationScopePLoc, LazyNode(node->child1().node()));
862             {
863                 SymbolTable* symbolTable = node->castOperand<SymbolTable*>();
864                 ConcurrentJITLocker locker(symbolTable->m_lock);
865                 LazyNode undefined(m_graph.freeze(jsUndefined()));
866                 for (auto iter = symbolTable->begin(locker), end = symbolTable->end(locker); iter != end; ++iter) {
867                     writes.add(
868                         PromotedLocationDescriptor(ClosureVarPLoc, iter->value.scopeOffset().offset()),
869                         undefined);
870                 }
871             }
872             break;
873         }
874
875         case MaterializeCreateActivation: {
876             // We have sunk this once already - there is no way the
877             // watchpoint is still valid.
878             ASSERT(!node->castOperand<SymbolTable*>()->singletonScope()->isStillValid());
879             target = &m_heap.newAllocation(node, Allocation::Kind::Activation);
880             writes.add(ActivationSymbolTablePLoc, LazyNode(m_graph.varArgChild(node, 0).node()));
881             writes.add(ActivationScopePLoc, LazyNode(m_graph.varArgChild(node, 1).node()));
882             for (unsigned i = 0; i < node->objectMaterializationData().m_properties.size(); ++i) {
883                 writes.add(
884                     PromotedLocationDescriptor(
885                         ClosureVarPLoc,
886                         node->objectMaterializationData().m_properties[i].m_identifierNumber),
887                     LazyNode(m_graph.varArgChild(node, i + 2).node()));
888             }
889             break;
890         }
891
892         case PutStructure:
893             target = m_heap.onlyLocalAllocation(node->child1().node());
894             if (target && target->isObjectAllocation()) {
895                 writes.add(StructurePLoc, LazyNode(m_graph.freeze(JSValue(node->transition()->next))));
896                 target->setStructures(node->transition()->next);
897             } else
898                 m_heap.escape(node->child1().node());
899             break;
900
901         case CheckStructure: {
902             Allocation* allocation = m_heap.onlyLocalAllocation(node->child1().node());
903             if (allocation && allocation->isObjectAllocation()) {
904                 allocation->filterStructures(node->structureSet());
905                 if (Node* value = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc)))
906                     node->convertToCheckStructureImmediate(value);
907             } else
908                 m_heap.escape(node->child1().node());
909             break;
910         }
911
912         case GetByOffset:
913         case GetGetterSetterByOffset:
914             target = m_heap.onlyLocalAllocation(node->child2().node());
915             if (target && target->isObjectAllocation()) {
916                 unsigned identifierNumber = node->storageAccessData().identifierNumber;
917                 exactRead = PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber);
918             } else {
919                 m_heap.escape(node->child1().node());
920                 m_heap.escape(node->child2().node());
921             }
922             break;
923
924         case MultiGetByOffset:
925             target = m_heap.onlyLocalAllocation(node->child1().node());
926             if (target && target->isObjectAllocation()) {
927                 unsigned identifierNumber = node->multiGetByOffsetData().identifierNumber;
928                 exactRead = PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber);
929             } else
930                 m_heap.escape(node->child1().node());
931             break;
932
933         case PutByOffset:
934             target = m_heap.onlyLocalAllocation(node->child2().node());
935             if (target && target->isObjectAllocation()) {
936                 unsigned identifierNumber = node->storageAccessData().identifierNumber;
937                 writes.add(
938                     PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber),
939                     LazyNode(node->child3().node()));
940             } else {
941                 m_heap.escape(node->child1().node());
942                 m_heap.escape(node->child2().node());
943                 m_heap.escape(node->child3().node());
944             }
945             break;
946
947         case GetClosureVar:
948             target = m_heap.onlyLocalAllocation(node->child1().node());
949             if (target && target->isActivationAllocation()) {
950                 exactRead =
951                     PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset());
952             } else
953                 m_heap.escape(node->child1().node());
954             break;
955
956         case PutClosureVar:
957             target = m_heap.onlyLocalAllocation(node->child1().node());
958             if (target && target->isActivationAllocation()) {
959                 writes.add(
960                     PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset()),
961                     LazyNode(node->child2().node()));
962             } else {
963                 m_heap.escape(node->child1().node());
964                 m_heap.escape(node->child2().node());
965             }
966             break;
967
968         case SkipScope:
969             target = m_heap.onlyLocalAllocation(node->child1().node());
970             if (target && target->isActivationAllocation())
971                 exactRead = ActivationScopePLoc;
972             else
973                 m_heap.escape(node->child1().node());
974             break;
975
976         case GetExecutable:
977             target = m_heap.onlyLocalAllocation(node->child1().node());
978             if (target && target->isFunctionAllocation())
979                 exactRead = FunctionExecutablePLoc;
980             else
981                 m_heap.escape(node->child1().node());
982             break;
983
984         case GetScope:
985             target = m_heap.onlyLocalAllocation(node->child1().node());
986             if (target && target->isFunctionAllocation())
987                 exactRead = FunctionActivationPLoc;
988             else
989                 m_heap.escape(node->child1().node());
990             break;
991
992         case Check:
993             m_graph.doToChildren(
994                 node,
995                 [&] (Edge edge) {
996                     if (edge.willNotHaveCheck())
997                         return;
998
999                     if (alreadyChecked(edge.useKind(), SpecObject))
1000                         return;
1001
1002                     m_heap.escape(edge.node());
1003                 });
1004             break;
1005
1006         case MovHint:
1007         case PutHint:
1008             // Handled by OSR availability analysis
1009             break;
1010
1011         default:
1012             m_graph.doToChildren(
1013                 node,
1014                 [&] (Edge edge) {
1015                     m_heap.escape(edge.node());
1016                 });
1017             break;
1018         }
1019
1020         if (exactRead) {
1021             ASSERT(target);
1022             ASSERT(writes.isEmpty());
1023             if (Node* value = heapResolve(PromotedHeapLocation(target->identifier(), exactRead))) {
1024                 ASSERT(!value->replacement());
1025                 node->replaceWith(value);
1026             }
1027             Node* identifier = target->get(exactRead);
1028             if (identifier)
1029                 m_heap.newPointer(node, identifier);
1030         }
1031
1032         for (auto entry : writes) {
1033             ASSERT(target);
1034             if (entry.value.isNode())
1035                 target->set(entry.key, m_heap.follow(entry.value.asNode()));
1036             else
1037                 target->remove(entry.key);
1038             heapWrite(PromotedHeapLocation(target->identifier(), entry.key), entry.value);
1039         }
1040
1041         m_heap.assertIsValid();
1042     }
1043
1044     bool determineSinkCandidates()
1045     {
1046         m_sinkCandidates.clear();
1047         m_materializationToEscapee.clear();
1048         m_materializationSiteToMaterializations.clear();
1049         m_materializationSiteToRecoveries.clear();
1050
1051         // Logically we wish to consider every allocation and sink
1052         // it. However, it is probably not profitable to sink an
1053         // allocation that will always escape. So, we only sink an
1054         // allocation if one of the following is true:
1055         //
1056         // 1) There exists a basic block with only backwards outgoing
1057         //    edges (or no outgoing edges) in which the node wasn't
1058         //    materialized. This is meant to catch
1059         //    effectively-infinite loops in which we don't need to
1060         //    have allocated the object.
1061         //
1062         // 2) There exists a basic block at the tail of which the node
1063         //    is dead and not materialized.
1064         //
1065         // 3) The sum of execution counts of the materializations is
1066         //    less than the sum of execution counts of the original
1067         //    node.
1068         //
1069         // We currently implement only rule #2.
1070         // FIXME: Implement the two other rules.
1071         // https://bugs.webkit.org/show_bug.cgi?id=137073 (rule #1)
1072         // https://bugs.webkit.org/show_bug.cgi?id=137074 (rule #3)
1073         //
1074         // However, these rules allow for a sunk object to be put into
1075         // a non-sunk one, which we don't support. We could solve this
1076         // by supporting PutHints on local allocations, making these
1077         // objects only partially correct, and we would need to adapt
1078         // the OSR availability analysis and OSR exit to handle
1079         // this. This would be totally doable, but would create a
1080         // super rare, and thus bug-prone, code path.
1081         // So, instead, we need to implement one of the following
1082         // closure rules:
1083         //
1084         // 1) If we put a sink candidate into a local allocation that
1085         //    is not a sink candidate, change our minds and don't
1086         //    actually sink the sink candidate.
1087         //
1088         // 2) If we put a sink candidate into a local allocation, that
1089         //    allocation becomes a sink candidate as well.
1090         //
1091         // We currently choose to implement closure rule #2.
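        // As a sketch of closure rule #2 in action: if @1 is a sink
        // candidate and the graph contains PutByOffset(@0, @1, x)
        // where @0 is an unescaped local allocation, then @0 becomes
        // a sink candidate too. The dependencies map below records
        // exactly these value-to-base edges so that the worklist pass
        // further down can close the candidate set.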
1092         HashMap<Node*, Vector<Node*>> dependencies;
1093         bool hasUnescapedReads = false;
1094         for (BasicBlock* block : m_graph.blocksInPreOrder()) {
1095             m_heap = m_heapAtHead[block];
1096
1097             for (Node* node : *block) {
1098                 handleNode(
1099                     node,
1100                     [&] (PromotedHeapLocation location, LazyNode value) {
1101                         if (!value.isNode())
1102                             return;
1103
1104                         Allocation* allocation = m_heap.onlyLocalAllocation(value.asNode());
1105                         if (allocation && !allocation->isEscapedAllocation())
1106                             dependencies.add(allocation->identifier(), Vector<Node*>()).iterator->value.append(location.base());
1107                     },
1108                     [&] (PromotedHeapLocation) -> Node* {
1109                         hasUnescapedReads = true;
1110                         return nullptr;
1111                     });
1112             }
1113
1114             // The sink candidates are initially the unescaped
1115             // allocations dying at the tail of blocks.
1116             HashSet<Node*> allocations;
1117             for (const auto& entry : m_heap.allocations()) {
1118                 if (!entry.value.isEscapedAllocation())
1119                     allocations.add(entry.key);
1120             }
1121
1122             m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);
1123
1124             for (Node* identifier : allocations) {
1125                 if (!m_heap.isAllocation(identifier))
1126                     m_sinkCandidates.add(identifier);
1127             }
1128         }
1129
1130         // Ensure that the set of sink candidates is closed for put operations
1131         Vector<Node*> worklist;
1132         worklist.appendRange(m_sinkCandidates.begin(), m_sinkCandidates.end());
1133
1134         while (!worklist.isEmpty()) {
1135             for (Node* identifier : dependencies.get(worklist.takeLast())) {
1136                 if (m_sinkCandidates.add(identifier).isNewEntry)
1137                     worklist.append(identifier);
1138             }
1139         }
1140
1141         if (m_sinkCandidates.isEmpty())
1142             return hasUnescapedReads;
1143
1144         if (verbose)
1145             dataLog("Candidates: ", listDump(m_sinkCandidates), "\n");
1146
1147         // Create the materialization nodes
1148         for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
1149             m_heap = m_heapAtHead[block];
1150             m_heap.setWantEscapees();
1151
1152             for (Node* node : *block) {
1153                 handleNode(
1154                     node,
1155                     [] (PromotedHeapLocation, LazyNode) { },
1156                     [] (PromotedHeapLocation) -> Node* {
1157                         return nullptr;
1158                     });
1159                 auto escapees = m_heap.takeEscapees();
1160                 if (!escapees.isEmpty())
1161                     placeMaterializations(escapees, node);
1162             }
1163
1164             m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);
1165
1166             {
1167                 HashMap<Node*, Allocation> escapingOnEdge;
1168                 for (const auto& entry : m_heap.allocations()) {
1169                     if (entry.value.isEscapedAllocation())
1170                         continue;
1171
1172                     bool mustEscape = false;
1173                     for (BasicBlock* successorBlock : block->successors()) {
1174                         if (!m_heapAtHead[successorBlock].isAllocation(entry.key)
1175                             || m_heapAtHead[successorBlock].getAllocation(entry.key).isEscapedAllocation())
1176                             mustEscape = true;
1177                     }
1178
1179                     if (mustEscape)
1180                         escapingOnEdge.add(entry.key, entry.value);
1181                 }
1182                 placeMaterializations(WTF::move(escapingOnEdge), block->terminal());
1183             }
1184         }
1185
1186         return hasUnescapedReads || !m_sinkCandidates.isEmpty();
1187     }
1188
1189     void placeMaterializations(HashMap<Node*, Allocation> escapees, Node* where)
1190     {
1191         // We don't create materializations if the escapee is not a
1192         // sink candidate.
1193         Vector<Node*> toRemove;
1194         for (const auto& entry : escapees) {
1195             if (!m_sinkCandidates.contains(entry.key))
1196                 toRemove.append(entry.key);
1197         }
1198         for (Node* identifier : toRemove)
1199             escapees.remove(identifier);
1200
1201         if (escapees.isEmpty())
1202             return;
1203
1204         // First collect the hints that will be needed when the node
1205         // we materialize is still stored into other unescaped sink candidates
1206         Vector<PromotedHeapLocation> hints;
1207         for (const auto& entry : m_heap.allocations()) {
1208             if (escapees.contains(entry.key))
1209                 continue;
1210
1211             for (const auto& field : entry.value.fields()) {
1212                 ASSERT(m_sinkCandidates.contains(entry.key) || !escapees.contains(field.value));
1213                 if (escapees.contains(field.value) && !field.key.neededForMaterialization())
1214                     hints.append(PromotedHeapLocation(entry.key, field.key));
1215             }
1216         }
1217
1218         // Now we need to order the materializations. Any order is
1219         // valid (as long as we materialize a node first if it is
1220         // needed for the materialization of another node, e.g. a
1221         // function's activation must be materialized before the
1222         // function itself), but we want to try minimizing the number
1223         // of times we have to place Puts to close cycles after a
1224         // materialization. In other words, we are trying to find the
1225         // minimum number of materializations to remove from the
1226         // materialization graph to make it a DAG, known as the
1227         // feedback vertex set problem. Unfortunately, this is an
1228         // NP-hard problem, which we don't want to solve exactly.
1229         //
1230         // Instead, we use a simple greedy procedure that proceeds as
1231         // follows:
1232         //  - While there is at least one node with no outgoing edge
1233         //    amongst the remaining materializations, materialize it
1234         //    first
1235         //
1236         //  - Similarly, while there is at least one node with no
1237         //    incoming edge amongst the remaining materializations,
1238         //    materialize it last.
1239         //
1240         //  - When both previous conditions are false, we have an
1241         //    actual cycle, and we need to pick a node to
1242         //    materialize. We try greedily to remove the "pressure" on
1243         //    the remaining nodes by choosing the node with maximum
1244         //    |incoming edges| * |outgoing edges| as a measure of how
1245         //    "central" to the graph it is. We materialize it first,
1246         //    so that all the recoveries will be Puts of things into
1247         //    it (rather than Puts of the materialization into other
1248         //    objects), which means we will have a single
1249         //    StoreBarrier.
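        //
        // For instance, as a sketch: if #@0 and #@1 both escape here
        // and point to each other (#@0.x => #@1 and #@1.y => #@0),
        // neither the "no outgoing edge" nor the "no incoming edge"
        // rule applies. We then heuristically pick one - say #@0 -
        // and materialize it first; the cycle is later closed by a
        // recovery that stores the materialized #@1 into the already
        // materialized #@0.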
1250
1251
1252         // Compute dependencies between materializations
1253         HashMap<Node*, HashSet<Node*>> dependencies;
1254         HashMap<Node*, HashSet<Node*>> reverseDependencies;
1255         HashMap<Node*, HashSet<Node*>> forMaterialization;
1256         for (const auto& entry : escapees) {
1257             auto& myDependencies = dependencies.add(entry.key, HashSet<Node*>()).iterator->value;
1258             auto& myDependenciesForMaterialization = forMaterialization.add(entry.key, HashSet<Node*>()).iterator->value;
1259             reverseDependencies.add(entry.key, HashSet<Node*>());
1260             for (const auto& field : entry.value.fields()) {
1261                 if (escapees.contains(field.value) && field.value != entry.key) {
1262                     myDependencies.add(field.value);
1263                     reverseDependencies.add(field.value, HashSet<Node*>()).iterator->value.add(entry.key);
1264                     if (field.key.neededForMaterialization())
1265                         myDependenciesForMaterialization.add(field.value);
1266                 }
1267             }
1268         }
1269
1270         // Helper function to update the materialized set and the
1271         // dependencies
1272         HashSet<Node*> materialized;
1273         auto materialize = [&] (Node* identifier) {
1274             materialized.add(identifier);
1275             for (Node* dep : dependencies.get(identifier))
1276                 reverseDependencies.find(dep)->value.remove(identifier);
1277             for (Node* rdep : reverseDependencies.get(identifier)) {
1278                 dependencies.find(rdep)->value.remove(identifier);
1279                 forMaterialization.find(rdep)->value.remove(identifier);
1280             }
1281             dependencies.remove(identifier);
1282             reverseDependencies.remove(identifier);
1283             forMaterialization.remove(identifier);
1284         };
1285
1286         // Nodes without remaining unmaterialized fields will be
1287         // materialized first - amongst the remaining unmaterialized
1288         // nodes
1289         std::list<Allocation> toMaterialize;
1290         auto firstPos = toMaterialize.begin();
1291         auto materializeFirst = [&] (Allocation&& allocation) {
1292             materialize(allocation.identifier());
1293             // We need to insert *after* the current position
1294             if (firstPos != toMaterialize.end())
1295                 ++firstPos;
1296             firstPos = toMaterialize.insert(firstPos, WTF::move(allocation));
1297         };
1298
1299         // Nodes that no other unmaterialized node points to will be
1300         // materialized last - amongst the remaining unmaterialized
1301         // nodes
1302         auto lastPos = toMaterialize.end();
1303         auto materializeLast = [&] (Allocation&& allocation) {
1304             materialize(allocation.identifier());
1305             lastPos = toMaterialize.insert(lastPos, WTF::move(allocation));
1306         };
1307
1308         // These are the promoted locations that contain some of the
1309         // allocations we are currently escaping. If they are a location on
1310         // some other allocation we are currently materializing, we will need
1311         // to "recover" their value with a real put once the corresponding
1312         // allocation is materialized; if they are a location on some other
1313         // not-yet-materialized allocation, we will need a PutHint.
1314         Vector<PromotedHeapLocation> toRecover;
1315
1316         // This loop does the actual cycle breaking
1317         while (!escapees.isEmpty()) {
1318             materialized.clear();
1319
1320             // Materialize nodes that won't require recoveries if we can
1321             for (auto& entry : escapees) {
1322                 if (!forMaterialization.find(entry.key)->value.isEmpty())
1323                     continue;
1324
1325                 if (dependencies.find(entry.key)->value.isEmpty()) {
1326                     materializeFirst(WTF::move(entry.value));
1327                     continue;
1328                 }
1329
1330                 if (reverseDependencies.find(entry.key)->value.isEmpty()) {
1331                     materializeLast(WTF::move(entry.value));
1332                     continue;
1333                 }
1334             }
1335
1336             // We reach this only if there is an actual cycle that needs
1337             // breaking. Because we do not want to solve a NP-hard problem
1338             // here, we just heuristically pick a node and materialize it
1339             // first.
1340             if (materialized.isEmpty()) {
1341                 uint64_t maxEvaluation = 0;
1342                 Allocation* bestAllocation = nullptr;
1343                 for (auto& entry : escapees) {
1344                     if (!forMaterialization.find(entry.key)->value.isEmpty())
1345                         continue;
1346
1347                     uint64_t evaluation =
1348                         static_cast<uint64_t>(dependencies.get(entry.key).size()) * reverseDependencies.get(entry.key).size();
1349                     if (evaluation > maxEvaluation) {
1350                         maxEvaluation = evaluation;
1351                         bestAllocation = &entry.value;
1352                     }
1353                 }
1354                 RELEASE_ASSERT(maxEvaluation > 0);
1355
1356                 materializeFirst(WTF::move(*bestAllocation));
1357             }
1358             RELEASE_ASSERT(!materialized.isEmpty());
1359
1360             for (Node* identifier : materialized)
1361                 escapees.remove(identifier);
1362         }
1363
1364         materialized.clear();
1365
1366         HashSet<Node*> escaped;
1367         for (const Allocation& allocation : toMaterialize)
1368             escaped.add(allocation.identifier());
1369         for (const Allocation& allocation : toMaterialize) {
1370             for (const auto& field : allocation.fields()) {
1371                 if (escaped.contains(field.value) && !materialized.contains(field.value))
1372                     toRecover.append(PromotedHeapLocation(allocation.identifier(), field.key));
1373             }
1374             materialized.add(allocation.identifier());
1375         }
1376
1377         Vector<Node*>& materializations = m_materializationSiteToMaterializations.add(
1378             where, Vector<Node*>()).iterator->value;
1379
1380         for (const Allocation& allocation : toMaterialize) {
1381             Node* materialization = createMaterialization(allocation, where);
1382             materializations.append(materialization);
1383             m_materializationToEscapee.add(materialization, allocation.identifier());
1384         }
1385
1386         if (!toRecover.isEmpty()) {
1387             m_materializationSiteToRecoveries.add(
1388                 where, Vector<PromotedHeapLocation>()).iterator->value.appendVector(toRecover);
1389         }
1390
1391         // The hints need to be after the "real" recoveries so that we
1392         // don't hint not-yet-complete objects
1393         if (!hints.isEmpty()) {
1394             m_materializationSiteToRecoveries.add(
1395                 where, Vector<PromotedHeapLocation>()).iterator->value.appendVector(hints);
1396         }
1397     }
1398
1399     Node* createMaterialization(const Allocation& allocation, Node* where)
1400     {
1401         // FIXME: This is the only place where we actually use the
1402         // fact that an allocation's identifier is indeed the node
1403         // that created the allocation.
1404         switch (allocation.kind()) {
1405         case Allocation::Kind::Object: {
1406             ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
1407             StructureSet* set = m_graph.addStructureSet(allocation.structures());
1408
1409             return m_graph.addNode(
1410                 allocation.identifier()->prediction(), Node::VarArg, MaterializeNewObject,
1411                 NodeOrigin(
1412                     allocation.identifier()->origin.semantic,
1413                     where->origin.forExit),
1414                 OpInfo(set), OpInfo(data), 0, 0);
1415         }
1416
1417         case Allocation::Kind::Function: {
1418             FrozenValue* executable = allocation.identifier()->cellOperand();
1419
1420             return m_graph.addNode(
1421                 allocation.identifier()->prediction(), NewFunction,
1422                 NodeOrigin(
1423                     allocation.identifier()->origin.semantic,
1424                     where->origin.forExit),
1425                 OpInfo(executable));
1427         }
1428
1429         case Allocation::Kind::Activation: {
1430             ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
1431             FrozenValue* symbolTable = allocation.identifier()->cellOperand();
1432
1433             return m_graph.addNode(
1434                 allocation.identifier()->prediction(), Node::VarArg, MaterializeCreateActivation,
1435                 NodeOrigin(
1436                     allocation.identifier()->origin.semantic,
1437                     where->origin.forExit),
1438                 OpInfo(symbolTable), OpInfo(data), 0, 0);
1439         }
1440
1441         default:
1442             DFG_CRASH(m_graph, allocation.identifier(), "Bad allocation kind");
1443         }
1444     }
1445
1446     void promoteLocalHeap()
1447     {
1448         // Collect the set of heap locations that we will be operating
1449         // over.
1450         HashSet<PromotedHeapLocation> locations;
1451         for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
1452             m_heap = m_heapAtHead[block];
1453
1454             for (Node* node : *block) {
1455                 handleNode(
1456                     node,
1457                     [&] (PromotedHeapLocation location, LazyNode) {
1458                         // Writes are tracked only if the location is on a
1459                         // sink candidate; reads are always tracked.
1460                         if (m_sinkCandidates.contains(location.base()))
1461                             locations.add(location);
1462                     },
1463                     [&] (PromotedHeapLocation location) -> Node* {
1464                         locations.add(location);
1465                         return nullptr;
1466                     });
1467             }
1468         }
1469
1470         // Figure out which locations belong to which allocations.
1471         m_locationsForAllocation.clear();
1472         for (PromotedHeapLocation location : locations) {
1473             auto result = m_locationsForAllocation.add(
1474                 location.base(),
1475                 Vector<PromotedHeapLocation>());
1476             ASSERT(!result.iterator->value.contains(location));
1477             result.iterator->value.append(location);
1478         }
1479
1480         m_pointerSSA.reset();
1481         m_allocationSSA.reset();
1482
1483         // Collect the set of "variables" that we will be sinking.
1484         m_locationToVariable.clear();
1485         m_nodeToVariable.clear();
1486         Vector<Node*> indexToNode;
1487         Vector<PromotedHeapLocation> indexToLocation;
1488
1489         for (Node* index : m_sinkCandidates) {
1490             SSACalculator::Variable* variable = m_allocationSSA.newVariable();
1491             m_nodeToVariable.add(index, variable);
1492             ASSERT(indexToNode.size() == variable->index());
1493             indexToNode.append(index);
1494         }
1495
1496         for (PromotedHeapLocation location : locations) {
1497             SSACalculator::Variable* variable = m_pointerSSA.newVariable();
1498             m_locationToVariable.add(location, variable);
1499             ASSERT(indexToLocation.size() == variable->index());
1500             indexToLocation.append(location);
1501         }
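        // From here on we solve two separate SSA problems: m_pointerSSA
        // tracks the values stored at each promoted heap location, while
        // m_allocationSSA tracks the materializations of the sink
        // candidates themselves.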
1502
1503         // We insert all required constants at the top of block 0 so
1504         // that each is inserted only once, instead of cluttering the
1505         // graph with duplicate constants everywhere.
1506         HashMap<FrozenValue*, Node*> lazyMapping;
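        // m_bottom is a placeholder for fields that have not been
        // written yet; the specific constant (1927) is presumably just
        // an easily recognizable sentinel.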
1507         if (!m_bottom)
1508             m_bottom = m_insertionSet.insertConstant(0, NodeOrigin(), jsNumber(1927));
1509         for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
1510             m_heap = m_heapAtHead[block];
1511
1512             for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
1513                 Node* node = block->at(nodeIndex);
1514
1515                 // Some named properties can be added conditionally, which
1516                 // necessitates a bottom value as their initial definition.
1517                 for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
1518                     if (location.kind() != NamedPropertyPLoc)
1519                         continue;
1520
1521                     SSACalculator::Variable* variable = m_locationToVariable.get(location);
1522                     m_pointerSSA.newDef(variable, block, m_bottom);
1523                 }
1524
1525                 for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
1526                     Node* escapee = m_materializationToEscapee.get(materialization);
1527                     m_allocationSSA.newDef(m_nodeToVariable.get(escapee), block, materialization);
1528                 }
1529
1530                 if (m_sinkCandidates.contains(node))
1531                     m_allocationSSA.newDef(m_nodeToVariable.get(node), block, node);
1532
1533                 handleNode(
1534                     node,
1535                     [&] (PromotedHeapLocation location, LazyNode value) {
1536                         if (!locations.contains(location))
1537                             return;
1538
1539                         Node* nodeValue;
1540                         if (value.isNode())
1541                             nodeValue = value.asNode();
1542                         else {
1543                             auto iter = lazyMapping.find(value.asValue());
1544                             if (iter != lazyMapping.end())
1545                                 nodeValue = iter->value;
1546                             else {
1547                                 nodeValue = value.ensureIsNode(
1548                                     m_insertionSet, m_graph.block(0), 0);
1549                                 lazyMapping.add(value.asValue(), nodeValue);
1550                             }
1551                         }
1552
1553                         SSACalculator::Variable* variable = m_locationToVariable.get(location);
1554                         m_pointerSSA.newDef(variable, block, nodeValue);
1555                     },
1556                     [] (PromotedHeapLocation) -> Node* {
1557                         return nullptr;
1558                     });
1559             }
1560         }
1561         m_insertionSet.execute(m_graph.block(0));
1562
1563         // Run the SSA calculators to create Phis
1564         m_pointerSSA.computePhis(
1565             [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
1566                 PromotedHeapLocation location = indexToLocation[variable->index()];
1567
1568                 // Don't create Phi nodes for fields of dead allocations
1569                 if (!m_heapAtHead[block].isAllocation(location.base()))
1570                     return nullptr;
1571
1572                 // Don't create Phi nodes once we are escaped
1573                 if (m_heapAtHead[block].getAllocation(location.base()).isEscapedAllocation())
1574                     return nullptr;
1575
1576                 // If we point to a single allocation, we will
1577                 // directly use its materialization
1578                 if (m_heapAtHead[block].follow(location))
1579                     return nullptr;
1580
1581                 Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, NodeOrigin());
1582                 phiNode->mergeFlags(NodeResultJS);
1583                 return phiNode;
1584             });
1585
1586         m_allocationSSA.computePhis(
1587             [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
1588                 Node* identifier = indexToNode[variable->index()];
1589
1590                 // Don't create Phi nodes for dead allocations
1591                 if (!m_heapAtHead[block].isAllocation(identifier))
1592                     return nullptr;
1593
1594                 // Don't create Phi nodes until we are escaped
1595                 if (!m_heapAtHead[block].getAllocation(identifier).isEscapedAllocation())
1596                     return nullptr;
1597
1598                 Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, NodeOrigin());
1599                 phiNode->mergeFlags(NodeResultJS);
1600                 return phiNode;
1601             });
1602
1603         // Place Phis in the right places, replace all uses of any load with the appropriate
1604         // value, and create the materialization nodes.
1605         LocalOSRAvailabilityCalculator availabilityCalculator;
1606         m_graph.clearReplacements();
1607         for (BasicBlock* block : m_graph.blocksInPreOrder()) {
1608             m_heap = m_heapAtHead[block];
1609             availabilityCalculator.beginBlock(block);
1610
1611             // These mapping tables are intended to be lazy. If
1612             // something is omitted from the table, it means that
1613             // there haven't been any local stores to the promoted
1614             // heap location (or any local materialization).
1615             m_localMapping.clear();
1616             m_escapeeToMaterialization.clear();
1617
1618             // Insert the Phi functions that we had previously
1619             // created.
1620             for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(block)) {
1621                 SSACalculator::Variable* variable = phiDef->variable();
1622                 m_insertionSet.insert(0, phiDef->value());
1623
1624                 PromotedHeapLocation location = indexToLocation[variable->index()];
1625                 m_localMapping.set(location, phiDef->value());
1626
1627                 if (m_sinkCandidates.contains(location.base())) {
1628                     m_insertionSet.insert(
1629                         0, location.createHint(m_graph, NodeOrigin(), phiDef->value()));
1630                 }
1631             }
1632
1633             for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(block)) {
1634                 SSACalculator::Variable* variable = phiDef->variable();
1635                 m_insertionSet.insert(0, phiDef->value());
1636
1637                 Node* identifier = indexToNode[variable->index()];
1638                 m_escapeeToMaterialization.add(identifier, phiDef->value());
1639                 insertOSRHintsForUpdate(0, NodeOrigin(), availabilityCalculator.m_availability, identifier, phiDef->value());
1640             }
1641
1642             if (verbose) {
1643                 dataLog("Local mapping at ", pointerDump(block), ": ", mapDump(m_localMapping), "\n");
1644                 dataLog("Local materializations at ", pointerDump(block), ": ", mapDump(m_escapeeToMaterialization), "\n");
1645             }
1646
1647             for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
1648                 Node* node = block->at(nodeIndex);
1649                 for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
1650                     if (location.kind() != NamedPropertyPLoc)
1651                         continue;
1652
1653                     m_localMapping.set(location, m_bottom);
1654
1655                     if (m_sinkCandidates.contains(node)) {
1656                         m_insertionSet.insert(
1657                             nodeIndex + 1,
1658                             location.createHint(m_graph, node->origin, m_bottom));
1659                     }
1660                 }
1661
1662                 for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
1663                     Node* escapee = m_materializationToEscapee.get(materialization);
1664                     populateMaterialization(block, materialization, escapee);
1665                     m_escapeeToMaterialization.set(escapee, materialization);
1666                     m_insertionSet.insert(nodeIndex, materialization);
1667                     if (verbose)
1668                         dataLog("Materializing ", escapee, " => ", materialization, " at ", node, "\n");
1669                 }
1670
1671                 for (PromotedHeapLocation location : m_materializationSiteToRecoveries.get(node))
1672                     m_insertionSet.insert(nodeIndex, createRecovery(block, location, node));
1673
1674                 // We need to put the OSR hints after the recoveries,
1675                 // because we only want the hints once the object is
1676                 // complete.
1677                 for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
1678                     Node* escapee = m_materializationToEscapee.get(materialization);
1679                     insertOSRHintsForUpdate(
1680                         nodeIndex, node->origin,
1681                         availabilityCalculator.m_availability, escapee, materialization);
1682                 }
1683
1684                 if (m_sinkCandidates.contains(node))
1685                     m_escapeeToMaterialization.set(node, node);
1686
1687                 availabilityCalculator.executeNode(node);
1688
1689                 bool doLower = false;
1690                 handleNode(
1691                     node,
1692                     [&] (PromotedHeapLocation location, LazyNode value) {
1693                         if (!locations.contains(location))
1694                             return;
1695
1696                         Node* nodeValue;
1697                         if (value.isNode())
1698                             nodeValue = value.asNode();
1699                         else
1700                             nodeValue = lazyMapping.get(value.asValue());
1701
1702                         nodeValue = resolve(block, nodeValue);
1703
1704                         m_localMapping.set(location, nodeValue);
1705
1706                         if (!m_sinkCandidates.contains(location.base()))
1707                             return;
1708
1709                         doLower = true;
1710
1711                         m_insertionSet.insert(nodeIndex + 1,
1712                             location.createHint(m_graph, node->origin, nodeValue));
1713                     },
1714                     [&] (PromotedHeapLocation location) -> Node* {
1715                         return resolve(block, location);
1716                     });
1717
1718                 if (m_sinkCandidates.contains(node) || doLower) {
1719                     switch (node->op()) {
1720                     case NewObject:
1721                     case MaterializeNewObject:
1722                         node->convertToPhantomNewObject();
1723                         break;
1724
1725                     case NewFunction:
1726                         node->convertToPhantomNewFunction();
1727                         break;
1728
1729                     case CreateActivation:
1730                     case MaterializeCreateActivation:
1731                         node->convertToPhantomCreateActivation();
1732                         break;
1733
1734                     default:
1735                         node->remove();
1736                         break;
1737                     }
1738                 }
1739
1740                 m_graph.doToChildren(
1741                     node,
1742                     [&] (Edge& edge) {
1743                         edge.setNode(resolve(block, edge.node()));
1744                     });
1745             }
1746
1747             // Gotta drop some Upsilons.
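            // In DFG SSA form, each Phi in a successor block expects an
            // Upsilon in every predecessor that shoves the incoming value
            // into the Phi, so we insert them just before the terminal.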
1748             NodeAndIndex terminal = block->findTerminal();
1749             size_t upsilonInsertionPoint = terminal.index;
1750             NodeOrigin upsilonOrigin = terminal.node->origin;
1751             for (BasicBlock* successorBlock : block->successors()) {
1752                 for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(successorBlock)) {
1753                     Node* phiNode = phiDef->value();
1754                     SSACalculator::Variable* variable = phiDef->variable();
1755                     PromotedHeapLocation location = indexToLocation[variable->index()];
1756                     Node* incoming = resolve(block, location);
1757
1758                     m_insertionSet.insertNode(
1759                         upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
1760                         OpInfo(phiNode), incoming->defaultEdge());
1761                 }
1762
1763                 for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(successorBlock)) {
1764                     Node* phiNode = phiDef->value();
1765                     SSACalculator::Variable* variable = phiDef->variable();
1766                     Node* incoming = getMaterialization(block, indexToNode[variable->index()]);
1767
1768                     m_insertionSet.insertNode(
1769                         upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
1770                         OpInfo(phiNode), incoming->defaultEdge());
1771                 }
1772             }
1773
1774             m_insertionSet.execute(block);
1775         }
1776     }
1777
1778     Node* resolve(BasicBlock* block, PromotedHeapLocation location)
1779     {
1780         // If we are currently pointing to a single local allocation,
1781         // simply return the associated materialization.
1782         if (Node* identifier = m_heap.follow(location))
1783             return getMaterialization(block, identifier);
1784
1785         if (Node* result = m_localMapping.get(location))
1786             return result;
1787
1788         // This implies that there is no local mapping. Find a non-local mapping.
1789         SSACalculator::Def* def = m_pointerSSA.nonLocalReachingDef(
1790             block, m_locationToVariable.get(location));
1791         ASSERT(def);
1792         ASSERT(def->value());
1793
1794         Node* result = def->value();
1795
1796         ASSERT(!result->replacement());
1797
1798         m_localMapping.add(location, result);
1799         return result;
1800     }
1801
1802     Node* resolve(BasicBlock* block, Node* node)
1803     {
1804         // If we are currently pointing to a single local allocation,
1805         // simply return the associated materialization.
1806         if (Node* identifier = m_heap.follow(node))
1807             return getMaterialization(block, identifier);
1808
1809         if (node->replacement())
1810             node = node->replacement();
1811         ASSERT(!node->replacement());
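        // We assume replacements are never chained; a single hop must
        // reach the final node, which the assertion above checks.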
1812
1813         return node;
1814     }
1815
1816     Node* getMaterialization(BasicBlock* block, Node* identifier)
1817     {
1818         ASSERT(m_heap.isAllocation(identifier));
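        // An allocation that is not a sink candidate is never deleted
        // from the graph, so the allocation node itself serves as its
        // own materialization.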
1819         if (!m_sinkCandidates.contains(identifier))
1820             return identifier;
1821
1822         if (Node* materialization = m_escapeeToMaterialization.get(identifier))
1823             return materialization;
1824
1825         SSACalculator::Def* def = m_allocationSSA.nonLocalReachingDef(
1826             block, m_nodeToVariable.get(identifier));
1827         ASSERT(def && def->value());
1828         m_escapeeToMaterialization.add(identifier, def->value());
1829         ASSERT(!def->value()->replacement());
1830         return def->value();
1831     }
1832
1833     void insertOSRHintsForUpdate(unsigned nodeIndex, NodeOrigin origin, AvailabilityMap& availability, Node* escapee, Node* materialization)
1834     {
1835         // We need to follow() the value in the heap.
1836         // Consider the following graph:
1837         //
1838         // Block #0
1839         //   0: NewObject({})
1840         //   1: NewObject({})
1841         //   -: PutByOffset(@0, @1, x:0)
1842         //   -: PutStructure(@0, {x:0})
1843         //   2: GetByOffset(@0, x:0)
1844         //   -: MovHint(@2, loc1)
1845         //   -: Branch(#1, #2)
1846         //
1847         // Block #1
1848         //   3: Call(f, @1)
1849         //   4: Return(@0)
1850         //
1851         // Block #2
1852         //   -: Return(undefined)
1853         //
1854         // We need to materialize @1 at @3, and when doing so we need
1855         // to insert a MovHint for the materialization into loc1 as
1856         // well.
1857         // In order to do this, we say that we need to insert an
1858         // update hint for any availability whose node resolve()s to
1859         // the materialization.
1860         for (auto entry : availability.m_heap) {
1861             if (!entry.value.hasNode())
1862                 continue;
1863             if (m_heap.follow(entry.value.node()) != escapee)
1864                 continue;
1865
1866             m_insertionSet.insert(
1867                 nodeIndex, entry.key.createHint(m_graph, origin, materialization));
1868         }
1869
1870         for (unsigned i = availability.m_locals.size(); i--;) {
1871             if (!availability.m_locals[i].hasNode())
1872                 continue;
1873             if (m_heap.follow(availability.m_locals[i].node()) != escapee)
1874                 continue;
1875
1876             int operand = availability.m_locals.operandForIndex(i);
1877             m_insertionSet.insertNode(
1878                 nodeIndex, SpecNone, MovHint, origin, OpInfo(operand),
1879                 materialization->defaultEdge());
1880         }
1881     }
1882
1883     void populateMaterialization(BasicBlock* block, Node* node, Node* escapee)
1884     {
1885         Allocation& allocation = m_heap.getAllocation(escapee);
1886         switch (node->op()) {
1887         case MaterializeNewObject: {
1888             ObjectMaterializationData& data = node->objectMaterializationData();
1889             unsigned firstChild = m_graph.m_varArgChildren.size();
1890
1891             Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
1892
1893             PromotedHeapLocation structure(StructurePLoc, allocation.identifier());
1894             ASSERT(locations.contains(structure));
1895
1896             m_graph.m_varArgChildren.append(Edge(resolve(block, structure), KnownCellUse));
1897
1898             for (PromotedHeapLocation location : locations) {
1899                 switch (location.kind()) {
1900                 case StructurePLoc:
1901                     ASSERT(location == structure);
1902                     break;
1903
1904                 case NamedPropertyPLoc: {
1905                     ASSERT(location.base() == allocation.identifier());
1906                     data.m_properties.append(PhantomPropertyValue(location.info()));
1907                     Node* value = resolve(block, location);
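                    // A field whose value is itself a sink candidate will
                    // be filled in by a later recovery; store the bottom
                    // sentinel for now.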
1908                     if (m_sinkCandidates.contains(value))
1909                         m_graph.m_varArgChildren.append(m_bottom);
1910                     else
1911                         m_graph.m_varArgChildren.append(value);
1912                     break;
1913                 }
1914
1915                 default:
1916                     DFG_CRASH(m_graph, node, "Bad location kind");
1917                 }
1918             }
1919
1920             node->children = AdjacencyList(
1921                 AdjacencyList::Variable,
1922                 firstChild, m_graph.m_varArgChildren.size() - firstChild);
1923             break;
1924         }
1925
1926         case MaterializeCreateActivation: {
1927             ObjectMaterializationData& data = node->objectMaterializationData();
1928
1929             unsigned firstChild = m_graph.m_varArgChildren.size();
1930
1931             Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
1932
1933             PromotedHeapLocation symbolTable(ActivationSymbolTablePLoc, allocation.identifier());
1934             ASSERT(locations.contains(symbolTable));
1935             ASSERT(node->cellOperand() == resolve(block, symbolTable)->constant());
1936             m_graph.m_varArgChildren.append(Edge(resolve(block, symbolTable), KnownCellUse));
1937
1938             PromotedHeapLocation scope(ActivationScopePLoc, allocation.identifier());
1939             ASSERT(locations.contains(scope));
1940             m_graph.m_varArgChildren.append(Edge(resolve(block, scope), KnownCellUse));
1941
1942             for (PromotedHeapLocation location : locations) {
1943                 switch (location.kind()) {
1944                 case ActivationScopePLoc: {
1945                     ASSERT(location == scope);
1946                     break;
1947                 }
1948
1949                 case ActivationSymbolTablePLoc: {
1950                     ASSERT(location == symbolTable);
1951                     break;
1952                 }
1953
1954                 case ClosureVarPLoc: {
1955                     ASSERT(location.base() == allocation.identifier());
1956                     data.m_properties.append(PhantomPropertyValue(location.info()));
1957                     Node* value = resolve(block, location);
1958                     if (m_sinkCandidates.contains(value))
1959                         m_graph.m_varArgChildren.append(m_bottom);
1960                     else
1961                         m_graph.m_varArgChildren.append(value);
1962                     break;
1963                 }
1964
1965                 default:
1966                     DFG_CRASH(m_graph, node, "Bad location kind");
1967                 }
1968             }
1969
1970             node->children = AdjacencyList(
1971                 AdjacencyList::Variable,
1972                 firstChild, m_graph.m_varArgChildren.size() - firstChild);
1973             break;
1974         }
1975
1976         case NewFunction: {
1977             Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
1978             ASSERT(locations.size() == 2);
1979
1980             PromotedHeapLocation executable(FunctionExecutablePLoc, allocation.identifier());
1981             ASSERT_UNUSED(executable, locations.contains(executable));
1982
1983             PromotedHeapLocation activation(FunctionActivationPLoc, allocation.identifier());
1984             ASSERT(locations.contains(activation));
1985
1986             node->child1() = Edge(resolve(block, activation), KnownCellUse);
1987             break;
1988         }
1989
1990         default:
1991             DFG_CRASH(m_graph, node, "Bad materialize op");
1992         }
1993     }
1994
1995     Node* createRecovery(BasicBlock* block, PromotedHeapLocation location, Node* where)
1996     {
1997         if (verbose)
1998             dataLog("Recovering ", location, " at ", where, "\n");
1999         ASSERT(location.base()->isPhantomAllocation());
2000         Node* base = getMaterialization(block, location.base());
2001         Node* value = resolve(block, location);
2002
2003         if (verbose)
2004             dataLog("Base is ", base, " and value is ", value, "\n");
2005
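        // If the base is itself still sunk at this point, there is no
        // real object to store into; the recovery is then just another
        // lazily-recorded hint on the phantom allocation.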
2006         if (base->isPhantomAllocation()) {
2007             return PromotedHeapLocation(base, location.descriptor()).createHint(
2008                 m_graph,
2009                 NodeOrigin(
2010                     base->origin.semantic,
2011                     where->origin.forExit),
2012                 value);
2013         }
2014
2015         switch (location.kind()) {
2016         case NamedPropertyPLoc: {
2017             Allocation& allocation = m_heap.getAllocation(location.base());
2018
2019             Vector<Structure*> structures;
2020             structures.appendRange(allocation.structures().begin(), allocation.structures().end());
2021             unsigned identifierNumber = location.info();
2022             UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
2023
2024             std::sort(
2025                 structures.begin(),
2026                 structures.end(),
2027                 [uid] (Structure* a, Structure* b) -> bool {
2028                     return a->getConcurrently(uid) < b->getConcurrently(uid);
2029                 });
2030
2031             PropertyOffset firstOffset = structures[0]->getConcurrently(uid);
2032
2033             if (firstOffset == structures.last()->getConcurrently(uid)) {
2034                 Node* storage = base;
2035                 // FIXME: When we decide to sink objects with a
2036                 // property storage, we should handle non-inline offsets.
2037                 RELEASE_ASSERT(isInlineOffset(firstOffset));
2038
2039                 StorageAccessData* data = m_graph.m_storageAccessData.add();
2040                 data->offset = firstOffset;
2041                 data->identifierNumber = identifierNumber;
2042
2043                 return m_graph.addNode(
2044                     SpecNone,
2045                     PutByOffset,
2046                     where->origin,
2047                     OpInfo(data),
2048                     Edge(storage, KnownCellUse),
2049                     Edge(base, KnownCellUse),
2050                     value->defaultEdge());
2051             }
2052
2053             MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
2054             data->identifierNumber = identifierNumber;
2055
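            // Because the structures were sorted by offset above, equal
            // offsets are adjacent and one PutByIdVariant per contiguous
            // run suffices. For example (hypothetical), offsets [0, 0, 8]
            // for structures [S0, S1, S2] would yield
            // replace({S0, S1}, 0) and replace({S2}, 8).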
2056             {
2057                 PropertyOffset currentOffset = firstOffset;
2058                 StructureSet currentSet;
2059                 for (Structure* structure : structures) {
2060                     PropertyOffset offset = structure->getConcurrently(uid);
2061                     if (offset != currentOffset) {
2062                         data->variants.append(
2063                             PutByIdVariant::replace(currentSet, currentOffset));
2064                         currentOffset = offset;
2065                         currentSet.clear();
2066                     }
2067                     currentSet.add(structure);
2068                 }
2069                 data->variants.append(PutByIdVariant::replace(currentSet, currentOffset));
2070             }
2071
2072             return m_graph.addNode(
2073                 SpecNone,
2074                 MultiPutByOffset,
2075                 NodeOrigin(
2076                     base->origin.semantic,
2077                     where->origin.forExit),
2078                 OpInfo(data),
2079                 Edge(base, KnownCellUse),
2080                 value->defaultEdge());
2082         }
2083
2084         case ClosureVarPLoc: {
2085             return m_graph.addNode(
2086                 SpecNone,
2087                 PutClosureVar,
2088                 NodeOrigin(
2089                     base->origin.semantic,
2090                     where->origin.forExit),
2091                 OpInfo(location.info()),
2092                 Edge(base, KnownCellUse),
2093                 value->defaultEdge());
2095         }
2096
2097         default:
2098             DFG_CRASH(m_graph, base, "Bad location kind");
2099             break;
2100         }
2101     }
2102
2103     SSACalculator m_pointerSSA;
2104     SSACalculator m_allocationSSA;
2105     HashSet<Node*> m_sinkCandidates;
2106     HashMap<PromotedHeapLocation, SSACalculator::Variable*> m_locationToVariable;
2107     HashMap<Node*, SSACalculator::Variable*> m_nodeToVariable;
2108     HashMap<PromotedHeapLocation, Node*> m_localMapping;
2109     HashMap<Node*, Node*> m_escapeeToMaterialization;
2110     InsertionSet m_insertionSet;
2111     CombinedLiveness m_combinedLiveness;
2112
2113     HashMap<Node*, Node*> m_materializationToEscapee;
2114     HashMap<Node*, Vector<Node*>> m_materializationSiteToMaterializations;
2115     HashMap<Node*, Vector<PromotedHeapLocation>> m_materializationSiteToRecoveries;
2116
2117     HashMap<Node*, Vector<PromotedHeapLocation>> m_locationsForAllocation;
2118
2119     BlockMap<LocalHeap> m_heapAtHead;
2120     BlockMap<LocalHeap> m_heapAtTail;
2121     LocalHeap m_heap;
2122
2123     Node* m_bottom = nullptr;
2124 };
2125
2126 }
2127
2128 bool performObjectAllocationSinking(Graph& graph)
2129 {
2130     SamplingRegion samplingRegion("DFG Object Allocation Sinking Phase");
2131     return runPhase<ObjectAllocationSinkingPhase>(graph);
2132 }
2133
2134 } } // namespace JSC::DFG
2135
2136 #endif // ENABLE(DFG_JIT)