Try ripping out inferred types because it might be a performance improvement
[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGObjectAllocationSinkingPhase.cpp
index 1b7b683..b99a4dd 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2015-2018 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -20,7 +20,7 @@
  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include "config.h"
 
 #if ENABLE(DFG_JIT)
 
-#include "DFGAbstractHeap.h"
 #include "DFGBlockMapInlines.h"
-#include "DFGClobberize.h"
+#include "DFGClobbersExitState.h"
+#include "DFGCombinedLiveness.h"
 #include "DFGGraph.h"
-#include "DFGInsertOSRHintsForUpdate.h"
 #include "DFGInsertionSet.h"
+#include "DFGLazyNode.h"
 #include "DFGLivenessAnalysisPhase.h"
 #include "DFGOSRAvailabilityAnalysisPhase.h"
 #include "DFGPhase.h"
-#include "DFGPromoteHeapAccess.h"
+#include "DFGPromotedHeapLocation.h"
 #include "DFGSSACalculator.h"
 #include "DFGValidate.h"
 #include "JSCInlines.h"
+#include <wtf/StdList.h>
 
 namespace JSC { namespace DFG {
 
-static bool verbose = false;
+namespace {
+
+namespace DFGObjectAllocationSinkingPhaseInternal {
+static const bool verbose = false;
+}
+
+// In order to sink object cycles, we use a points-to analysis coupled
+// with an escape analysis. This analysis is actually similar to an
+// abstract interpreter focused on local allocations and ignoring
+// everything else.
+//
+// We represent the local heap using two mappings:
+//
+// - A set of the local allocations present in the function, where
+//   each of them carries a further mapping from
+//   PromotedLocationDescriptor to the local allocations it must
+//   point to.
+//
+// - A "pointer" mapping from nodes to local allocations, if they must
+//   be equal to said local allocation and are currently live. This
+//   can be because the node is the actual node that created the
+//   allocation, or any other node that must currently point to it -
+//   we don't make a difference.
+//
+// The following graph is a motivation for why we separate allocations
+// from pointers:
+//
+// Block #0
+//  0: NewObject({})
+//  1: NewObject({})
+//  -: PutByOffset(@0, @1, x)
+//  -: PutStructure(@0, {x:0})
+//  2: GetByOffset(@0, x)
+//  -: Jump(#1)
+//
+// Block #1
+//  -: Return(@2)
+//
+// Here, we need to remember in block #1 that @2 points to a local
+// allocation with appropriate fields and structures information
+// (because we should be able to place a materialization on top of
+// block #1 here), even though @1 is dead. We *could* just keep @1
+// artificially alive here, but there is no real reason to do it:
+// after all, by the end of block #0, @1 and @2 should be completely
+// interchangeable, and there is no reason for us to artificially make
+// @1 more important.
+//
+// An important point for understanding this separation is that we
+// should think of the local heap as follows: we have a bunch of
+// nodes that are pointers to "allocations" that live someplace on
+// the heap, and those allocations can have pointers between
+// themselves as well. We shouldn't care about whatever names we
+// give to the allocations; what matters when comparing/merging two
+// heaps is the isomorphism/comparison between the allocation graphs
+// as seen by the nodes.
+//
+// For instance, in the following graph:
+//
+// Block #0
+//  0: NewObject({})
+//  -: Branch(#1, #2)
+//
+// Block #1
+//  1: NewObject({})
+//  -: PutByOffset(@0, @1, x)
+//  -: PutStructure(@0, {x:0})
+//  -: Jump(#3)
+//
+// Block #2
+//  2: NewObject({})
+//  -: PutByOffset(@2, undefined, x)
+//  -: PutStructure(@2, {x:0})
+//  -: PutByOffset(@0, @2, x)
+//  -: PutStructure(@0, {x:0})
+//  -: Jump(#3)
+//
+// Block #3
+//  -: Return(@0)
+//
+// we should think of the heaps at tail of blocks #1 and #2 as being
+// exactly the same, even though one has @0.x pointing to @1 and the
+// other has @0.x pointing to @2, because in essence this should not
+// be different from the graph where we hoisted @1 and @2 into a
+// single allocation in block #0. We currently do not handle this
+// case, because we merge allocations based on the node they come
+// from, but this is only a technicality for the sake of simplicity
+// that shouldn't hide the deeper idea outlined here.
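+//
+// As a source-level illustration (a hedged sketch; 'use' stands for
+// any call that observes o, and the IR the DFG actually sees will
+// differ), sinking pays off in code like:
+//
+//     function f(rare) {
+//         var o = {x: 42};   // NewObject + PutStructure + PutByOffset
+//         if (rare)
+//             use(o);        // a materialization is inserted here
+//         return o.x;        // this load is forwarded to 42
+//     }
+//
+// On the common path, no object is ever allocated.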
+
+class Allocation {
+public:
+    // We use Escaped as a special allocation kind because, when we
+    // decide to sink an allocation, we still need to keep track of
+    // it once it has escaped, as long as pointers to it remain, so
+    // that any use of those pointers can be replaced by the
+    // corresponding materialization.
+    enum class Kind { Escaped, Object, Activation, Function, GeneratorFunction, AsyncFunction, AsyncGeneratorFunction, RegExpObject };
+
+    using Fields = HashMap<PromotedLocationDescriptor, Node*>;
+
+    explicit Allocation(Node* identifier = nullptr, Kind kind = Kind::Escaped)
+        : m_identifier(identifier)
+        , m_kind(kind)
+    {
+    }
+
+    const Fields& fields() const
+    {
+        return m_fields;
+    }
+
+    Fields& fields()
+    {
+        return m_fields;
+    }
+
+    Node* get(PromotedLocationDescriptor descriptor)
+    {
+        return m_fields.get(descriptor);
+    }
+
+    Allocation& set(PromotedLocationDescriptor descriptor, Node* value)
+    {
+        // Pointing to anything other than an unescaped local
+        // allocation is represented by simply not having the
+        // field.
+        if (value)
+            m_fields.set(descriptor, value);
+        else
+            m_fields.remove(descriptor);
+        return *this;
+    }
+
+    void remove(PromotedLocationDescriptor descriptor)
+    {
+        set(descriptor, nullptr);
+    }
+
+    bool hasStructures() const
+    {
+        switch (kind()) {
+        case Kind::Object:
+            return true;
+
+        default:
+            return false;
+        }
+    }
+
+    Allocation& setStructures(const RegisteredStructureSet& structures)
+    {
+        ASSERT(hasStructures() && !structures.isEmpty());
+        m_structures = structures;
+        return *this;
+    }
+
+    Allocation& mergeStructures(const RegisteredStructureSet& structures)
+    {
+        ASSERT(hasStructures() || structures.isEmpty());
+        m_structures.merge(structures);
+        return *this;
+    }
+
+    Allocation& filterStructures(const RegisteredStructureSet& structures)
+    {
+        ASSERT(hasStructures());
+        m_structures.filter(structures);
+        RELEASE_ASSERT(!m_structures.isEmpty());
+        return *this;
+    }
+
+    const RegisteredStructureSet& structures() const
+    {
+        return m_structures;
+    }
+
+    Node* identifier() const { return m_identifier; }
+
+    Kind kind() const { return m_kind; }
+
+    bool isEscapedAllocation() const
+    {
+        return kind() == Kind::Escaped;
+    }
+
+    bool isObjectAllocation() const
+    {
+        return m_kind == Kind::Object;
+    }
+
+    bool isActivationAllocation() const
+    {
+        return m_kind == Kind::Activation;
+    }
+
+    bool isFunctionAllocation() const
+    {
+        return m_kind == Kind::Function || m_kind == Kind::GeneratorFunction || m_kind == Kind::AsyncFunction || m_kind == Kind::AsyncGeneratorFunction;
+    }
+
+    bool isRegExpObjectAllocation() const
+    {
+        return m_kind == Kind::RegExpObject;
+    }
+
+    bool operator==(const Allocation& other) const
+    {
+        return m_identifier == other.m_identifier
+            && m_kind == other.m_kind
+            && m_fields == other.m_fields
+            && m_structures == other.m_structures;
+    }
+
+    bool operator!=(const Allocation& other) const
+    {
+        return !(*this == other);
+    }
+
+    void dump(PrintStream& out) const
+    {
+        dumpInContext(out, nullptr);
+    }
+
+    void dumpInContext(PrintStream& out, DumpContext* context) const
+    {
+        switch (m_kind) {
+        case Kind::Escaped:
+            out.print("Escaped");
+            break;
+
+        case Kind::Object:
+            out.print("Object");
+            break;
+
+        case Kind::Function:
+            out.print("Function");
+            break;
+
+        case Kind::GeneratorFunction:
+            out.print("GeneratorFunction");
+            break;
+
+        case Kind::AsyncFunction:
+            out.print("AsyncFunction");
+            break;
+
+        case Kind::AsyncGeneratorFunction:
+            out.print("AsyncGeneratorFunction");
+            break;
+
+        case Kind::Activation:
+            out.print("Activation");
+            break;
+
+        case Kind::RegExpObject:
+            out.print("RegExpObject");
+            break;
+        }
+        out.print("Allocation(");
+        if (!m_structures.isEmpty())
+            out.print(inContext(m_structures.toStructureSet(), context));
+        if (!m_fields.isEmpty()) {
+            if (!m_structures.isEmpty())
+                out.print(", ");
+            out.print(mapDump(m_fields, " => #", ", "));
+        }
+        out.print(")");
+    }
+
+private:
+    Node* m_identifier; // This is the actual node that created the allocation
+    Kind m_kind;
+    Fields m_fields;
+    RegisteredStructureSet m_structures;
+};
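+
+// Example use of the field interface above (a minimal sketch;
+// 'identifier', 'descriptor' and 'pointee' are hypothetical nodes
+// and descriptors):
+//
+//     Allocation allocation(identifier, Allocation::Kind::Object);
+//     allocation.set(descriptor, pointee);  // must-point-to edge
+//     allocation.set(descriptor, nullptr);  // same as remove(descriptor)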
+
+class LocalHeap {
+public:
+    Allocation& newAllocation(Node* node, Allocation::Kind kind)
+    {
+        ASSERT(!m_pointers.contains(node) && !isAllocation(node));
+        m_pointers.add(node, node);
+        return m_allocations.set(node, Allocation(node, kind)).iterator->value;
+    }
+
+    bool isAllocation(Node* identifier) const
+    {
+        return m_allocations.contains(identifier);
+    }
+
+    // Note that this is fundamentally different from
+    // onlyLocalAllocation() below. getAllocation() takes as argument
+    // a node-as-identifier, that is, an allocation node. This
+    // allocation node doesn't have to be alive; it may only be
+    // pointed to by other nodes or allocation fields.
+    // For instance, in the following graph:
+    //
+    // Block #0
+    //  0: NewObject({})
+    //  1: NewObject({})
+    //  -: PutByOffset(@0, @1, x)
+    //  -: PutStructure(@0, {x:0})
+    //  2: GetByOffset(@0, x)
+    //  -: Jump(#1)
+    //
+    // Block #1
+    //  -: Return(@2)
+    //
+    // At head of block #1, the only reachable allocation is #@1,
+    // which can be reached through node @2. Thus, getAllocation(#@1)
+    // contains the appropriate metadata for this allocation, but
+    // onlyLocalAllocation(@1) is null, as @1 is no longer a pointer
+    // to #@1 (since it is dead). Conversely, onlyLocalAllocation(@2)
+    // is the same as getAllocation(#@1), while getAllocation(#@2)
+    // does not make sense since @2 is not an allocation node.
+    //
+    // This is meant to be used when the node is already known to be
+    // an identifier (i.e. an allocation) - probably because it was
+    // found as the value of a field or pointer in the current heap, or
+    // was the result of a call to follow(). In any other case (such
+    // as when doing anything while traversing the graph), the
+    // appropriate function to call is probably onlyLocalAllocation.
+    Allocation& getAllocation(Node* identifier)
+    {
+        auto iter = m_allocations.find(identifier);
+        ASSERT(iter != m_allocations.end());
+        return iter->value;
+    }
+
+    void newPointer(Node* node, Node* identifier)
+    {
+        ASSERT(!m_allocations.contains(node) && !m_pointers.contains(node));
+        ASSERT(isAllocation(identifier));
+        m_pointers.add(node, identifier);
+    }
+
+    // follow solves the points-to problem. Given a live node, which
+    // may be either an allocation itself or a heap read (e.g. a
+    // GetByOffset node), it returns the corresponding allocation
+    // node, if there is one. If the argument node is neither an
+    // allocation nor a heap read, or may point to different nodes,
+    // nullptr will be returned. Note that a node that points to
+    // different nodes can never point to an unescaped local
+    // allocation.
+    Node* follow(Node* node) const
+    {
+        auto iter = m_pointers.find(node);
+        ASSERT(iter == m_pointers.end() || m_allocations.contains(iter->value));
+        return iter == m_pointers.end() ? nullptr : iter->value;
+    }
+
+    Node* follow(PromotedHeapLocation location) const
+    {
+        const Allocation& base = m_allocations.find(location.base())->value;
+        auto iter = base.fields().find(location.descriptor());
+
+        if (iter == base.fields().end())
+            return nullptr;
+
+        return iter->value;
+    }
+
+    // onlyLocalAllocation finds the corresponding allocation metadata
+    // for any live node. onlyLocalAllocation(node) is essentially
+    // getAllocation(follow(node)), with appropriate null handling.
+    Allocation* onlyLocalAllocation(Node* node)
+    {
+        Node* identifier = follow(node);
+        if (!identifier)
+            return nullptr;
+
+        return &getAllocation(identifier);
+    }
+
+    Allocation* onlyLocalAllocation(PromotedHeapLocation location)
+    {
+        Node* identifier = follow(location);
+        if (!identifier)
+            return nullptr;
+
+        return &getAllocation(identifier);
+    }
+
+    // This allows us to store the escapees only when necessary. If
+    // set, the current escapees can be retrieved at any time using
+    // takeEscapees(), which will clear the cached set of escapees;
+    // otherwise the heap won't remember escaping allocations.
+    void setWantEscapees()
+    {
+        m_wantEscapees = true;
+    }
+
+    HashMap<Node*, Allocation> takeEscapees()
+    {
+        return WTFMove(m_escapees);
+    }
+
+    void escape(Node* node)
+    {
+        Node* identifier = follow(node);
+        if (!identifier)
+            return;
+
+        escapeAllocation(identifier);
+    }
+
+    void merge(const LocalHeap& other)
+    {
+        assertIsValid();
+        other.assertIsValid();
+        ASSERT(!m_wantEscapees);
+
+        if (!reached()) {
+            ASSERT(other.reached());
+            *this = other;
+            return;
+        }
+
+        NodeSet toEscape;
+
+        for (auto& allocationEntry : other.m_allocations)
+            m_allocations.add(allocationEntry.key, allocationEntry.value);
+        for (auto& allocationEntry : m_allocations) {
+            auto allocationIter = other.m_allocations.find(allocationEntry.key);
+
+            // If we have it and they don't, it died for them but we
+            // are keeping it alive through another field or pointer
+            // somewhere. There is nothing to do - it will be escaped
+            // automatically when we handle that other field, and if
+            // nothing keeps it alive at all, it will get pruned
+            // below.
+            if (allocationIter == other.m_allocations.end())
+                continue;
+
+            if (allocationEntry.value.kind() != allocationIter->value.kind()) {
+                toEscape.addVoid(allocationEntry.key);
+                for (const auto& fieldEntry : allocationIter->value.fields())
+                    toEscape.addVoid(fieldEntry.value);
+            } else {
+                mergePointerSets(allocationEntry.value.fields(), allocationIter->value.fields(), toEscape);
+                allocationEntry.value.mergeStructures(allocationIter->value.structures());
+            }
+        }
+
+        mergePointerSets(m_pointers, other.m_pointers, toEscape);
+
+        for (Node* identifier : toEscape)
+            escapeAllocation(identifier);
+
+        if (!ASSERT_DISABLED) {
+            for (const auto& entry : m_allocations)
+                ASSERT_UNUSED(entry, entry.value.isEscapedAllocation() || other.m_allocations.contains(entry.key));
+        }
+
+        // If there is no remaining pointer to an allocation, we can
+        // remove it. This should only happen for escaped allocations,
+        // because we only merge liveness-pruned heaps in the first
+        // place.
+        prune();
+
+        assertIsValid();
+    }
+
+    void pruneByLiveness(const NodeSet& live)
+    {
+        m_pointers.removeIf(
+            [&] (const auto& entry) {
+                return !live.contains(entry.key);
+            });
+        prune();
+    }
+
+    void assertIsValid() const
+    {
+        if (ASSERT_DISABLED)
+            return;
+
+        // Pointers should point to an actual allocation
+        for (const auto& entry : m_pointers) {
+            ASSERT_UNUSED(entry, entry.value);
+            ASSERT(m_allocations.contains(entry.value));
+        }
+
+        for (const auto& allocationEntry : m_allocations) {
+            // Fields should point to an actual allocation
+            for (const auto& fieldEntry : allocationEntry.value.fields()) {
+                ASSERT_UNUSED(fieldEntry, fieldEntry.value);
+                ASSERT(m_allocations.contains(fieldEntry.value));
+            }
+        }
+    }
+
+    bool operator==(const LocalHeap& other) const
+    {
+        assertIsValid();
+        other.assertIsValid();
+        return m_allocations == other.m_allocations
+            && m_pointers == other.m_pointers;
+    }
+
+    bool operator!=(const LocalHeap& other) const
+    {
+        return !(*this == other);
+    }
+
+    const HashMap<Node*, Allocation>& allocations() const
+    {
+        return m_allocations;
+    }
+
+    const HashMap<Node*, Node*>& pointers() const
+    {
+        return m_pointers;
+    }
+
+    void dump(PrintStream& out) const
+    {
+        out.print("  Allocations:\n");
+        for (const auto& entry : m_allocations)
+            out.print("    #", entry.key, ": ", entry.value, "\n");
+        out.print("  Pointers:\n");
+        for (const auto& entry : m_pointers)
+            out.print("    ", entry.key, " => #", entry.value, "\n");
+    }
+
+    bool reached() const
+    {
+        return m_reached;
+    }
+
+    void setReached()
+    {
+        m_reached = true;
+    }
+
+private:
+    // When we merge two heaps, we escape all fields of allocations,
+    // unless they point to the same thing in both heaps.
+    // The reason for this is that it allows us not to do extra work
+    // for diamond graphs where we would otherwise have to check
+    // whether we have a single definition or not, which would be
+    // cumbersome.
+    //
+    // Note that we should try to unify nodes even when they are not
+    // from the same allocation; for instance we should be able to
+    // completely eliminate all allocations from the following graph:
+    //
+    // Block #0
+    //  0: NewObject({})
+    //  -: Branch(#1, #2)
+    //
+    // Block #1
+    //  1: NewObject({})
+    //  -: PutByOffset(@1, "left", val)
+    //  -: PutStructure(@1, {val:0})
+    //  -: PutByOffset(@0, @1, x)
+    //  -: PutStructure(@0, {x:0})
+    //  -: Jump(#3)
+    //
+    // Block #2
+    //  2: NewObject({})
+    //  -: PutByOffset(@2, "right", val)
+    //  -: PutStructure(@2, {val:0})
+    //  -: PutByOffset(@0, @2, x)
+    //  -: PutStructure(@0, {x:0})
+    //  -: Jump(#3)
+    //
+    // Block #3:
+    //  3: GetByOffset(@0, x)
+    //  4: GetByOffset(@3, val)
+    //  -: Return(@4)
+    template<typename Key>
+    static void mergePointerSets(HashMap<Key, Node*>& my, const HashMap<Key, Node*>& their, NodeSet& toEscape)
+    {
+        auto escape = [&] (Node* identifier) {
+            toEscape.addVoid(identifier);
+        };
+
+        for (const auto& entry : their) {
+            if (!my.contains(entry.key))
+                escape(entry.value);
+        }
+        my.removeIf([&] (const auto& entry) {
+            auto iter = their.find(entry.key);
+            if (iter == their.end()) {
+                escape(entry.value);
+                return true;
+            }
+            if (iter->value != entry.value) {
+                escape(entry.value);
+                escape(iter->value);
+                return true;
+            }
+            return false;
+        });
+    }
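+
+    // For instance (a hedged sketch of the merge above): merging
+    //     my    = { a => #1, b => #2 }
+    //     their = { a => #1, b => #3, c => #4 }
+    // keeps a => #1 (same target on both sides), removes b while
+    // escaping both #2 and #3 (conflicting targets), and escapes #4
+    // (c is missing from my, so #4 may be reachable in ways we no
+    // longer track).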
+
+    void escapeAllocation(Node* identifier)
+    {
+        Allocation& allocation = getAllocation(identifier);
+        if (allocation.isEscapedAllocation())
+            return;
+
+        Allocation unescaped = WTFMove(allocation);
+        allocation = Allocation(unescaped.identifier(), Allocation::Kind::Escaped);
+
+        for (const auto& entry : unescaped.fields())
+            escapeAllocation(entry.value);
+
+        if (m_wantEscapees)
+            m_escapees.add(unescaped.identifier(), WTFMove(unescaped));
+    }
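+
+    // E.g. (a sketch): if #0.x points to #1 and #1.y points to #2,
+    // escapeAllocation(#0) also escapes #1 and #2: once #0 is
+    // materialized, its fields live in real memory, so everything
+    // they point to must be materializable as well.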
+
+    void prune()
+    {
+        NodeSet reachable;
+        for (const auto& entry : m_pointers)
+            reachable.addVoid(entry.value);
+
+        // Repeatedly mark as reachable the allocations stored in the
+        // fields of other reachable allocations
+        {
+            Vector<Node*> worklist;
+            worklist.appendRange(reachable.begin(), reachable.end());
+
+            while (!worklist.isEmpty()) {
+                Node* identifier = worklist.takeLast();
+                Allocation& allocation = m_allocations.find(identifier)->value;
+                for (const auto& entry : allocation.fields()) {
+                    if (reachable.add(entry.value).isNewEntry)
+                        worklist.append(entry.value);
+                }
+            }
+        }
+
+        // Remove unreachable allocations
+        m_allocations.removeIf(
+            [&] (const auto& entry) {
+                return !reachable.contains(entry.key);
+            });
+    }
+
+    bool m_reached = false;
+    HashMap<Node*, Node*> m_pointers;
+    HashMap<Node*, Allocation> m_allocations;
+
+    bool m_wantEscapees = false;
+    HashMap<Node*, Allocation> m_escapees;
+};
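+
+// How the phase drives a LocalHeap across one block (a hedged sketch
+// of the fixpoint step; performAnalysis below is the real loop):
+//
+//     LocalHeap heap = heapAtHead[block];
+//     for (Node* node : *block)
+//         handleNode(node, ...);             // may call heap.escape()
+//     heap.pruneByLiveness(liveAtTail[block]);
+//     for (BasicBlock* successor : block->successors())
+//         heapAtHead[successor].merge(heap); // escapes on disagreement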
 
 class ObjectAllocationSinkingPhase : public Phase {
 public:
     ObjectAllocationSinkingPhase(Graph& graph)
-        : Phase(graph, "object allocation sinking")
-        , m_ssaCalculator(graph)
+        : Phase(graph, "object allocation elimination")
+        , m_pointerSSA(graph)
+        , m_allocationSSA(graph)
         , m_insertionSet(graph)
     {
     }
-    
+
     bool run()
     {
+        ASSERT(m_graph.m_form == SSA);
         ASSERT(m_graph.m_fixpointState == FixpointNotConverged);
-        
-        m_graph.m_dominators.computeIfNecessary(m_graph);
-        
-        // Logically we wish to consider every NewObject and sink it. However it's probably not
-        // profitable to sink a NewObject that will always escape. So, first we do a very simple
-        // forward flow analysis that determines the set of NewObject nodes that have any chance
-        // of benefiting from object allocation sinking. Then we fixpoint the following rules:
-        //
-        // - For each NewObject, we turn the original NewObject into a PhantomNewObject and then
-        //   we insert MaterializeNewObject just before those escaping sites that come before any
-        //   other escaping sites - that is, there is no path between the allocation and those sites
-        //   that would see any other escape. Note that Upsilons constitute escaping sites. Then we
-        //   insert additional MaterializeNewObject nodes on Upsilons that feed into Phis that mix
-        //   materializations and the original PhantomNewObject. We then turn each PutByOffset over a
-        //   PhantomNewObject into a PutByOffsetHint.
-        //
-        // - We perform the same optimization for MaterializeNewObject. This allows us to cover
-        //   cases where we had MaterializeNewObject flowing into a PutByOffsetHint.
-        //
-        // We could also add this rule:
-        //
-        // - If all of the Upsilons of a Phi have a MaterializeNewObject that isn't used by anyone
-        //   else, then replace the Phi with the MaterializeNewObject.
-        //
-        //   FIXME: Implement this. Note that this totally doable but it requires some gnarly
-        //   code, and to be effective the pruner needs to be aware of it. Currently any Upsilon
-        //   is considered to be an escape even by the pruner, so it's unlikely that we'll see
-        //   many cases of Phi over Materializations.
-        //   https://bugs.webkit.org/show_bug.cgi?id=136927
-        
+
         if (!performSinking())
             return false;
-        
-        while (performSinking()) { }
-        
-        if (verbose) {
-            dataLog("Graph after sinking:\n");
+
+        if (DFGObjectAllocationSinkingPhaseInternal::verbose) {
+            dataLog("Graph after elimination:\n");
             m_graph.dump();
         }
-        
+
         return true;
     }
 
@@ -105,791 +725,1680 @@ private:
     bool performSinking()
     {
         m_graph.computeRefCounts();
+        m_graph.initializeNodeOwners();
+        m_graph.ensureSSADominators();
         performLivenessAnalysis(m_graph);
         performOSRAvailabilityAnalysis(m_graph);
-        
+        m_combinedLiveness = CombinedLiveness(m_graph);
+
         CString graphBeforeSinking;
         if (Options::verboseValidationFailure() && Options::validateGraphAtEachPhase()) {
             StringPrintStream out;
             m_graph.dump(out);
             graphBeforeSinking = out.toCString();
         }
-        
-        if (verbose) {
-            dataLog("Graph before sinking:\n");
+
+        if (DFGObjectAllocationSinkingPhaseInternal::verbose) {
+            dataLog("Graph before elimination:\n");
             m_graph.dump();
         }
-        
-        determineMaterializationPoints();
-        if (m_sinkCandidates.isEmpty())
+
+        performAnalysis();
+
+        if (!determineSinkCandidates())
             return false;
-        
-        // At this point we are committed to sinking the sinking candidates.
-        placeMaterializationPoints();
-        lowerNonReadingOperationsOnPhantomAllocations();
-        promoteSunkenFields();
-        
+
+        if (DFGObjectAllocationSinkingPhaseInternal::verbose) {
+            for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
+                dataLog("Heap at head of ", *block, ": \n", m_heapAtHead[block]);
+                dataLog("Heap at tail of ", *block, ": \n", m_heapAtTail[block]);
+            }
+        }
+
+        promoteLocalHeap();
+        removeICStatusFilters();
+
         if (Options::validateGraphAtEachPhase())
-            validate(m_graph, DumpGraph, graphBeforeSinking);
-        
-        if (verbose)
-            dataLog("Sinking iteration changed the graph.\n");
+            DFG::validate(m_graph, DumpGraph, graphBeforeSinking);
         return true;
     }
-    
-    void determineMaterializationPoints()
+
+    void performAnalysis()
     {
-        // The premise of this pass is that if there exists a point in the program where some
-        // path from a phantom allocation site to that point causes materialization, then *all*
-        // paths cause materialization. This should mean that there are never any redundant
-        // materializations.
-        
-        m_sinkCandidates.clear();
-        m_materializationToEscapee.clear();
-        m_materializationSiteToMaterializations.clear();
-        
-        BlockMap<HashMap<Node*, bool>> materializedAtHead(m_graph);
-        BlockMap<HashMap<Node*, bool>> materializedAtTail(m_graph);
-        
+        m_heapAtHead = BlockMap<LocalHeap>(m_graph);
+        m_heapAtTail = BlockMap<LocalHeap>(m_graph);
+
         bool changed;
         do {
-            if (verbose)
-                dataLog("Doing iteration of materialization point placement.\n");
+            if (DFGObjectAllocationSinkingPhaseInternal::verbose)
+                dataLog("Doing iteration of escape analysis.\n");
             changed = false;
-            for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
-                HashMap<Node*, bool> materialized = materializedAtHead[block];
-                if (verbose)
-                    dataLog("    Looking at block ", pointerDump(block), "\n");
+
+            for (BasicBlock* block : m_graph.blocksInPreOrder()) {
+                m_heapAtHead[block].setReached();
+                m_heap = m_heapAtHead[block];
+
                 for (Node* node : *block) {
                     handleNode(
                         node,
-                        [&] () {
-                            materialized.add(node, false);
-                        },
-                        [&] (Node* escapee) {
-                            auto iter = materialized.find(escapee);
-                            if (iter != materialized.end()) {
-                                if (verbose)
-                                    dataLog("    ", escapee, " escapes at ", node, "\n");
-                                iter->value = true;
-                            }
+                        [] (PromotedHeapLocation, LazyNode) { },
+                        [&] (PromotedHeapLocation) -> Node* {
+                            return nullptr;
                         });
                 }
-                
-                if (verbose)
-                    dataLog("    Materialized at tail of ", pointerDump(block), ": ", mapDump(materialized), "\n");
-                
-                if (materialized == materializedAtTail[block])
+
+                if (m_heap == m_heapAtTail[block])
                     continue;
-                
-                materializedAtTail[block] = materialized;
+
+                m_heapAtTail[block] = m_heap;
                 changed = true;
-                
-                // Only propagate things to our successors if they are alive in all successors.
-                // So, we prune materialized-at-tail to only include things that are live.
-                Vector<Node*> toRemove;
-                for (auto pair : materialized) {
-                    if (!block->ssa->liveAtTail.contains(pair.key))
-                        toRemove.append(pair.key);
-                }
-                for (Node* key : toRemove)
-                    materialized.remove(key);
-                
-                for (BasicBlock* successorBlock : block->successors()) {
-                    for (auto pair : materialized) {
-                        materializedAtHead[successorBlock].add(
-                            pair.key, false).iterator->value |= pair.value;
+
+                m_heap.assertIsValid();
+
+                // We keep only pointers that are live, and only
+                // allocations that are either live, pointed to by a
+                // live pointer, or (recursively) stored in a field of
+                // a live allocation.
+                //
+                // This means we can accidentally leak non-dominating
+                // nodes into the successor. However, due to the
+                // non-dominance property, we are guaranteed that the
+                // successor has at least one predecessor that is not
+                // dominated either: this means any reference to a
+                // non-dominating allocation in the successor will
+                // trigger an escape and get pruned during the merge.
+                m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);
+
+                for (BasicBlock* successorBlock : block->successors())
+                    m_heapAtHead[successorBlock].merge(m_heap);
+            }
+        } while (changed);
+    }
+
+    template<typename WriteFunctor, typename ResolveFunctor>
+    void handleNode(
+        Node* node,
+        const WriteFunctor& heapWrite,
+        const ResolveFunctor& heapResolve)
+    {
+        m_heap.assertIsValid();
+        ASSERT(m_heap.takeEscapees().isEmpty());
+
+        Allocation* target = nullptr;
+        HashMap<PromotedLocationDescriptor, LazyNode> writes;
+        PromotedLocationDescriptor exactRead;
+
+        switch (node->op()) {
+        case NewObject:
+            target = &m_heap.newAllocation(node, Allocation::Kind::Object);
+            target->setStructures(node->structure());
+            writes.add(
+                StructurePLoc, LazyNode(m_graph.freeze(node->structure().get())));
+            break;
+
+        case NewFunction:
+        case NewGeneratorFunction:
+        case NewAsyncGeneratorFunction:
+        case NewAsyncFunction: {
+            if (isStillValid(node->castOperand<FunctionExecutable*>()->singletonFunction())) {
+                m_heap.escape(node->child1().node());
+                break;
+            }
+
+            if (node->op() == NewGeneratorFunction)
+                target = &m_heap.newAllocation(node, Allocation::Kind::GeneratorFunction);
+            else if (node->op() == NewAsyncFunction)
+                target = &m_heap.newAllocation(node, Allocation::Kind::AsyncFunction);
+            else if (node->op() == NewAsyncGeneratorFunction)
+                target = &m_heap.newAllocation(node, Allocation::Kind::AsyncGeneratorFunction);
+            else
+                target = &m_heap.newAllocation(node, Allocation::Kind::Function);
+
+            writes.add(FunctionExecutablePLoc, LazyNode(node->cellOperand()));
+            writes.add(FunctionActivationPLoc, LazyNode(node->child1().node()));
+            break;
+        }
+
+        case NewRegexp: {
+            target = &m_heap.newAllocation(node, Allocation::Kind::RegExpObject);
+
+            writes.add(RegExpObjectRegExpPLoc, LazyNode(node->cellOperand()));
+            writes.add(RegExpObjectLastIndexPLoc, LazyNode(node->child1().node()));
+            break;
+        }
+
+        case CreateActivation: {
+            if (isStillValid(node->castOperand<SymbolTable*>()->singletonScope())) {
+                m_heap.escape(node->child1().node());
+                break;
+            }
+            target = &m_heap.newAllocation(node, Allocation::Kind::Activation);
+            writes.add(ActivationSymbolTablePLoc, LazyNode(node->cellOperand()));
+            writes.add(ActivationScopePLoc, LazyNode(node->child1().node()));
+            {
+                SymbolTable* symbolTable = node->castOperand<SymbolTable*>();
+                LazyNode initialValue(m_graph.freeze(node->initializationValueForActivation()));
+                for (unsigned offset = 0; offset < symbolTable->scopeSize(); ++offset) {
+                    writes.add(
+                        PromotedLocationDescriptor(ClosureVarPLoc, offset),
+                        initialValue);
+                }
+            }
+            break;
+        }
+
+        case PutStructure:
+            target = m_heap.onlyLocalAllocation(node->child1().node());
+            if (target && target->isObjectAllocation()) {
+                writes.add(StructurePLoc, LazyNode(m_graph.freeze(JSValue(node->transition()->next.get()))));
+                target->setStructures(node->transition()->next);
+            } else
+                m_heap.escape(node->child1().node());
+            break;
+
+        case CheckStructureOrEmpty:
+        case CheckStructure: {
+            Allocation* allocation = m_heap.onlyLocalAllocation(node->child1().node());
+            if (allocation && allocation->isObjectAllocation()) {
+                RegisteredStructureSet filteredStructures = allocation->structures();
+                filteredStructures.filter(node->structureSet());
+                if (filteredStructures.isEmpty()) {
+                    // FIXME: Write a test for this:
+                    // https://bugs.webkit.org/show_bug.cgi?id=174322
+                    m_heap.escape(node->child1().node());
+                    break;
+                }
+                allocation->setStructures(filteredStructures);
+                if (Node* value = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc)))
+                    node->convertToCheckStructureImmediate(value);
+            } else
+                m_heap.escape(node->child1().node());
+            break;
+        }
+
+        case GetByOffset:
+        case GetGetterSetterByOffset:
+            target = m_heap.onlyLocalAllocation(node->child2().node());
+            if (target && target->isObjectAllocation()) {
+                unsigned identifierNumber = node->storageAccessData().identifierNumber;
+                exactRead = PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber);
+            } else {
+                m_heap.escape(node->child1().node());
+                m_heap.escape(node->child2().node());
+            }
+            break;
+
+        case MultiGetByOffset: {
+            Allocation* allocation = m_heap.onlyLocalAllocation(node->child1().node());
+            if (allocation && allocation->isObjectAllocation()) {
+                MultiGetByOffsetData& data = node->multiGetByOffsetData();
+                RegisteredStructureSet validStructures;
+                bool hasInvalidStructures = false;
+                for (const auto& multiGetByOffsetCase : data.cases) {
+                    if (!allocation->structures().overlaps(multiGetByOffsetCase.set()))
+                        continue;
+
+                    switch (multiGetByOffsetCase.method().kind()) {
+                    case GetByOffsetMethod::LoadFromPrototype: // We need to escape those
+                    case GetByOffsetMethod::Constant: // We don't really have a way of expressing this
+                        hasInvalidStructures = true;
+                        break;
+
+                    case GetByOffsetMethod::Load: // We're good
+                        validStructures.merge(multiGetByOffsetCase.set());
+                        break;
+
+                    default:
+                        RELEASE_ASSERT_NOT_REACHED();
+                    }
+                }
+                if (hasInvalidStructures || validStructures.isEmpty()) {
+                    m_heap.escape(node->child1().node());
+                    break;
+                }
+                unsigned identifierNumber = data.identifierNumber;
+                PromotedHeapLocation location(NamedPropertyPLoc, allocation->identifier(), identifierNumber);
+                if (Node* value = heapResolve(location)) {
+                    if (allocation->structures().isSubsetOf(validStructures))
+                        node->replaceWithWithoutChecks(value);
+                    else {
+                        Node* structure = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc));
+                        ASSERT(structure);
+                        allocation->filterStructures(validStructures);
+                        node->convertToCheckStructure(m_graph.addStructureSet(allocation->structures()));
+                        node->convertToCheckStructureImmediate(structure);
+                        node->setReplacement(value);
                     }
+                } else if (!allocation->structures().isSubsetOf(validStructures)) {
+                    // Even though we don't need the result here, we still need
+                    // to make the call to tell our caller that we could need
+                    // the StructurePLoc.
+                    // The reason for this is that when we decide not to sink a
+                    // node, we will still lower any reads of its fields before
+                    // it escapes (which are usually reads across a function
+                    // call that DFGClobberize can't handle) - but we only do
+                    // this for PromotedHeapLocations that we have seen read
+                    // during the analysis!
+                    heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc));
+                    allocation->filterStructures(validStructures);
                 }
+                Node* identifier = allocation->get(location.descriptor());
+                if (identifier)
+                    m_heap.newPointer(node, identifier);
+            } else
+                m_heap.escape(node->child1().node());
+            break;
+        }
+
+        case PutByOffset:
+            target = m_heap.onlyLocalAllocation(node->child2().node());
+            if (target && target->isObjectAllocation()) {
+                unsigned identifierNumber = node->storageAccessData().identifierNumber;
+                writes.add(
+                    PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber),
+                    LazyNode(node->child3().node()));
+            } else {
+                m_heap.escape(node->child1().node());
+                m_heap.escape(node->child2().node());
+                m_heap.escape(node->child3().node());
             }
-        } while (changed);
-        
-        // Determine the sink candidates. Broadly, a sink candidate is a node that handleNode()
-        // believes is sinkable, and one of the following is true:
+            break;
+
+        case GetClosureVar:
+            target = m_heap.onlyLocalAllocation(node->child1().node());
+            if (target && target->isActivationAllocation()) {
+                exactRead =
+                    PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset());
+            } else
+                m_heap.escape(node->child1().node());
+            break;
+
+        case PutClosureVar:
+            target = m_heap.onlyLocalAllocation(node->child1().node());
+            if (target && target->isActivationAllocation()) {
+                writes.add(
+                    PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset()),
+                    LazyNode(node->child2().node()));
+            } else {
+                m_heap.escape(node->child1().node());
+                m_heap.escape(node->child2().node());
+            }
+            break;
+
+        case SkipScope:
+            target = m_heap.onlyLocalAllocation(node->child1().node());
+            if (target && target->isActivationAllocation())
+                exactRead = ActivationScopePLoc;
+            else
+                m_heap.escape(node->child1().node());
+            break;
+
+        case GetExecutable:
+            target = m_heap.onlyLocalAllocation(node->child1().node());
+            if (target && target->isFunctionAllocation())
+                exactRead = FunctionExecutablePLoc;
+            else
+                m_heap.escape(node->child1().node());
+            break;
+
+        case GetScope:
+            target = m_heap.onlyLocalAllocation(node->child1().node());
+            if (target && target->isFunctionAllocation())
+                exactRead = FunctionActivationPLoc;
+            else
+                m_heap.escape(node->child1().node());
+            break;
+
+        case GetRegExpObjectLastIndex:
+            target = m_heap.onlyLocalAllocation(node->child1().node());
+            if (target && target->isRegExpObjectAllocation())
+                exactRead = RegExpObjectLastIndexPLoc;
+            else
+                m_heap.escape(node->child1().node());
+            break;
+
+        case SetRegExpObjectLastIndex:
+            target = m_heap.onlyLocalAllocation(node->child1().node());
+            if (target && target->isRegExpObjectAllocation()) {
+                writes.add(
+                    PromotedLocationDescriptor(RegExpObjectLastIndexPLoc),
+                    LazyNode(node->child2().node()));
+            } else {
+                m_heap.escape(node->child1().node());
+                m_heap.escape(node->child2().node());
+            }
+            break;
+
+        case Check:
+        case CheckVarargs:
+            m_graph.doToChildren(
+                node,
+                [&] (Edge edge) {
+                    if (edge.willNotHaveCheck())
+                        return;
+
+                    if (alreadyChecked(edge.useKind(), SpecObject))
+                        return;
+
+                    m_heap.escape(edge.node());
+                });
+            break;
+
+        case MovHint:
+        case PutHint:
+            // Handled by OSR availability analysis
+            break;
+
+        case FilterCallLinkStatus:
+        case FilterGetByIdStatus:
+        case FilterPutByIdStatus:
+        case FilterInByIdStatus:
+            break;
+
+        default:
+            m_graph.doToChildren(
+                node,
+                [&] (Edge edge) {
+                    m_heap.escape(edge.node());
+                });
+            break;
+        }
+
+        if (exactRead) {
+            ASSERT(target);
+            ASSERT(writes.isEmpty());
+            if (Node* value = heapResolve(PromotedHeapLocation(target->identifier(), exactRead))) {
+                ASSERT(!value->replacement());
+                node->replaceWith(m_graph, value);
+            }
+            Node* identifier = target->get(exactRead);
+            if (identifier)
+                m_heap.newPointer(node, identifier);
+        }
+
+        for (auto entry : writes) {
+            ASSERT(target);
+            if (entry.value.isNode())
+                target->set(entry.key, m_heap.follow(entry.value.asNode()));
+            else
+                target->remove(entry.key);
+            heapWrite(PromotedHeapLocation(target->identifier(), entry.key), entry.value);
+        }
+
+        m_heap.assertIsValid();
+    }
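+
+    // handleNode is instantiated several times with different functors
+    // (a sketch of the protocol as used in this phase):
+    //  - performAnalysis passes no-op functors; only the abstract heap
+    //    transitions matter there.
+    //  - determineSinkCandidates uses heapWrite to record stores of one
+    //    local allocation into another, and heapResolve to record that
+    //    a promoted location was read.
+    //  - the materialization pass uses no-op functors again, but with
+    //    setWantEscapees() so escapees are collected at each node.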
+
+    bool determineSinkCandidates()
+    {
+        m_sinkCandidates.clear();
+        m_materializationToEscapee.clear();
+        m_materializationSiteToMaterializations.clear();
+        m_materializationSiteToRecoveries.clear();
+        m_materializationSiteToHints.clear();
+
+        // Logically we wish to consider every allocation and sink
+        // it. However, it is probably not profitable to sink an
+        // allocation that will always escape. So, we only sink an
+        // allocation if one of the following is true:
         //
-        // 1) There exists a basic block with only backward outgoing edges (or no outgoing edges)
-        //    in which the node wasn't materialized. This is meant to catch effectively-infinite
-        //    loops in which we don't need to have allocated the object.
+        // 1) There exists a basic block with only backwards outgoing
+        //    edges (or no outgoing edges) in which the node wasn't
+        //    materialized. This is meant to catch
+        //    effectively-infinite loops in which we don't need to
+        //    have allocated the object.
         //
-        // 2) There exists a basic block at the tail of which the node is not materialized and the
-        //    node is dead.
+        // 2) There exists a basic block at the tail of which the node
+        //    is dead and not materialized.
         //
-        // 3) The sum of execution counts of the materializations is less than the sum of
-        //    execution counts of the original node.
+        // 3) The sum of execution counts of the materializations is
+        //    less than the sum of execution counts of the original
+        //    node.
         //
         // We currently implement only rule #2.
         // FIXME: Implement the two other rules.
         // https://bugs.webkit.org/show_bug.cgi?id=137073 (rule #1)
         // https://bugs.webkit.org/show_bug.cgi?id=137074 (rule #3)
-        
-        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
-            for (auto pair : materializedAtTail[block]) {
-                if (pair.value)
-                    continue; // It was materialized.
-                
-                if (block->ssa->liveAtTail.contains(pair.key))
-                    continue; // It might still get materialized in all of the successors.
-                
-                // We know that it died in this block and it wasn't materialized. That means that
-                // if we sink this allocation, then *this* will be a path along which we never
-                // have to allocate. Profit!
-                m_sinkCandidates.add(pair.key);
-            }
-        }
-        
-        if (m_sinkCandidates.isEmpty())
-            return;
-        
-        // A materialization edge exists at any point where a node escapes but hasn't been
-        // materialized yet. We do this in two parts. First we find all of the nodes that cause
-        // escaping to happen, where the escapee had not yet been materialized. This catches
-        // everything but loops. We then catch loops - as well as weirder control flow constructs -
-        // in a subsequent pass that looks at places in the CFG where an edge exists from a block
-        // that hasn't materialized to a block that has. We insert the materialization along such an
-        // edge, and we rely on the fact that critical edges were already broken so that we actually
-        // either insert the materialization at the head of the successor or the tail of the
-        // predecessor.
         //
-        // FIXME: This can create duplicate allocations when we really only needed to perform one.
-        // For example:
+        // However, these rules allow for a sunk object to be put into
+        // a non-sunk one, which we don't support. We could solve this
+        // by supporting PutHints on local allocations, making these
+        // objects only partially correct, and we would need to adapt
+        // the OSR availability analysis and OSR exit to handle
+        // this. This would be totally doable, but would create a
+        // super rare, and thus bug-prone, code path.
+        // So, instead, we need to implement one of the following
+        // closure rules:
         //
-        //     var o = new Object();
-        //     if (rare) {
-        //         if (stuff)
-        //             call(o); // o escapes here.
-        //         return;
-        //     }
-        //     // o doesn't escape down here.
+        // 1) If we put a sink candidate into a local allocation that
+        //    is not a sink candidate, change our minds and don't
+        //    actually sink the sink candidate.
         //
-        // In this example, we would place a materialization point at call(o) and then we would find
-        // ourselves having to insert another one at the implicit else case of that if statement
-        // ('cause we've broken critical edges). We would instead really like to just have one
-        // materialization point right at the top of the then case of "if (rare)". To do this, we can
-        // find the LCA of the various materializations in the dom tree.
-        // https://bugs.webkit.org/show_bug.cgi?id=137124
-        
-        // First pass: find intra-block materialization points.
-        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
-            HashSet<Node*> materialized;
-            for (auto pair : materializedAtHead[block]) {
-                if (pair.value && m_sinkCandidates.contains(pair.key))
-                    materialized.add(pair.key);
-            }
-            
-            if (verbose)
-                dataLog("Placing materialization points in ", pointerDump(block), " with materialization set ", listDump(materialized), "\n");
-            
-            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
-                Node* node = block->at(nodeIndex);
-                
+        // 2) If we put a sink candidate into a local allocation, that
+        //    allocation becomes a sink candidate as well.
+        //
+        // We currently choose to implement closure rule #2.
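+        //
+        // For example (a sketch), in:
+        //
+        //  0: NewObject({})
+        //  1: NewObject({})
+        //  -: PutByOffset(@1, @0, x)
+        //
+        // if @0 is a sink candidate, rule #2 makes @1 a candidate as
+        // well; the dependency map below records @0 => [@1] for this
+        // purpose.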
+        HashMap<Node*, Vector<Node*>> dependencies;
+        bool hasUnescapedReads = false;
+        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
+            m_heap = m_heapAtHead[block];
+
+            for (Node* node : *block) {
                 handleNode(
                     node,
-                    [&] () { },
-                    [&] (Node* escapee) {
-                        if (verbose)
-                            dataLog("Seeing ", escapee, " escape at ", node, "\n");
-                        
-                        if (!m_sinkCandidates.contains(escapee)) {
-                            if (verbose)
-                                dataLog("    It's not a sink candidate.\n");
-                            return;
-                        }
-                        
-                        if (!materialized.add(escapee).isNewEntry) {
-                            if (verbose)
-                                dataLog("   It's already materialized.\n");
+                    [&] (PromotedHeapLocation location, LazyNode value) {
+                        if (!value.isNode())
                             return;
-                        }
-                        
-                        createMaterialize(escapee, node);
+
+                        Allocation* allocation = m_heap.onlyLocalAllocation(value.asNode());
+                        if (allocation && !allocation->isEscapedAllocation())
+                            dependencies.add(allocation->identifier(), Vector<Node*>()).iterator->value.append(location.base());
+                    },
+                    [&] (PromotedHeapLocation) -> Node* {
+                        hasUnescapedReads = true;
+                        return nullptr;
                     });
             }
+
+            // The sink candidates are initially the unescaped
+            // allocations dying at the tails of blocks.
+            NodeSet allocations;
+            for (const auto& entry : m_heap.allocations()) {
+                if (!entry.value.isEscapedAllocation())
+                    allocations.addVoid(entry.key);
+            }
+
+            m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);
+
+            for (Node* identifier : allocations) {
+                if (!m_heap.isAllocation(identifier))
+                    m_sinkCandidates.addVoid(identifier);
+            }
         }
-        
-        // Second pass: find CFG edge materialization points.
+
+        // Ensure that the set of sink candidates is closed for put operations
+        Vector<Node*> worklist;
+        worklist.appendRange(m_sinkCandidates.begin(), m_sinkCandidates.end());
+
+        while (!worklist.isEmpty()) {
+            for (Node* identifier : dependencies.get(worklist.takeLast())) {
+                if (m_sinkCandidates.add(identifier).isNewEntry)
+                    worklist.append(identifier);
+            }
+        }
+
+        if (m_sinkCandidates.isEmpty())
+            return hasUnescapedReads;
+
+        if (DFGObjectAllocationSinkingPhaseInternal::verbose)
+            dataLog("Candidates: ", listDump(m_sinkCandidates), "\n");
+
+        // Create the materialization nodes
         for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
-            for (BasicBlock* successorBlock : block->successors()) {
-                for (auto pair : materializedAtHead[successorBlock]) {
-                    Node* allocation = pair.key;
-                    
-                    // We only care if it is materialized in the successor.
-                    if (!pair.value)
-                        continue;
-                    
-                    // We only care about sinking candidates.
-                    if (!m_sinkCandidates.contains(allocation))
-                        continue;
-                    
-                    // We only care if it isn't materialized in the predecessor.
-                    if (materializedAtTail[block].get(allocation))
-                        continue;
-                    
-                    // We need to decide if we put the materialization at the head of the successor,
-                    // or the tail of the predecessor. It needs to be placed so that the allocation
-                    // is never materialized before, and always materialized after.
-                    
-                    // Is it never materialized in any of successor's predecessors? I like to think
-                    // of "successors' predecessors" and "predecessor's successors" as the "shadow",
-                    // because of what it looks like when you draw it.
-                    bool neverMaterializedInShadow = true;
-                    for (BasicBlock* shadowBlock : successorBlock->predecessors) {
-                        if (materializedAtTail[shadowBlock].get(allocation)) {
-                            neverMaterializedInShadow = false;
-                            break;
-                        }
-                    }
-                    
-                    if (neverMaterializedInShadow) {
-                        createMaterialize(allocation, successorBlock->firstOriginNode());
+            m_heap = m_heapAtHead[block];
+            m_heap.setWantEscapees();
+
+            for (Node* node : *block) {
+                handleNode(
+                    node,
+                    [] (PromotedHeapLocation, LazyNode) { },
+                    [] (PromotedHeapLocation) -> Node* {
+                        return nullptr;
+                    });
+                auto escapees = m_heap.takeEscapees();
+                if (!escapees.isEmpty())
+                    placeMaterializations(escapees, node);
+            }
+
+            m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);
+
+            {
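+                // An allocation that is still unescaped at our tail
+                // but is no longer an unescaped local allocation at
+                // the head of some successor must be materialized on
+                // that edge, i.e. right before our terminal.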
+                HashMap<Node*, Allocation> escapingOnEdge;
+                for (const auto& entry : m_heap.allocations()) {
+                    if (entry.value.isEscapedAllocation())
                         continue;
+
+                    bool mustEscape = false;
+                    for (BasicBlock* successorBlock : block->successors()) {
+                        if (!m_heapAtHead[successorBlock].isAllocation(entry.key)
+                            || m_heapAtHead[successorBlock].getAllocation(entry.key).isEscapedAllocation())
+                            mustEscape = true;
                     }
-                    
-                    // Because we had broken critical edges, it must be the case that the
-                    // predecessor's successors all materialize the object. This is because the
-                    // previous case is guaranteed to catch the case where the successor only has
-                    // one predecessor. When critical edges are broken, this is also the only case
-                    // where the predecessor could have had multiple successors. Therefore we have
-                    // already handled the case where the predecessor has multiple successors.
-                    DFG_ASSERT(m_graph, block, block->numSuccessors() == 1);
-                    
-                    createMaterialize(allocation, block->last());
+
+                    if (mustEscape)
+                        escapingOnEdge.add(entry.key, entry.value);
                 }
+                placeMaterializations(WTFMove(escapingOnEdge), block->terminal());
             }
         }
+
+        return hasUnescapedReads || !m_sinkCandidates.isEmpty();
     }
-    
-    void placeMaterializationPoints()
+
+    void placeMaterializations(HashMap<Node*, Allocation> escapees, Node* where)
     {
-        m_ssaCalculator.reset();
-        
-        // The "variables" are the object allocations that we are sinking. So, nodeToVariable maps
-        // sink candidates (aka escapees) to the SSACalculator's notion of Variable, and indexToNode
-        // maps in the opposite direction using the SSACalculator::Variable::index() as the key.
-        HashMap<Node*, SSACalculator::Variable*> nodeToVariable;
-        Vector<Node*> indexToNode;
-        
-        for (Node* node : m_sinkCandidates) {
-            SSACalculator::Variable* variable = m_ssaCalculator.newVariable();
-            nodeToVariable.add(node, variable);
-            ASSERT(indexToNode.size() == variable->index());
-            indexToNode.append(node);
-        }
-        
-        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
-            for (Node* node : *block) {
-                if (SSACalculator::Variable* variable = nodeToVariable.get(node))
-                    m_ssaCalculator.newDef(variable, block, node);
-                
-                for (Node* materialize : m_materializationSiteToMaterializations.get(node)) {
-                    m_ssaCalculator.newDef(
-                        nodeToVariable.get(m_materializationToEscapee.get(materialize)),
-                        block, materialize);
+        // We don't create materializations if the escapee is not a
+        // sink candidate
+        escapees.removeIf(
+            [&] (const auto& entry) {
+                return !m_sinkCandidates.contains(entry.key);
+            });
+        if (escapees.isEmpty())
+            return;
+
+        // First collect the hints that will be needed when the node
+        // we materialize is still stored into other unescaped sink candidates.
+        // The way to interpret this vector is:
+        //
+        // PromotedHeapLocation(NotEscapedAllocation, field) = identifierAllocation
+        //
+        // e.g:
+        // PromotedHeapLocation(@PhantomNewFunction, FunctionActivationPLoc) = IdentifierOf(@MaterializeCreateActivation)
+        // or:
+        // PromotedHeapLocation(@PhantomCreateActivation, ClosureVarPLoc(x)) = IdentifierOf(@NewFunction)
+        //
+        // Here, the rhs of the `=` is what gets materialized at this `where` point
+        // in the program, and IdentifierOf(Materialization) is the original sunken
+        // allocation of the materialization.
+        //
+        // The reason we need to collect all the `identifiers` here is that
+        // we may materialize multiple versions of the allocation along control
+        // flow edges. We will PutHint these values along those edges. However,
+        // we also need to PutHint them when we join and have a Phi of the allocations.
+        Vector<std::pair<PromotedHeapLocation, Node*>> hints;
+        for (const auto& entry : m_heap.allocations()) {
+            if (escapees.contains(entry.key))
+                continue;
+
+            for (const auto& field : entry.value.fields()) {
+                ASSERT(m_sinkCandidates.contains(entry.key) || !escapees.contains(field.value));
+                auto iter = escapees.find(field.value);
+                if (iter != escapees.end()) {
+                    ASSERT(m_sinkCandidates.contains(field.value));
+                    hints.append(std::make_pair(PromotedHeapLocation(entry.key, field.key), field.value));
                 }
             }
         }
-        
-        m_ssaCalculator.computePhis(
-            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
-                Node* allocation = indexToNode[variable->index()];
-                if (!block->ssa->liveAtHead.contains(allocation))
-                    return nullptr;
-                
-                Node* phiNode = m_graph.addNode(allocation->prediction(), Phi, NodeOrigin());
-                phiNode->mergeFlags(NodeResultJS);
-                return phiNode;
-            });
-        
-        // Place Phis in the right places. Replace all uses of any allocation with the appropriate
-        // materialization. Create the appropriate Upsilon nodes.
-        LocalOSRAvailabilityCalculator availabilityCalculator;
-        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
-            HashMap<Node*, Node*> mapping;
-            
-            for (Node* candidate : block->ssa->liveAtHead) {
-                SSACalculator::Variable* variable = nodeToVariable.get(candidate);
-                if (!variable)
-                    continue;
-                
-                SSACalculator::Def* def = m_ssaCalculator.reachingDefAtHead(block, variable);
-                if (!def)
-                    continue;
-                
-                mapping.set(indexToNode[variable->index()], def->value());
+
+        // Now we need to order the materializations. Any order is
+        // valid (as long as we materialize a node first if it is
+        // needed for the materialization of another node, e.g. a
+        // function's activation must be materialized before the
+        // function itself), but we want to minimize the number of
+        // times we have to place Puts to close cycles after a
+        // materialization. In other words, we are trying to find the
+        // minimum number of materializations to remove from the
+        // materialization graph to make it a DAG, known as the
+        // feedback vertex set problem. Unfortunately, this is an
+        // NP-hard problem, which we don't want to solve exactly.
+        //
+        // Instead, we use a simple greedy procedure that proceeds as
+        // follows:
+        //  - While there is at least one node with no outgoing edge
+        //    amongst the remaining materializations, materialize it
+        //    first.
+        //
+        //  - Similarly, while there is at least one node with no
+        //    incoming edge amongst the remaining materializations,
+        //    materialize it last.
+        //
+        //  - When both previous conditions are false, we have an
+        //    actual cycle, and we need to pick a node to
+        //    materialize. We try greedily to remove the "pressure" on
+        //    the remaining nodes by choosing the node with maximum
+        //    |incoming edges| * |outgoing edges| as a measure of how
+        //    "central" to the graph it is. We materialize it first,
+        //    so that all the recoveries will be Puts of things into
+        //    it (rather than Puts of the materialization into other
+        //    objects), which means we will have a single
+        //    StoreBarrier.
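+        //
+        // As a sketch of the cycle breaking (hypothetical
+        // allocations, not from real code): suppose escaping
+        // allocations @a and @b point at each other, with @a.f == @b
+        // and @b.g == @a. Neither of the first two rules applies, so
+        // we greedily pick one, say @a, and materialize it first. @b
+        // then has no unmaterialized field left and is materialized
+        // next, and a single recovery Put of @b into the
+        // already-materialized @a.f closes the cycle.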
+
+
+        // Compute dependencies between materializations
+        HashMap<Node*, NodeSet> dependencies;
+        HashMap<Node*, NodeSet> reverseDependencies;
+        HashMap<Node*, NodeSet> forMaterialization;
+        for (const auto& entry : escapees) {
+            auto& myDependencies = dependencies.add(entry.key, NodeSet()).iterator->value;
+            auto& myDependenciesForMaterialization = forMaterialization.add(entry.key, NodeSet()).iterator->value;
+            reverseDependencies.add(entry.key, NodeSet());
+            for (const auto& field : entry.value.fields()) {
+                if (escapees.contains(field.value) && field.value != entry.key) {
+                    myDependencies.addVoid(field.value);
+                    reverseDependencies.add(field.value, NodeSet()).iterator->value.addVoid(entry.key);
+                    if (field.key.neededForMaterialization())
+                        myDependenciesForMaterialization.addVoid(field.value);
+                }
             }
-            
-            availabilityCalculator.beginBlock(block);
-            for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(block)) {
-                m_insertionSet.insert(0, phiDef->value());
-                
-                Node* originalNode = indexToNode[phiDef->variable()->index()];
-                insertOSRHintsForUpdate(
-                    m_insertionSet, 0, NodeOrigin(), availabilityCalculator.m_availability,
-                    originalNode, phiDef->value());
+        }
 
-                mapping.set(originalNode, phiDef->value());
+        // Helper function to update the materialized set and the
+        // dependencies
+        NodeSet materialized;
+        auto materialize = [&] (Node* identifier) {
+            materialized.addVoid(identifier);
+            for (Node* dep : dependencies.get(identifier))
+                reverseDependencies.find(dep)->value.remove(identifier);
+            for (Node* rdep : reverseDependencies.get(identifier)) {
+                dependencies.find(rdep)->value.remove(identifier);
+                forMaterialization.find(rdep)->value.remove(identifier);
             }
-            
-            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
-                Node* node = block->at(nodeIndex);
+            dependencies.remove(identifier);
+            reverseDependencies.remove(identifier);
+            forMaterialization.remove(identifier);
+        };
 
-                for (Node* materialize : m_materializationSiteToMaterializations.get(node)) {
-                    Node* escapee = m_materializationToEscapee.get(materialize);
-                    m_insertionSet.insert(nodeIndex, materialize);
-                    insertOSRHintsForUpdate(
-                        m_insertionSet, nodeIndex, node->origin,
-                        availabilityCalculator.m_availability, escapee, materialize);
-                    mapping.set(escapee, materialize);
+        // Nodes without remaining unmaterialized fields will be
+        // materialized first, amongst the remaining unmaterialized
+        // nodes
+        StdList<Allocation> toMaterialize;
+        auto firstPos = toMaterialize.begin();
+        auto materializeFirst = [&] (Allocation&& allocation) {
+            materialize(allocation.identifier());
+            // We need to insert *after* the current position
+            if (firstPos != toMaterialize.end())
+                ++firstPos;
+            firstPos = toMaterialize.insert(firstPos, WTFMove(allocation));
+        };
+
+        // Nodes that no other unmaterialized node points to will be
+        // materialized last, amongst the remaining unmaterialized
+        // nodes
+        auto lastPos = toMaterialize.end();
+        auto materializeLast = [&] (Allocation&& allocation) {
+            materialize(allocation.identifier());
+            lastPos = toMaterialize.insert(lastPos, WTFMove(allocation));
+        };
+
+        // These are the promoted locations that contain some of the
+        // allocations we are currently escaping. If they are a location on
+        // some other allocation we are currently materializing, we will need
+        // to "recover" their value with a real put once the corresponding
+        // allocation is materialized; if they are a location on some other
+        // not-yet-materialized allocation, we will need a PutHint.
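+        //
+        // For example (hypothetical allocations), if @a.f holds @b
+        // and both are materialized here with @a ordered before @b,
+        // we must recover @a.f with a real Put once @b exists; if @a
+        // instead remains sunk, a PutHint of @b into @a.f suffices.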
+        Vector<PromotedHeapLocation> toRecover;
+
+        // This loop does the actual cycle breaking
+        while (!escapees.isEmpty()) {
+            materialized.clear();
+
+            // Materialize nodes that won't require recoveries if we can
+            for (auto& entry : escapees) {
+                if (!forMaterialization.find(entry.key)->value.isEmpty())
+                    continue;
+
+                if (dependencies.find(entry.key)->value.isEmpty()) {
+                    materializeFirst(WTFMove(entry.value));
+                    continue;
                 }
-                    
-                availabilityCalculator.executeNode(node);
-                
-                m_graph.doToChildren(
-                    node,
-                    [&] (Edge& edge) {
-                        if (Node* materialize = mapping.get(edge.node()))
-                            edge.setNode(materialize);
-                    });
-                
-                // If you cause an escape, you shouldn't see the original escapee.
-                if (validationEnabled()) {
-                    handleNode(
-                        node,
-                        [&] () { },
-                        [&] (Node* escapee) {
-                            DFG_ASSERT(m_graph, node, !m_sinkCandidates.contains(escapee));
-                        });
+
+                if (reverseDependencies.find(entry.key)->value.isEmpty()) {
+                    materializeLast(WTFMove(entry.value));
+                    continue;
                 }
             }
-            
-            size_t upsilonInsertionPoint = block->size() - 1;
-            Node* upsilonWhere = block->last();
-            NodeOrigin upsilonOrigin = upsilonWhere->origin;
-            for (BasicBlock* successorBlock : block->successors()) {
-                for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(successorBlock)) {
-                    Node* phiNode = phiDef->value();
-                    SSACalculator::Variable* variable = phiDef->variable();
-                    Node* allocation = indexToNode[variable->index()];
-                    
-                    Node* originalIncoming = mapping.get(allocation);
-                    Node* incoming;
-                    if (originalIncoming == allocation) {
-                        // If we have a Phi that combines materializations with the original
-                        // phantom object, then the path with the phantom object must materialize.
-                        
-                        incoming = createMaterialize(allocation, upsilonWhere);
-                        m_insertionSet.insert(upsilonInsertionPoint, incoming);
-                        insertOSRHintsForUpdate(
-                            m_insertionSet, upsilonInsertionPoint, upsilonOrigin,
-                            availabilityCalculator.m_availability, originalIncoming, incoming);
-                    } else
-                        incoming = originalIncoming;
-                    
-                    m_insertionSet.insertNode(
-                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
-                        OpInfo(phiNode), incoming->defaultEdge());
+
+            // We reach this only if there is an actual cycle that needs
+            // breaking. Because we do not want to solve a NP-hard problem
+            // here, we just heuristically pick a node and materialize it
+            // first.
+            if (materialized.isEmpty()) {
+                uint64_t maxEvaluation = 0;
+                Allocation* bestAllocation = nullptr;
+                for (auto& entry : escapees) {
+                    if (!forMaterialization.find(entry.key)->value.isEmpty())
+                        continue;
+
+                    uint64_t evaluation =
+                        static_cast<uint64_t>(dependencies.get(entry.key).size()) * reverseDependencies.get(entry.key).size();
+                    if (evaluation > maxEvaluation) {
+                        maxEvaluation = evaluation;
+                        bestAllocation = &entry.value;
+                    }
                 }
+                RELEASE_ASSERT(maxEvaluation > 0);
+
+                materializeFirst(WTFMove(*bestAllocation));
             }
-            
-            m_insertionSet.execute(block);
+            RELEASE_ASSERT(!materialized.isEmpty());
+
+            for (Node* identifier : materialized)
+                escapees.remove(identifier);
         }
-        
-        // At this point we have dummy materialization nodes along with edges to them. This means
-        // that the part of the control flow graph that prefers to see actual object allocations
-        // is completely fixed up, except for the materializations themselves.
+
+        materialized.clear();
+
+        NodeSet escaped;
+        for (const Allocation& allocation : toMaterialize)
+            escaped.addVoid(allocation.identifier());
+        for (const Allocation& allocation : toMaterialize) {
+            for (const auto& field : allocation.fields()) {
+                if (escaped.contains(field.value) && !materialized.contains(field.value))
+                    toRecover.append(PromotedHeapLocation(allocation.identifier(), field.key));
+            }
+            materialized.addVoid(allocation.identifier());
+        }
+
+        Vector<Node*>& materializations = m_materializationSiteToMaterializations.add(
+            where, Vector<Node*>()).iterator->value;
+
+        for (const Allocation& allocation : toMaterialize) {
+            Node* materialization = createMaterialization(allocation, where);
+            materializations.append(materialization);
+            m_materializationToEscapee.add(materialization, allocation.identifier());
+        }
+
+        if (!toRecover.isEmpty()) {
+            m_materializationSiteToRecoveries.add(
+                where, Vector<PromotedHeapLocation>()).iterator->value.appendVector(toRecover);
+        }
+
+        // The hints need to be after the "real" recoveries so that we
+        // don't hint not-yet-complete objects
+        m_materializationSiteToHints.add(
+            where, Vector<std::pair<PromotedHeapLocation, Node*>>()).iterator->value.appendVector(hints);
     }
-    
-    void lowerNonReadingOperationsOnPhantomAllocations()
+
+    Node* createMaterialization(const Allocation& allocation, Node* where)
     {
-        // Lower everything but reading operations on phantom allocations. We absolutely have to
-        // lower all writes so as to reveal them to the SSA calculator. We cannot lower reads
-        // because the whole point is that those go away completely.
-        
-        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
-            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
-                Node* node = block->at(nodeIndex);
-                switch (node->op()) {
-                case PutByOffset: {
-                    if (m_sinkCandidates.contains(node->child2().node()))
-                        node->convertToPutByOffsetHint();
-                    break;
-                }
-                    
-                case PutStructure: {
-                    if (m_sinkCandidates.contains(node->child1().node())) {
-                        Node* structure = m_insertionSet.insertConstant(
-                            nodeIndex, node->origin, JSValue(node->transition()->next));
-                        node->convertToPutStructureHint(structure);
-                    }
-                    break;
-                }
-                    
-                case NewObject: {
-                    if (m_sinkCandidates.contains(node)) {
-                        Node* structure = m_insertionSet.insertConstant(
-                            nodeIndex + 1, node->origin, JSValue(node->structure()));
-                        m_insertionSet.insertNode(
-                            nodeIndex + 1, SpecNone, PutStructureHint, node->origin,
-                            Edge(node, KnownCellUse), Edge(structure, KnownCellUse));
-                        node->convertToPhantomNewObject();
-                    }
-                    break;
-                }
-                    
-                case MaterializeNewObject: {
-                    if (m_sinkCandidates.contains(node)) {
-                        m_insertionSet.insertNode(
-                            nodeIndex + 1, SpecNone, PutStructureHint, node->origin,
-                            Edge(node, KnownCellUse), m_graph.varArgChild(node, 0));
-                        for (unsigned i = 0; i < node->objectMaterializationData().m_properties.size(); ++i) {
-                            m_insertionSet.insertNode(
-                                nodeIndex + 1, SpecNone, PutByOffsetHint, node->origin,
-                                Edge(node, KnownCellUse), m_graph.varArgChild(node, i + 1));
-                        }
-                        node->convertToPhantomNewObject();
-                    }
-                    break;
-                }
-                    
-                case StoreBarrier:
-                case StoreBarrierWithNullCheck: {
-                    if (m_sinkCandidates.contains(node->child1().node()))
-                        node->convertToPhantom();
-                    break;
-                }
-                    
-                default:
-                    break;
-                }
-                
-                m_graph.doToChildren(
-                    node,
-                    [&] (Edge& edge) {
-                        if (m_sinkCandidates.contains(edge.node()))
-                            edge.setUseKind(KnownCellUse);
-                    });
+        // FIXME: This is the only place where we actually use the
+        // fact that an allocation's identifier is indeed the node
+        // that created the allocation.
+        switch (allocation.kind()) {
+        case Allocation::Kind::Object: {
+            ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
+
+            return m_graph.addNode(
+                allocation.identifier()->prediction(), Node::VarArg, MaterializeNewObject,
+                where->origin.withSemantic(allocation.identifier()->origin.semantic),
+                OpInfo(m_graph.addStructureSet(allocation.structures())), OpInfo(data), 0, 0);
+        }
+
+        case Allocation::Kind::AsyncGeneratorFunction:
+        case Allocation::Kind::AsyncFunction:
+        case Allocation::Kind::GeneratorFunction:
+        case Allocation::Kind::Function: {
+            FrozenValue* executable = allocation.identifier()->cellOperand();
+
+            NodeType nodeType;
+            switch (allocation.kind()) {
+            case Allocation::Kind::GeneratorFunction:
+                nodeType = NewGeneratorFunction;
+                break;
+            case Allocation::Kind::AsyncGeneratorFunction:
+                nodeType = NewAsyncGeneratorFunction;
+                break;
+            case Allocation::Kind::AsyncFunction:
+                nodeType = NewAsyncFunction;
+                break;
+            default:
+                nodeType = NewFunction;
             }
-            m_insertionSet.execute(block);
+
+            return m_graph.addNode(
+                allocation.identifier()->prediction(), nodeType,
+                where->origin.withSemantic(
+                    allocation.identifier()->origin.semantic),
+                OpInfo(executable));
+        }
+
+        case Allocation::Kind::Activation: {
+            ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
+            FrozenValue* symbolTable = allocation.identifier()->cellOperand();
+
+            return m_graph.addNode(
+                allocation.identifier()->prediction(), Node::VarArg, MaterializeCreateActivation,
+                where->origin.withSemantic(
+                    allocation.identifier()->origin.semantic),
+                OpInfo(symbolTable), OpInfo(data), 0, 0);
+        }
+
+        case Allocation::Kind::RegExpObject: {
+            FrozenValue* regExp = allocation.identifier()->cellOperand();
+            return m_graph.addNode(
+                allocation.identifier()->prediction(), NewRegexp,
+                where->origin.withSemantic(
+                    allocation.identifier()->origin.semantic),
+                OpInfo(regExp));
+        }
+
+        default:
+            DFG_CRASH(m_graph, allocation.identifier(), "Bad allocation kind");
         }
     }
-    
-    void promoteSunkenFields()
+
+    void promoteLocalHeap()
     {
-        // Collect the set of heap locations that we will be operating over.
+        // Collect the set of heap locations that we will be operating
+        // over.
         HashSet<PromotedHeapLocation> locations;
         for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
+            m_heap = m_heapAtHead[block];
+
             for (Node* node : *block) {
-                promoteHeapAccess(
+                handleNode(
                     node,
-                    [&] (PromotedHeapLocation location, Edge) {
+                    [&] (PromotedHeapLocation location, LazyNode) {
+                        // If the location is not on a sink candidate,
+                        // we only track it if it is read
                         if (m_sinkCandidates.contains(location.base()))
-                            locations.add(location);
+                            locations.addVoid(location);
                     },
-                    [&] (PromotedHeapLocation location) {
-                        if (m_sinkCandidates.contains(location.base()))
-                            locations.add(location);
+                    [&] (PromotedHeapLocation location) -> Node* {
+                        locations.addVoid(location);
+                        return nullptr;
                     });
             }
         }
-        
+
         // Figure out which locations belong to which allocations.
         m_locationsForAllocation.clear();
         for (PromotedHeapLocation location : locations) {
-            auto result = m_locationsForAllocation.add(location.base(), Vector<PromotedHeapLocation>());
+            auto result = m_locationsForAllocation.add(
+                location.base(),
+                Vector<PromotedHeapLocation>());
             ASSERT(!result.iterator->value.contains(location));
             result.iterator->value.append(location);
         }
-        
-        // For each sunken thingy, make sure we create Bottom values for all of its fields.
-        // Note that this has the hilarious slight inefficiency of creating redundant hints for
-        // things that were previously materializations. This should only impact compile times and
-        // not code quality, and it's necessary for soundness without some data structure hackage.
-        // For example, a MaterializeNewObject that we choose to sink may have new fields added to
-        // it conditionally. That would necessitate Bottoms.
-        Node* bottom = nullptr;
+
+        m_pointerSSA.reset();
+        m_allocationSSA.reset();
+
+        // Collect the set of "variables" that we will be sinking.
+        m_locationToVariable.clear();
+        m_nodeToVariable.clear();
+        Vector<Node*> indexToNode;
+        Vector<PromotedHeapLocation> indexToLocation;
+
+        for (Node* index : m_sinkCandidates) {
+            SSACalculator::Variable* variable = m_allocationSSA.newVariable();
+            m_nodeToVariable.add(index, variable);
+            ASSERT(indexToNode.size() == variable->index());
+            indexToNode.append(index);
+        }
+
+        for (PromotedHeapLocation location : locations) {
+            SSACalculator::Variable* variable = m_pointerSSA.newVariable();
+            m_locationToVariable.add(location, variable);
+            ASSERT(indexToLocation.size() == variable->index());
+            indexToLocation.append(location);
+        }
+
+        // We insert all required constants at the top of block 0 so
+        // that they are inserted only once and we don't clutter the
+        // graph with useless constants everywhere
+        HashMap<FrozenValue*, Node*> lazyMapping;
+        if (!m_bottom)
+            m_bottom = m_insertionSet.insertConstant(0, m_graph.block(0)->at(0)->origin, jsNumber(1927));
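+        // Note that this bottom constant merely stands in for named
+        // properties that have not been written yet; its concrete
+        // value is arbitrary.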
+
+        Vector<HashSet<PromotedHeapLocation>> hintsForPhi(m_sinkCandidates.size());
+
         for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
-            if (block == m_graph.block(0))
-                bottom = m_insertionSet.insertNode(0, SpecNone, BottomValue, NodeOrigin());
-            
+            m_heap = m_heapAtHead[block];
+
             for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                 Node* node = block->at(nodeIndex);
+
+                // Some named properties can be added conditionally,
+                // and that would necessitate bottoms
                 for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
-                    m_insertionSet.insert(
-                        nodeIndex + 1, location.createHint(m_graph, node->origin, bottom));
+                    if (location.kind() != NamedPropertyPLoc)
+                        continue;
+
+                    SSACalculator::Variable* variable = m_locationToVariable.get(location);
+                    m_pointerSSA.newDef(variable, block, m_bottom);
+                }
+
+                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
+                    Node* escapee = m_materializationToEscapee.get(materialization);
+                    m_allocationSSA.newDef(m_nodeToVariable.get(escapee), block, materialization);
+                }
+
+                for (std::pair<PromotedHeapLocation, Node*> pair : m_materializationSiteToHints.get(node)) {
+                    PromotedHeapLocation location = pair.first;
+                    Node* identifier = pair.second;
+                    // We're materializing `identifier` at this point, and the unmaterialized
+                    // version is inside `location`. We track which SSA variable this belongs
+                    // to in case we also need a PutHint for the Phi.
+                    if (UNLIKELY(validationEnabled())) {
+                        RELEASE_ASSERT(m_sinkCandidates.contains(location.base()));
+                        RELEASE_ASSERT(m_sinkCandidates.contains(identifier));
+
+                        bool found = false;
+                        for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
+                            // We're materializing `identifier` here. This asserts that this is indeed the case.
+                            if (m_materializationToEscapee.get(materialization) == identifier) {
+                                found = true;
+                                break;
+                            }
+                        }
+                        RELEASE_ASSERT(found);
+                    }
+
+                    SSACalculator::Variable* variable = m_nodeToVariable.get(identifier);
+                    hintsForPhi[variable->index()].addVoid(location);
                 }
+
+                if (m_sinkCandidates.contains(node))
+                    m_allocationSSA.newDef(m_nodeToVariable.get(node), block, node);
+
+                handleNode(
+                    node,
+                    [&] (PromotedHeapLocation location, LazyNode value) {
+                        if (!locations.contains(location))
+                            return;
+
+                        Node* nodeValue;
+                        if (value.isNode())
+                            nodeValue = value.asNode();
+                        else {
+                            auto iter = lazyMapping.find(value.asValue());
+                            if (iter != lazyMapping.end())
+                                nodeValue = iter->value;
+                            else {
+                                nodeValue = value.ensureIsNode(
+                                    m_insertionSet, m_graph.block(0), 0);
+                                lazyMapping.add(value.asValue(), nodeValue);
+                            }
+                        }
+
+                        SSACalculator::Variable* variable = m_locationToVariable.get(location);
+                        m_pointerSSA.newDef(variable, block, nodeValue);
+                    },
+                    [] (PromotedHeapLocation) -> Node* {
+                        return nullptr;
+                    });
             }
-            m_insertionSet.execute(block);
         }
+        m_insertionSet.execute(m_graph.block(0));
 
-        m_ssaCalculator.reset();
+        // Run the SSA calculators to create Phis
+        m_pointerSSA.computePhis(
+            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
+                PromotedHeapLocation location = indexToLocation[variable->index()];
 
-        // Collect the set of "variables" that we will be sinking.
-        m_locationToVariable.clear();
-        m_indexToLocation.clear();
-        for (PromotedHeapLocation location : locations) {
-            SSACalculator::Variable* variable = m_ssaCalculator.newVariable();
-            m_locationToVariable.add(location, variable);
-            ASSERT(m_indexToLocation.size() == variable->index());
-            m_indexToLocation.append(location);
-        }
-        
-        // Create Defs from the existing hints.
-        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
-            for (Node* node : *block) {
-                promoteHeapAccess(
+                // Don't create Phi nodes for fields of dead allocations
+                if (!m_heapAtHead[block].isAllocation(location.base()))
+                    return nullptr;
+
+                // Don't create Phi nodes once we are escaped
+                if (m_heapAtHead[block].getAllocation(location.base()).isEscapedAllocation())
+                    return nullptr;
+
+                // If we point to a single allocation, we will
+                // directly use its materialization
+                if (m_heapAtHead[block].follow(location))
+                    return nullptr;
+
+                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, block->at(0)->origin.withInvalidExit());
+                phiNode->mergeFlags(NodeResultJS);
+                return phiNode;
+            });
+
+        m_allocationSSA.computePhis(
+            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
+                Node* identifier = indexToNode[variable->index()];
+
+                // Don't create Phi nodes for dead allocations
+                if (!m_heapAtHead[block].isAllocation(identifier))
+                    return nullptr;
+
+                // Don't create Phi nodes until we are escaped
+                if (!m_heapAtHead[block].getAllocation(identifier).isEscapedAllocation())
+                    return nullptr;
+
+                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, block->at(0)->origin.withInvalidExit());
+                phiNode->mergeFlags(NodeResultJS);
+                return phiNode;
+            });
+
+        // Place Phis in the right places, replace all uses of any load with the appropriate
+        // value, and create the materialization nodes.
+        LocalOSRAvailabilityCalculator availabilityCalculator(m_graph);
+        m_graph.clearReplacements();
+        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
+            m_heap = m_heapAtHead[block];
+            availabilityCalculator.beginBlock(block);
+
+            // These mapping tables are intended to be lazy. If
+            // something is omitted from the table, it means that
+            // there haven't been any local stores to the promoted
+            // heap location (or any local materialization).
+            m_localMapping.clear();
+            m_escapeeToMaterialization.clear();
+
+            // Insert the Phi functions that we had previously
+            // created.
+            for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(block)) {
+                SSACalculator::Variable* variable = phiDef->variable();
+                m_insertionSet.insert(0, phiDef->value());
+
+                PromotedHeapLocation location = indexToLocation[variable->index()];
+                m_localMapping.set(location, phiDef->value());
+
+                if (m_sinkCandidates.contains(location.base())) {
+                    m_insertionSet.insert(
+                        0,
+                        location.createHint(
+                            m_graph, block->at(0)->origin.withInvalidExit(), phiDef->value()));
+                }
+            }
+
+            for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(block)) {
+                SSACalculator::Variable* variable = phiDef->variable();
+                m_insertionSet.insert(0, phiDef->value());
+
+                Node* identifier = indexToNode[variable->index()];
+                m_escapeeToMaterialization.add(identifier, phiDef->value());
+                bool canExit = false;
+                insertOSRHintsForUpdate(
+                    0, block->at(0)->origin, canExit,
+                    availabilityCalculator.m_availability, identifier, phiDef->value());
+
+                for (PromotedHeapLocation location : hintsForPhi[variable->index()]) {
+                    m_insertionSet.insert(0,
+                        location.createHint(m_graph, block->at(0)->origin.withInvalidExit(), phiDef->value()));
+                    m_localMapping.set(location, phiDef->value());
+                }
+            }
+
+            if (DFGObjectAllocationSinkingPhaseInternal::verbose) {
+                dataLog("Local mapping at ", pointerDump(block), ": ", mapDump(m_localMapping), "\n");
+                dataLog("Local materializations at ", pointerDump(block), ": ", mapDump(m_escapeeToMaterialization), "\n");
+            }
+
+            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
+                Node* node = block->at(nodeIndex);
+                bool canExit = true;
+                bool nextCanExit = node->origin.exitOK;
+                for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
+                    if (location.kind() != NamedPropertyPLoc)
+                        continue;
+
+                    m_localMapping.set(location, m_bottom);
+
+                    if (m_sinkCandidates.contains(node)) {
+                        if (DFGObjectAllocationSinkingPhaseInternal::verbose)
+                            dataLog("For sink candidate ", node, " found location ", location, "\n");
+                        m_insertionSet.insert(
+                            nodeIndex + 1,
+                            location.createHint(
+                                m_graph, node->origin.takeValidExit(nextCanExit), m_bottom));
+                    }
+                }
+
+                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
+                    materialization->origin.exitOK &= canExit;
+                    Node* escapee = m_materializationToEscapee.get(materialization);
+                    populateMaterialization(block, materialization, escapee);
+                    m_escapeeToMaterialization.set(escapee, materialization);
+                    m_insertionSet.insert(nodeIndex, materialization);
+                    if (DFGObjectAllocationSinkingPhaseInternal::verbose)
+                        dataLog("Materializing ", escapee, " => ", materialization, " at ", node, "\n");
+                }
+
+                for (PromotedHeapLocation location : m_materializationSiteToRecoveries.get(node))
+                    m_insertionSet.insert(nodeIndex, createRecovery(block, location, node, canExit));
+                for (std::pair<PromotedHeapLocation, Node*> pair : m_materializationSiteToHints.get(node))
+                    m_insertionSet.insert(nodeIndex, createRecovery(block, pair.first, node, canExit));
+
+                // We need to put the OSR hints after the recoveries,
+                // because we only want the hints once the object is
+                // complete
+                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
+                    Node* escapee = m_materializationToEscapee.get(materialization);
+                    insertOSRHintsForUpdate(
+                        nodeIndex, node->origin, canExit,
+                        availabilityCalculator.m_availability, escapee, materialization);
+                }
+
+                if (node->origin.exitOK && !canExit) {
+                    // We indicate that the exit state is fine now. It is OK because we updated the
+                    // state above. We need to indicate this manually because the validation doesn't
+                    // have enough information to infer that the exit state is fine.
+                    m_insertionSet.insertNode(nodeIndex, SpecNone, ExitOK, node->origin);
+                }
+
+                if (m_sinkCandidates.contains(node))
+                    m_escapeeToMaterialization.set(node, node);
+
+                availabilityCalculator.executeNode(node);
+
+                bool desiredNextExitOK = node->origin.exitOK && !clobbersExitState(m_graph, node);
+
+                bool doLower = false;
+                handleNode(
                     node,
-                    [&] (PromotedHeapLocation location, Edge value) {
+                    [&] (PromotedHeapLocation location, LazyNode value) {
+                        if (!locations.contains(location))
+                            return;
+
+                        Node* nodeValue;
+                        if (value.isNode())
+                            nodeValue = value.asNode();
+                        else
+                            nodeValue = lazyMapping.get(value.asValue());
+
+                        nodeValue = resolve(block, nodeValue);
+
+                        m_localMapping.set(location, nodeValue);
+
                         if (!m_sinkCandidates.contains(location.base()))
                             return;
-                        SSACalculator::Variable* variable = m_locationToVariable.get(location);
-                        m_ssaCalculator.newDef(variable, block, value.node());
+
+                        doLower = true;
+
+                        if (DFGObjectAllocationSinkingPhaseInternal::verbose)
+                            dataLog("Creating hint with value ", nodeValue, " before ", node, "\n");
+                        m_insertionSet.insert(
+                            nodeIndex + 1,
+                            location.createHint(
+                                m_graph, node->origin.takeValidExit(nextCanExit), nodeValue));
                     },
-                    [&] (PromotedHeapLocation) { });
-            }
-        }
-        
-        // OMG run the SSA calculator to create Phis!
-        m_ssaCalculator.computePhis(
-            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
-                PromotedHeapLocation location = m_indexToLocation[variable->index()];
-                if (!block->ssa->liveAtHead.contains(location.base()))
-                    return nullptr;
-                
-                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, NodeOrigin());
-                phiNode->mergeFlags(NodeResultJS);
-                return phiNode;
-            });
-        
-        // Place Phis in the right places, replace all uses of any load with the appropriate
-        // value, and create the appropriate Upsilon nodes.
-        m_graph.clearReplacements();
-        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
-            // This mapping table is intended to be lazy. If something is omitted from the table,
-            // it means that there haven't been any local stores to that promoted heap location.
-            m_localMapping.clear();
-            
-            // Insert the Phi functions that we had previously created.
-            for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(block)) {
-                PromotedHeapLocation location = m_indexToLocation[phiDef->variable()->index()];
-                
-                m_insertionSet.insert(
-                    0, phiDef->value());
-                m_insertionSet.insert(
-                    0, location.createHint(m_graph, NodeOrigin(), phiDef->value()));
-                m_localMapping.add(location, phiDef->value());
-            }
-            
-            if (verbose)
-                dataLog("Local mapping at ", pointerDump(block), ": ", mapDump(m_localMapping), "\n");
-            
-            // Process the block and replace all uses of loads with the promoted value.
-            for (Node* node : *block) {
-                m_graph.performSubstitution(node);
-                
-                if (Node* escapee = m_materializationToEscapee.get(node))
-                    populateMaterialize(block, node, escapee);
-                
-                promoteHeapAccess(
+                    [&] (PromotedHeapLocation location) -> Node* {
+                        return resolve(block, location);
+                    });
+
+                if (!nextCanExit && desiredNextExitOK) {
+                    // We indicate that the exit state is fine now. We need to do this because we
+                    // emitted hints that appear to invalidate the exit state.
+                    m_insertionSet.insertNode(nodeIndex + 1, SpecNone, ExitOK, node->origin);
+                }
+
+                if (m_sinkCandidates.contains(node) || doLower) {
+                    switch (node->op()) {
+                    case NewObject:
+                        node->convertToPhantomNewObject();
+                        break;
+
+                    case NewFunction:
+                        node->convertToPhantomNewFunction();
+                        break;
+
+                    case NewGeneratorFunction:
+                        node->convertToPhantomNewGeneratorFunction();
+                        break;
+
+                    case NewAsyncGeneratorFunction:
+                        node->convertToPhantomNewAsyncGeneratorFunction();
+                        break;
+
+                    case NewAsyncFunction:
+                        node->convertToPhantomNewAsyncFunction();
+                        break;
+
+                    case CreateActivation:
+                        node->convertToPhantomCreateActivation();
+                        break;
+
+                    case NewRegexp:
+                        node->convertToPhantomNewRegexp();
+                        break;
+
+                    default:
+                        node->remove(m_graph);
+                        break;
+                    }
+                }
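+                // For instance, a sunk NewObject is turned into a
+                // PhantomNewObject here; its fields live on only
+                // through the hints inserted above.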
+
+                m_graph.doToChildren(
                     node,
-                    [&] (PromotedHeapLocation location, Edge value) {
-                        if (m_sinkCandidates.contains(location.base()))
-                            m_localMapping.set(location, value.node());
-                    },
-                    [&] (PromotedHeapLocation location) {
-                        if (m_sinkCandidates.contains(location.base()))
-                            node->replaceWith(resolve(block, location));
+                    [&] (Edge& edge) {
+                        edge.setNode(resolve(block, edge.node()));
                     });
             }
-            
+
             // Gotta drop some Upsilons.
-            size_t upsilonInsertionPoint = block->size() - 1;
-            NodeOrigin upsilonOrigin = block->last()->origin;
+            NodeAndIndex terminal = block->findTerminal();
+            size_t upsilonInsertionPoint = terminal.index;
+            NodeOrigin upsilonOrigin = terminal.node->origin;
             for (BasicBlock* successorBlock : block->successors()) {
-                for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(successorBlock)) {
+                for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(successorBlock)) {
                     Node* phiNode = phiDef->value();
                     SSACalculator::Variable* variable = phiDef->variable();
-                    PromotedHeapLocation location = m_indexToLocation[variable->index()];
+                    PromotedHeapLocation location = indexToLocation[variable->index()];
                     Node* incoming = resolve(block, location);
-                    
+
+                    m_insertionSet.insertNode(
+                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
+                        OpInfo(phiNode), incoming->defaultEdge());
+                }
+
+                for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(successorBlock)) {
+                    Node* phiNode = phiDef->value();
+                    SSACalculator::Variable* variable = phiDef->variable();
+                    Node* incoming = getMaterialization(block, indexToNode[variable->index()]);
+
                     m_insertionSet.insertNode(
                         upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                         OpInfo(phiNode), incoming->defaultEdge());
                 }
             }
-            
+
             m_insertionSet.execute(block);
         }
     }
-    
-    Node* resolve(BasicBlock* block, PromotedHeapLocation location)
+
+    NEVER_INLINE Node* resolve(BasicBlock* block, PromotedHeapLocation location)
     {
+        // If we are currently pointing to a single local allocation,
+        // simply return the associated materialization.
+        if (Node* identifier = m_heap.follow(location))
+            return getMaterialization(block, identifier);
+
         if (Node* result = m_localMapping.get(location))
             return result;
-        
+
         // This implies that there is no local mapping. Find a non-local mapping.
-        SSACalculator::Def* def = m_ssaCalculator.nonLocalReachingDef(
+        SSACalculator::Def* def = m_pointerSSA.nonLocalReachingDef(
             block, m_locationToVariable.get(location));
         ASSERT(def);
         ASSERT(def->value());
-        m_localMapping.add(location, def->value());
-        return def->value();
+
+        Node* result = def->value();
+        if (result->replacement())
+            result = result->replacement();
+        ASSERT(!result->replacement());
+
+        m_localMapping.add(location, result);
+        return result;
     }
 
-    template<typename SinkCandidateFunctor, typename EscapeFunctor>
-    void handleNode(
-        Node* node,
-        const SinkCandidateFunctor& sinkCandidate,
-        const EscapeFunctor& escape)
+    NEVER_INLINE Node* resolve(BasicBlock* block, Node* node)
     {
-        switch (node->op()) {
-        case NewObject:
-        case MaterializeNewObject:
-            sinkCandidate();
-            m_graph.doToChildren(
-                node,
-                [&] (Edge edge) {
-                    escape(edge.node());
-                });
-            break;
-            
-        case CheckStructure:
-        case GetByOffset:
-        case MultiGetByOffset:
-        case PutStructure:
-        case GetGetterSetterByOffset:
-        case MovHint:
-        case Phantom:
-        case Check:
-        case HardPhantom:
-        case StoreBarrier:
-        case StoreBarrierWithNullCheck:
-        case PutByOffsetHint:
-            break;
-            
-        case PutByOffset:
-            escape(node->child3().node());
-            break;
-            
-        case MultiPutByOffset:
-            // FIXME: In the future we should be able to handle this. It's just a matter of
-            // building the appropriate *Hint variant of this instruction, along with a
-            // PhantomStructureSelect node - since this transforms the Structure in a conditional
-            // way.
-            // https://bugs.webkit.org/show_bug.cgi?id=136924
-            escape(node->child1().node());
-            escape(node->child2().node());
-            break;
+        // If we are currently pointing to a single local allocation,
+        // simply return the associated materialization.
+        if (Node* identifier = m_heap.follow(node))
+            return getMaterialization(block, identifier);
 
-        default:
-            m_graph.doToChildren(
-                node,
-                [&] (Edge edge) {
-                    escape(edge.node());
-                });
-            break;
-        }
+        if (node->replacement())
+            node = node->replacement();
+        ASSERT(!node->replacement());
+
+        return node;
     }
-    
-    Node* createMaterialize(Node* escapee, Node* where)
+
+    NEVER_INLINE Node* getMaterialization(BasicBlock* block, Node* identifier)
+    {
+        ASSERT(m_heap.isAllocation(identifier));
+        if (!m_sinkCandidates.contains(identifier))
+            return identifier;
+
+        if (Node* materialization = m_escapeeToMaterialization.get(identifier))
+            return materialization;
+
+        SSACalculator::Def* def = m_allocationSSA.nonLocalReachingDef(
+            block, m_nodeToVariable.get(identifier));
+        ASSERT(def && def->value());
+        m_escapeeToMaterialization.add(identifier, def->value());
+        ASSERT(!def->value()->replacement());
+        return def->value();
+    }
+
+    void insertOSRHintsForUpdate(unsigned nodeIndex, NodeOrigin origin, bool& canExit, AvailabilityMap& availability, Node* escapee, Node* materialization)
     {
-        Node* result = nullptr;
+        if (DFGObjectAllocationSinkingPhaseInternal::verbose) {
+            dataLog("Inserting OSR hints at ", origin, ":\n");
+            dataLog("    Escapee: ", escapee, "\n");
+            dataLog("    Materialization: ", materialization, "\n");
+            dataLog("    Availability: ", availability, "\n");
+        }
         
-        switch (escapee->op()) {
-        case NewObject:
-        case MaterializeNewObject: {
-            ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
-            
-            result = m_graph.addNode(
-                escapee->prediction(), Node::VarArg, MaterializeNewObject,
-                NodeOrigin(
-                    escapee->origin.semantic,
-                    where->origin.forExit),
-                OpInfo(data), OpInfo(), 0, 0);
-            break;
+        // We need to follow() the value in the heap.
+        // Consider the following graph:
+        //
+        // Block #0
+        //   0: NewObject({})
+        //   1: NewObject({})
+        //   -: PutByOffset(@0, @1, x:0)
+        //   -: PutStructure(@0, {x:0})
+        //   2: GetByOffset(@0, x:0)
+        //   -: MovHint(@2, loc1)
+        //   -: Branch(#1, #2)
+        //
+        // Block #1
+        //   3: Call(f, @1)
+        //   4: Return(@0)
+        //
+        // Block #2
+        //   -: Return(undefined)
+        //
+        // We need to materialize @1 at @3, and when doing so we need
+        // to insert a MovHint for the materialization into loc1 as
+        // well.
+        // In order to do this, we insert an update hint for any
+        // availability whose node follow()s to the escapee being
+        // materialized.
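+        // First the heap availabilities: any promoted location whose available
+        // node follow()s to the escapee gets a fresh hint pointing at the
+        // materialization.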
+        for (auto entry : availability.m_heap) {
+            if (!entry.value.hasNode())
+                continue;
+            if (m_heap.follow(entry.value.node()) != escapee)
+                continue;
+
+            m_insertionSet.insert(
+                nodeIndex,
+                entry.key.createHint(m_graph, origin.takeValidExit(canExit), materialization));
         }
-            
-        default:
-            DFG_CRASH(m_graph, escapee, "Bad escapee op");
-            break;
+
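+        // Do the same for operands: any local whose availability follow()s to
+        // the escapee gets a MovHint of the materialization into that operand.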
+        for (unsigned i = availability.m_locals.size(); i--;) {
+            if (!availability.m_locals[i].hasNode())
+                continue;
+            if (m_heap.follow(availability.m_locals[i].node()) != escapee)
+                continue;
+
+            int operand = availability.m_locals.operandForIndex(i);
+            m_insertionSet.insertNode(
+                nodeIndex, SpecNone, MovHint, origin.takeValidExit(canExit), OpInfo(operand),
+                materialization->defaultEdge());
         }
-        
-        if (verbose)
-            dataLog("Creating materialization point at ", where, " for ", escapee, ": ", result, "\n");
-        
-        m_materializationToEscapee.add(result, escapee);
-        m_materializationSiteToMaterializations.add(
-            where, Vector<Node*>()).iterator->value.append(result);
-        
-        return result;
     }
-    
-    void populateMaterialize(BasicBlock* block, Node* node, Node* escapee)
+
+    void populateMaterialization(BasicBlock* block, Node* node, Node* escapee)
     {
+        Allocation& allocation = m_heap.getAllocation(escapee);
         switch (node->op()) {
         case MaterializeNewObject: {
             ObjectMaterializationData& data = node->objectMaterializationData();
             unsigned firstChild = m_graph.m_varArgChildren.size();
-            
+
             Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
-            
-            PromotedHeapLocation structure(StructurePLoc, escapee);
+
+            PromotedHeapLocation structure(StructurePLoc, allocation.identifier());
             ASSERT(locations.contains(structure));
-            
+
             m_graph.m_varArgChildren.append(Edge(resolve(block, structure), KnownCellUse));
-            
-            for (unsigned i = 0; i < locations.size(); ++i) {
-                switch (locations[i].kind()) {
-                case StructurePLoc: {
-                    ASSERT(locations[i] == structure);
+
+            for (PromotedHeapLocation location : locations) {
+                switch (location.kind()) {
+                case StructurePLoc:
+                    ASSERT(location == structure);
                     break;
-                }
-                    
+
                 case NamedPropertyPLoc: {
-                    Node* value = resolve(block, locations[i]);
-                    if (value->op() == BottomValue) {
-                        // We can skip Bottoms entirely.
-                        break;
-                    }
-                    
-                    data.m_properties.append(PhantomPropertyValue(locations[i].info()));
-                    m_graph.m_varArgChildren.append(value);
+                    ASSERT(location.base() == allocation.identifier());
+                    data.m_properties.append(location.descriptor());
+                    Node* value = resolve(block, location);
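+                    // A value that is itself a sink candidate has no
+                    // materialization to reference yet, so feed the placeholder
+                    // constant; the real value is wired up afterwards by the
+                    // hints and recoveries this phase emits.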
+                    if (m_sinkCandidates.contains(value))
+                        m_graph.m_varArgChildren.append(m_bottom);
+                    else
+                        m_graph.m_varArgChildren.append(value);
                     break;
                 }
-                    
+
                 default:
                     DFG_CRASH(m_graph, node, "Bad location kind");
                 }
             }
-            
+
             node->children = AdjacencyList(
                 AdjacencyList::Variable,
                 firstChild, m_graph.m_varArgChildren.size() - firstChild);
             break;
         }
-            
+
+        case MaterializeCreateActivation: {
+            ObjectMaterializationData& data = node->objectMaterializationData();
+
+            unsigned firstChild = m_graph.m_varArgChildren.size();
+
+            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
+
+            PromotedHeapLocation symbolTable(ActivationSymbolTablePLoc, allocation.identifier());
+            ASSERT(locations.contains(symbolTable));
+            ASSERT(node->cellOperand() == resolve(block, symbolTable)->constant());
+            m_graph.m_varArgChildren.append(Edge(resolve(block, symbolTable), KnownCellUse));
+
+            PromotedHeapLocation scope(ActivationScopePLoc, allocation.identifier());
+            ASSERT(locations.contains(scope));
+            m_graph.m_varArgChildren.append(Edge(resolve(block, scope), KnownCellUse));
+
+            for (PromotedHeapLocation location : locations) {
+                switch (location.kind()) {
+                case ActivationScopePLoc: {
+                    ASSERT(location == scope);
+                    break;
+                }
+
+                case ActivationSymbolTablePLoc: {
+                    ASSERT(location == symbolTable);
+                    break;
+                }
+
+                case ClosureVarPLoc: {
+                    ASSERT(location.base() == allocation.identifier());
+                    data.m_properties.append(location.descriptor());
+                    Node* value = resolve(block, location);
+                    if (m_sinkCandidates.contains(value))
+                        m_graph.m_varArgChildren.append(m_bottom);
+                    else
+                        m_graph.m_varArgChildren.append(value);
+                    break;
+                }
+
+                default:
+                    DFG_CRASH(m_graph, node, "Bad location kind");
+                }
+            }
+
+            node->children = AdjacencyList(
+                AdjacencyList::Variable,
+                firstChild, m_graph.m_varArgChildren.size() - firstChild);
+            break;
+        }
+
+        case NewFunction:
+        case NewGeneratorFunction:
+        case NewAsyncGeneratorFunction:
+        case NewAsyncFunction: {
+            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
+            ASSERT(locations.size() == 2);
+
+            PromotedHeapLocation executable(FunctionExecutablePLoc, allocation.identifier());
+            ASSERT_UNUSED(executable, locations.contains(executable));
+
+            PromotedHeapLocation activation(FunctionActivationPLoc, allocation.identifier());
+            ASSERT(locations.contains(activation));
+
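+            // The executable is a compile-time constant held in the node's
+            // cellOperand, so only the activation needs resolving here.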
+            node->child1() = Edge(resolve(block, activation), KnownCellUse);
+            break;
+        }
+
+        case NewRegexp: {
+            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
+            ASSERT(locations.size() == 2);
+
+            PromotedHeapLocation regExp(RegExpObjectRegExpPLoc, allocation.identifier());
+            ASSERT_UNUSED(regExp, locations.contains(regExp));
+
+            PromotedHeapLocation lastIndex(RegExpObjectLastIndexPLoc, allocation.identifier());
+            ASSERT(locations.contains(lastIndex));
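+            // The RegExp itself is a constant operand; lastIndex is the only
+            // mutable field, hence the only child to resolve.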
+            Node* value = resolve(block, lastIndex);
+            if (m_sinkCandidates.contains(value))
+                node->child1() = Edge(m_bottom);
+            else
+                node->child1() = Edge(value);
+            break;
+        }
+
         default:
             DFG_CRASH(m_graph, node, "Bad materialize op");
+        }
+    }
+
+    Node* createRecovery(BasicBlock* block, PromotedHeapLocation location, Node* where, bool& canExit)
+    {
+        if (DFGObjectAllocationSinkingPhaseInternal::verbose)
+            dataLog("Recovering ", location, " at ", where, "\n");
+        ASSERT(location.base()->isPhantomAllocation());
+        Node* base = getMaterialization(block, location.base());
+        Node* value = resolve(block, location);
+
+        NodeOrigin origin = where->origin.withSemantic(base->origin.semantic);
+
+        if (DFGObjectAllocationSinkingPhaseInternal::verbose)
+            dataLog("Base is ", base, " and value is ", value, "\n");
+
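+        // If the base is itself still sunk, we cannot emit a real store; record
+        // the write as a hint on the phantom allocation instead.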
+        if (base->isPhantomAllocation()) {
+            return PromotedHeapLocation(base, location.descriptor()).createHint(
+                m_graph, origin.takeValidExit(canExit), value);
+        }
+
+        switch (location.kind()) {
+        case NamedPropertyPLoc: {
+            Allocation& allocation = m_heap.getAllocation(location.base());
+
+            Vector<RegisteredStructure> structures;
+            structures.appendRange(allocation.structures().begin(), allocation.structures().end());
+            unsigned identifierNumber = location.info();
+            UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
+
+            std::sort(
+                structures.begin(),
+                structures.end(),
+                [uid] (RegisteredStructure a, RegisteredStructure b) -> bool {
+                    return a->getConcurrently(uid) < b->getConcurrently(uid);
+                });
+
+            RELEASE_ASSERT(structures.size());
+            PropertyOffset firstOffset = structures[0]->getConcurrently(uid);
+
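+            // The list is sorted by offset, so if the first and last offsets
+            // agree, every structure stores the property at the same offset and
+            // a single PutByOffset suffices; otherwise we fall through and build
+            // a MultiPutByOffset with one variant per distinct offset.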
+            if (firstOffset == structures.last()->getConcurrently(uid)) {
+                Node* storage = base;
+                // FIXME: When we decide to sink objects with out-of-line
+                // property storage, we will need to handle non-inline offsets here.
+                RELEASE_ASSERT(isInlineOffset(firstOffset));
+
+                StorageAccessData* data = m_graph.m_storageAccessData.add();
+                data->offset = firstOffset;
+                data->identifierNumber = identifierNumber;
+
+                return m_graph.addNode(
+                    PutByOffset,
+                    origin.takeValidExit(canExit),
+                    OpInfo(data),
+                    Edge(storage, KnownCellUse),
+                    Edge(base, KnownCellUse),
+                    value->defaultEdge());
+            }
+
+            MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
+            data->identifierNumber = identifierNumber;
+
+            {
+                PropertyOffset currentOffset = firstOffset;
+                StructureSet currentSet;
+                for (RegisteredStructure structure : structures) {
+                    PropertyOffset offset = structure->getConcurrently(uid);
+                    if (offset != currentOffset) {
+                        // Because our analysis treats MultiPutByOffset like an escape, we only have to
+                        // deal with storing results that would have been previously stored by PutByOffset
+                        // nodes. Those nodes were guarded by the appropriate type checks. This means that
+                        // at this point, we can simply trust that the incoming value has the right type
+                        // for whatever structure we are using.
+                        data->variants.append(
+                            PutByIdVariant::replace(currentSet, currentOffset));
+                        currentOffset = offset;
+                        currentSet.clear();
+                    }
+                    currentSet.add(structure.get());
+                }
+                data->variants.append(
+                    PutByIdVariant::replace(currentSet, currentOffset));
+            }
+
+            return m_graph.addNode(
+                MultiPutByOffset,
+                origin.takeValidExit(canExit),
+                OpInfo(data),
+                Edge(base, KnownCellUse),
+                value->defaultEdge());
+        }
+
+        case ClosureVarPLoc: {
+            return m_graph.addNode(
+                PutClosureVar,
+                origin.takeValidExit(canExit),
+                OpInfo(location.info()),
+                Edge(base, KnownCellUse),
+                value->defaultEdge());
+        }
+
+        case RegExpObjectLastIndexPLoc: {
+            return m_graph.addNode(
+                SetRegExpObjectLastIndex,
+                origin.takeValidExit(canExit),
+                OpInfo(true),
+                Edge(base, KnownCellUse),
+                value->defaultEdge());
+        }
+
+        default:
+            DFG_CRASH(m_graph, base, "Bad location kind");
             break;
         }
+
+        RELEASE_ASSERT_NOT_REACHED();
     }
     
-    SSACalculator m_ssaCalculator;
-    HashSet<Node*> m_sinkCandidates;
-    HashMap<Node*, Node*> m_materializationToEscapee;
-    HashMap<Node*, Vector<Node*>> m_materializationSiteToMaterializations;
-    HashMap<Node*, Vector<PromotedHeapLocation>> m_locationsForAllocation;
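+    // Filter nodes whose operand has become a phantom allocation no longer
+    // have anything to check, so drop them before later phases see them.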
+    void removeICStatusFilters()
+    {
+        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
+            for (Node* node : *block) {
+                switch (node->op()) {
+                case FilterCallLinkStatus:
+                case FilterGetByIdStatus:
+                case FilterPutByIdStatus:
+                case FilterInByIdStatus:
+                    if (node->child1()->isPhantomAllocation())
+                        node->removeWithoutChecks();
+                    break;
+                default:
+                    break;
+                }
+            }
+        }
+    }
+
+    // This is a way of asking value->isStillValid() without having to worry about getting
+    // different answers over the course of the phase. The analysis works OK regardless of
+    // what this returns, but breaks badly if the answer changes for any particular
+    // InferredValue. Caching the first answer protects us from that.
+    bool isStillValid(InferredValue* value)
+    {
+        return m_validInferredValues.add(value, value->isStillValid()).iterator->value;
+    }
+
+    SSACalculator m_pointerSSA;
+    SSACalculator m_allocationSSA;
+    NodeSet m_sinkCandidates;
     HashMap<PromotedHeapLocation, SSACalculator::Variable*> m_locationToVariable;
-    Vector<PromotedHeapLocation> m_indexToLocation;
+    HashMap<Node*, SSACalculator::Variable*> m_nodeToVariable;
     HashMap<PromotedHeapLocation, Node*> m_localMapping;
+    HashMap<Node*, Node*> m_escapeeToMaterialization;
     InsertionSet m_insertionSet;
+    CombinedLiveness m_combinedLiveness;
+
+    HashMap<InferredValue*, bool> m_validInferredValues;
+
+    HashMap<Node*, Node*> m_materializationToEscapee;
+    HashMap<Node*, Vector<Node*>> m_materializationSiteToMaterializations;
+    HashMap<Node*, Vector<PromotedHeapLocation>> m_materializationSiteToRecoveries;
+    HashMap<Node*, Vector<std::pair<PromotedHeapLocation, Node*>>> m_materializationSiteToHints;
+
+    HashMap<Node*, Vector<PromotedHeapLocation>> m_locationsForAllocation;
+
+    BlockMap<LocalHeap> m_heapAtHead;
+    BlockMap<LocalHeap> m_heapAtTail;
+    LocalHeap m_heap;
+
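+    // Placeholder constant used as a child wherever a sunk allocation's value
+    // cannot be referenced directly.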
+    Node* m_bottom = nullptr;
 };
-    
+
+}
+
 bool performObjectAllocationSinking(Graph& graph)
 {
-    SamplingRegion samplingRegion("DFG Object Allocation Sinking Phase");
     return runPhase<ObjectAllocationSinkingPhase>(graph);
 }
 
 } } // namespace JSC::DFG
 
 #endif // ENABLE(DFG_JIT)
-