/*
 * Copyright (C) 2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "DFGObjectAllocationSinkingPhase.h"

#if ENABLE(DFG_JIT)

#include "DFGBlockMapInlines.h"
#include "DFGCombinedLiveness.h"
#include "DFGGraph.h"
#include "DFGInsertionSet.h"
#include "DFGLazyNode.h"
#include "DFGLivenessAnalysisPhase.h"
#include "DFGOSRAvailabilityAnalysisPhase.h"
#include "DFGPhase.h"
#include "DFGPromotedHeapLocation.h"
#include "DFGSSACalculator.h"
#include "DFGValidate.h"
#include "JSCInlines.h"

namespace JSC { namespace DFG {

namespace {

const bool verbose = false;
// In order to sink object cycles, we use a points-to analysis coupled
// with an escape analysis. This analysis is actually similar to an
// abstract interpreter focused on local allocations and ignoring
// everything else.
//
// We represent the local heap using two mappings:
//
// - A set of the local allocations present in the function, where
//   each of those has a further mapping from
//   PromotedLocationDescriptor to the local allocations they must
//   point to.
//
// - A "pointer" mapping from nodes to local allocations, if they must
//   be equal to said local allocation and are currently live. This
//   can be because the node is the actual node that created the
//   allocation, or any other node that must currently point to it -
//   we make no distinction between the two.
//
// The following graph is a motivation for why we separate allocations
// from pointers:
//
// Block #0
//  0: NewObject({})
//  1: NewObject({})
//  -: PutByOffset(@0, @1, x)
//  -: PutStructure(@0, {x:0})
//  2: GetByOffset(@0, x)
//  -: MovHint(@2, loc1)
//  -: Branch(#1, #2)
//
// Block #1
//  3: Call(f, @2)
//
// Here, we need to remember in block #1 that @2 points to a local
// allocation with appropriate fields and structures information
// (because we should be able to place a materialization on top of
// block #1 here), even though @1 is dead. We *could* just keep @1
// artificially alive here, but there is no real reason to do it:
// after all, by the end of block #0, @1 and @2 should be completely
// interchangeable, and there is no reason for us to artificially make
// one of them more important than the other.
//
// An important point to consider to understand this separation is
// that we should think of the local heap as follows: we have a
// bunch of nodes that are pointers to "allocations" that live
// someplace on the heap, and those allocations can have pointers in
// between themselves as well. We shouldn't care about whatever
// names we give to the allocations; what matters when
// comparing/merging two heaps is the isomorphism/comparison between
// the allocation graphs as seen by the nodes.
//
// For instance, in the following graph:
//
// Block #0
//  0: NewObject({})
//  -: Branch(#1, #2)
//
// Block #1
//  1: NewObject({})
//  -: PutByOffset(@0, @1, x)
//  -: PutStructure(@0, {x:0})
//  -: Jump(#3)
//
// Block #2
//  2: NewObject({})
//  -: PutByOffset(@2, undefined, x)
//  -: PutStructure(@2, {x:0})
//  -: PutByOffset(@0, @2, x)
//  -: PutStructure(@0, {x:0})
//  -: Jump(#3)
//
// we should think of the heaps at tail of blocks #1 and #2 as being
// exactly the same, even though one has @0.x pointing to @1 and the
// other has @0.x pointing to @2, because in essence this should not
// be different from the graph where we hoisted @1 and @2 into a
// single allocation in block #0. We currently will not handle this
// case, because we merge allocations based on the node they are
// coming from, but this is only a technicality for the sake of
// simplicity that shouldn't hide the deeper idea outlined here.
class Allocation {
public:
    // We use Escaped as a special allocation kind because when we
    // decide to sink an allocation, we still need to keep track of it
    // once it is escaped if it still has pointers to it, in order to
    // replace any use of those pointers by the corresponding
    // materialization.
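    //
    // (Illustrative note, not from the original comment: if @0 is a
    // sunk allocation that escapes at some node, other live pointers
    // to #@0 must still resolve to @0's materialization afterwards;
    // keeping #@0 around as an Escaped allocation is what makes that
    // rewriting possible.)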
    enum class Kind { Escaped, Object, Activation, Function };

    explicit Allocation(Node* identifier = nullptr, Kind kind = Kind::Escaped)
        : m_identifier(identifier)
        , m_kind(kind)
    {
    }
    const HashMap<PromotedLocationDescriptor, Node*>& fields() const
    {
        return m_fields;
    }

    Node* get(PromotedLocationDescriptor descriptor)
    {
        return m_fields.get(descriptor);
    }
    Allocation& set(PromotedLocationDescriptor descriptor, Node* value)
    {
        // Pointing to anything else than an unescaped local
        // allocation is represented by simply not having the
        // field.
        if (value)
            m_fields.set(descriptor, value);
        else
            m_fields.remove(descriptor);
        return *this;
    }

    void remove(PromotedLocationDescriptor descriptor)
    {
        set(descriptor, nullptr);
    }
    bool hasStructures() const
    {
        switch (kind()) {
        case Kind::Object:
            return true;
        default:
            return false;
        }
    }

    Allocation& setStructures(const StructureSet& structures)
    {
        ASSERT(hasStructures() && !structures.isEmpty());
        m_structures = structures;
        return *this;
    }

    Allocation& mergeStructures(const StructureSet& structures)
    {
        ASSERT(hasStructures() || structures.isEmpty());
        m_structures.merge(structures);
        return *this;
    }

    Allocation& filterStructures(const StructureSet& structures)
    {
        ASSERT(hasStructures());
        m_structures.filter(structures);
        return *this;
    }

    const StructureSet& structures() const
    {
        return m_structures;
    }

    Node* identifier() const { return m_identifier; }

    Kind kind() const { return m_kind; }
    bool isEscapedAllocation() const
    {
        return kind() == Kind::Escaped;
    }

    bool isObjectAllocation() const
    {
        return m_kind == Kind::Object;
    }

    bool isActivationAllocation() const
    {
        return m_kind == Kind::Activation;
    }

    bool isFunctionAllocation() const
    {
        return m_kind == Kind::Function;
    }

    bool operator==(const Allocation& other) const
    {
        return m_identifier == other.m_identifier
            && m_kind == other.m_kind
            && m_fields == other.m_fields
            && m_structures == other.m_structures;
    }

    bool operator!=(const Allocation& other) const
    {
        return !(*this == other);
    }
    void dump(PrintStream& out) const
    {
        dumpInContext(out, nullptr);
    }

    void dumpInContext(PrintStream& out, DumpContext* context) const
    {
        switch (m_kind) {
        case Kind::Escaped:
            out.print("Escaped");
            break;

        case Kind::Function:
            out.print("Function");
            break;

        case Kind::Activation:
            out.print("Activation");
            break;

        case Kind::Object:
            out.print("Allocation(");
            if (!m_structures.isEmpty())
                out.print(inContext(m_structures, context));
            if (!m_fields.isEmpty()) {
                if (!m_structures.isEmpty())
                    out.print(", ");
                out.print(mapDump(m_fields, " => #", ", "));
            }
            out.print(")");
            break;
        }
    }

private:
    Node* m_identifier; // This is the actual node that created the allocation.
    Kind m_kind;
    HashMap<PromotedLocationDescriptor, Node*> m_fields;
    StructureSet m_structures;
};
class LocalHeap {
public:
    Allocation& newAllocation(Node* node, Allocation::Kind kind)
    {
        ASSERT(!m_pointers.contains(node) && !isAllocation(node));
        m_pointers.add(node, node);
        return m_allocations.set(node, Allocation(node, kind)).iterator->value;
    }

    bool isAllocation(Node* identifier) const
    {
        return m_allocations.contains(identifier);
    }
    // Note that this is fundamentally different from
    // onlyLocalAllocation() below. getAllocation() takes as argument
    // a node-as-identifier, that is, an allocation node. This
    // allocation node doesn't have to be alive; it may only be
    // pointed to by other nodes or allocation fields.
    // For instance, in the following graph:
    //
    // Block #0
    //  0: NewObject({})
    //  1: NewObject({})
    //  -: PutByOffset(@0, @1, x)
    //  -: PutStructure(@0, {x:0})
    //  2: GetByOffset(@0, x)
    //  -: Branch(#1, #2)
    //
    // Block #1
    //  3: Call(f, @2)
    //
    // At head of block #1, the only reachable allocation is #@1,
    // which can be reached through node @2. Thus, getAllocation(#@1)
    // contains the appropriate metadata for this allocation, but
    // onlyLocalAllocation(@1) is null, as @1 is no longer a pointer
    // to #@1 (since it is dead). Conversely, onlyLocalAllocation(@2)
    // is the same as getAllocation(#@1), while getAllocation(#@2)
    // does not make sense since @2 is not an allocation node.
    //
    // This is meant to be used when the node is already known to be
    // an identifier (i.e. an allocation) - probably because it was
    // found as the value of a field or pointer in the current heap, or
    // was the result of a call to follow(). In any other case (e.g.,
    // while traversing the graph), the appropriate function to call
    // is probably onlyLocalAllocation.
    Allocation& getAllocation(Node* identifier)
    {
        auto iter = m_allocations.find(identifier);
        ASSERT(iter != m_allocations.end());
        return iter->value;
    }
    void newPointer(Node* node, Node* identifier)
    {
        ASSERT(!m_allocations.contains(node) && !m_pointers.contains(node));
        ASSERT(isAllocation(identifier));
        m_pointers.add(node, identifier);
    }
    // follow() solves the points-to problem. Given a live node, which
    // may be either an allocation itself or a heap read (e.g. a
    // GetByOffset node), it returns the corresponding allocation
    // node, if there is one. If the argument node is neither an
    // allocation nor a heap read, or may point to different nodes,
    // nullptr will be returned. Note that a node that points to
    // different nodes can never point to an unescaped local
    // allocation.
    Node* follow(Node* node) const
    {
        auto iter = m_pointers.find(node);
        ASSERT(iter == m_pointers.end() || m_allocations.contains(iter->value));
        return iter == m_pointers.end() ? nullptr : iter->value;
    }

    Node* follow(PromotedHeapLocation location) const
    {
        const Allocation& base = m_allocations.find(location.base())->value;
        auto iter = base.fields().find(location.descriptor());

        if (iter == base.fields().end())
            return nullptr;

        return iter->value;
    }
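    // An illustrative use of follow(), assuming the running example
    // above (this sketch is not part of the original sources):
    //
    //  0: NewObject({})
    //  1: NewObject({})
    //  -: PutByOffset(@0, @1, x)
    //
    // Here follow(@1) == @1, and for the field location @0.x we have
    // follow(PromotedHeapLocation(@0, x)) == @1, while follow() of a
    // node that is neither an allocation nor a tracked heap read
    // returns nullptr.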
    // onlyLocalAllocation() finds the corresponding allocation metadata
    // for any live node. onlyLocalAllocation(node) is essentially
    // getAllocation(follow(node)), with appropriate null handling.
    Allocation* onlyLocalAllocation(Node* node)
    {
        Node* identifier = follow(node);
        if (!identifier)
            return nullptr;

        return &getAllocation(identifier);
    }

    Allocation* onlyLocalAllocation(PromotedHeapLocation location)
    {
        Node* identifier = follow(location);
        if (!identifier)
            return nullptr;

        return &getAllocation(identifier);
    }
    // This allows us to store the escapees only when necessary. If
    // set, the current escapees can be retrieved at any time using
    // takeEscapees(), which will clear the cached set of escapees;
    // otherwise the heap won't remember escaping allocations.
    void setWantEscapees()
    {
        m_wantEscapees = true;
    }

    HashMap<Node*, Allocation> takeEscapees()
    {
        return WTF::move(m_escapees);
    }

    void escape(Node* node)
    {
        Node* identifier = follow(node);
        if (!identifier)
            return;

        escapeAllocation(identifier);
    }
    void merge(const LocalHeap& other)
    {
        assertIsValid();
        other.assertIsValid();
        ASSERT(!m_wantEscapees);

        ASSERT(other.reached());
        if (!reached()) {
            *this = other;
            return;
        }

        HashSet<Node*> toEscape;

        for (auto& allocationEntry : other.m_allocations)
            m_allocations.add(allocationEntry.key, allocationEntry.value);
        for (auto& allocationEntry : m_allocations) {
            auto allocationIter = other.m_allocations.find(allocationEntry.key);

            // If we have it and they don't, it died for them but we
            // are keeping it alive from another field somewhere.
            // There is nothing to do - it will be escaped
            // automatically when we handle that other field.
            // This will also happen for allocations that we have and
            // they don't, and all of those will get pruned.
            if (allocationIter == other.m_allocations.end())
                continue;

            if (allocationEntry.value.kind() != allocationIter->value.kind()) {
                toEscape.add(allocationEntry.key);
                for (const auto& fieldEntry : allocationIter->value.fields())
                    toEscape.add(fieldEntry.value);
            } else {
                mergePointerSets(
                    allocationEntry.value.fields(), allocationIter->value.fields(),
                    [&] (Node* identifier) {
                        toEscape.add(identifier);
                    },
                    [&] (PromotedLocationDescriptor field) {
                        allocationEntry.value.remove(field);
                    });
                allocationEntry.value.mergeStructures(allocationIter->value.structures());
            }
        }

        mergePointerSets(m_pointers, other.m_pointers,
            [&] (Node* identifier) {
                toEscape.add(identifier);
            },
            [&] (Node* field) {
                m_pointers.remove(field);
            });

        for (Node* identifier : toEscape)
            escapeAllocation(identifier);

        if (!ASSERT_DISABLED) {
            for (const auto& entry : m_allocations)
                ASSERT_UNUSED(entry, entry.value.isEscapedAllocation() || other.m_allocations.contains(entry.key));
        }
        // If there is no remaining pointer to an allocation, we can
        // remove it. This should only happen for escaped allocations,
        // because we only merge liveness-pruned heaps in the first
        // place.
        prune();
    }
    void pruneByLiveness(const HashSet<Node*>& live)
    {
        Vector<Node*> toRemove;
        for (const auto& entry : m_pointers) {
            if (!live.contains(entry.key))
                toRemove.append(entry.key);
        }
        for (Node* node : toRemove)
            m_pointers.remove(node);

        prune();
    }
    void assertIsValid() const
    {
        if (ASSERT_DISABLED)
            return;

        // Pointers should point to an actual allocation
        for (const auto& entry : m_pointers) {
            ASSERT_UNUSED(entry, entry.value);
            ASSERT(m_allocations.contains(entry.value));
        }

        for (const auto& allocationEntry : m_allocations) {
            // Fields should point to an actual allocation
            for (const auto& fieldEntry : allocationEntry.value.fields()) {
                ASSERT_UNUSED(fieldEntry, fieldEntry.value);
                ASSERT(m_allocations.contains(fieldEntry.value));
            }
        }
    }
    bool operator==(const LocalHeap& other) const
    {
        assertIsValid();
        other.assertIsValid();
        return m_allocations == other.m_allocations
            && m_pointers == other.m_pointers;
    }

    bool operator!=(const LocalHeap& other) const
    {
        return !(*this == other);
    }

    const HashMap<Node*, Allocation>& allocations() const
    {
        return m_allocations;
    }

    const HashMap<Node*, Node*>& pointers() const
    {
        return m_pointers;
    }

    void dump(PrintStream& out) const
    {
        out.print(" Allocations:\n");
        for (const auto& entry : m_allocations)
            out.print("  #", entry.key, ": ", entry.value, "\n");
        out.print(" Pointers:\n");
        for (const auto& entry : m_pointers)
            out.print("  ", entry.key, " => #", entry.value, "\n");
    }

    bool reached() const
    {
        return m_reached;
    }

    void setReached()
    {
        m_reached = true;
    }

private:
    // When we merge two heaps, we escape all fields of allocations,
    // unless they point to the same thing in both heaps.
    // The reason for this is that it allows us not to do extra work
    // for diamond graphs where we would otherwise have to check
    // whether we have a single definition or not, which would be
    // costly.
    //
    // Note that we should try to unify nodes even when they are not
    // from the same allocation; for instance we should be able to
    // completely eliminate all allocations from the following graph:
    //
    // Block #0
    //  0: NewObject({})
    //  -: Branch(#1, #2)
    //
    // Block #1
    //  1: NewObject({})
    //  -: PutByOffset(@1, "left", val)
    //  -: PutStructure(@1, {val:0})
    //  -: PutByOffset(@0, @1, x)
    //  -: PutStructure(@0, {x:0})
    //  -: Jump(#3)
    //
    // Block #2
    //  2: NewObject({})
    //  -: PutByOffset(@2, "right", val)
    //  -: PutStructure(@2, {val:0})
    //  -: PutByOffset(@0, @2, x)
    //  -: PutStructure(@0, {x:0})
    //  -: Jump(#3)
    //
    // Block #3
    //  3: GetByOffset(@0, x)
    //  4: GetByOffset(@3, val)
    //  5: Return(@4)
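    //
    // As a concrete sketch of the helper below (illustrative, not from
    // the original comment): merging my = { a => #1, b => #2 } with
    // their = { a => #1, b => #3 } keeps a => #1 untouched, escapes
    // both #2 and #3 (the two sides disagree on b) and removes b; a
    // key present only in my escapes its value and is removed, while
    // one present only in their just escapes its value.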
    template<typename Key, typename EscapeFunctor, typename RemoveFunctor>
    void mergePointerSets(
        const HashMap<Key, Node*>& my, const HashMap<Key, Node*>& their,
        const EscapeFunctor& escape, const RemoveFunctor& remove)
    {
        Vector<Key> toRemove;
        for (const auto& entry : my) {
            auto iter = their.find(entry.key);
            if (iter == their.end()) {
                toRemove.append(entry.key);
                escape(entry.value);
            } else if (iter->value != entry.value) {
                toRemove.append(entry.key);
                escape(entry.value);
                escape(iter->value);
            }
        }
        for (const auto& entry : their) {
            if (my.contains(entry.key))
                continue;
            escape(entry.value);
        }
        for (Key key : toRemove)
            remove(key);
    }
    void escapeAllocation(Node* identifier)
    {
        Allocation& allocation = getAllocation(identifier);
        if (allocation.isEscapedAllocation())
            return;

        Allocation unescaped = WTF::move(allocation);
        allocation = Allocation(unescaped.identifier(), Allocation::Kind::Escaped);

        for (const auto& entry : unescaped.fields())
            escapeAllocation(entry.value);

        if (m_wantEscapees)
            m_escapees.add(unescaped.identifier(), WTF::move(unescaped));
    }
    void prune()
    {
        HashSet<Node*> reachable;
        for (const auto& entry : m_pointers)
            reachable.add(entry.value);

        // Repeatedly mark as reachable allocations in fields of other
        // reachable allocations.
        {
            Vector<Node*> worklist;
            worklist.appendRange(reachable.begin(), reachable.end());

            while (!worklist.isEmpty()) {
                Node* identifier = worklist.takeLast();
                Allocation& allocation = m_allocations.find(identifier)->value;
                for (const auto& entry : allocation.fields()) {
                    if (reachable.add(entry.value).isNewEntry)
                        worklist.append(entry.value);
                }
            }
        }

        // Remove unreachable allocations
        {
            Vector<Node*> toRemove;
            for (const auto& entry : m_allocations) {
                if (!reachable.contains(entry.key))
                    toRemove.append(entry.key);
            }
            for (Node* identifier : toRemove)
                m_allocations.remove(identifier);
        }
    }
    bool m_reached = false;
    HashMap<Node*, Node*> m_pointers;
    HashMap<Node*, Allocation> m_allocations;

    bool m_wantEscapees = false;
    HashMap<Node*, Allocation> m_escapees;
};
class ObjectAllocationSinkingPhase : public Phase {
public:
    ObjectAllocationSinkingPhase(Graph& graph)
        : Phase(graph, "object allocation elimination")
        , m_pointerSSA(graph)
        , m_allocationSSA(graph)
        , m_insertionSet(graph)
    {
    }

    bool run()
    {
        ASSERT(m_graph.m_form == SSA);
        ASSERT(m_graph.m_fixpointState == FixpointNotConverged);

        if (!performSinking())
            return false;

        if (verbose) {
            dataLog("Graph after elimination:\n");
            m_graph.dump();
        }

        return true;
    }

private:
    bool performSinking()
    {
        m_graph.computeRefCounts();
        m_graph.initializeNodeOwners();
        performLivenessAnalysis(m_graph);
        performOSRAvailabilityAnalysis(m_graph);
        m_combinedLiveness = CombinedLiveness(m_graph);

        CString graphBeforeSinking;
        if (Options::verboseValidationFailure() && Options::validateGraphAtEachPhase()) {
            StringPrintStream out;
            m_graph.dump(out);
            graphBeforeSinking = out.toCString();
        }

        if (verbose) {
            dataLog("Graph before elimination:\n");
            m_graph.dump();
        }

        performAnalysis();

        if (!determineSinkCandidates())
            return false;

        if (verbose) {
            for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
                dataLog("Heap at head of ", *block, ": \n", m_heapAtHead[block]);
                dataLog("Heap at tail of ", *block, ": \n", m_heapAtTail[block]);
            }
        }

        promoteLocalHeap();

        if (Options::validateGraphAtEachPhase())
            validate(m_graph, DumpGraph, graphBeforeSinking);
        return true;
    }
    void performAnalysis()
    {
        m_heapAtHead = BlockMap<LocalHeap>(m_graph);
        m_heapAtTail = BlockMap<LocalHeap>(m_graph);

        bool changed;
        do {
            if (verbose)
                dataLog("Doing iteration of escape analysis.\n");
            changed = false;

            for (BasicBlock* block : m_graph.blocksInPreOrder()) {
                m_heapAtHead[block].setReached();
                m_heap = m_heapAtHead[block];

                for (Node* node : *block) {
                    handleNode(
                        node,
                        [] (PromotedHeapLocation, LazyNode) { },
                        [&] (PromotedHeapLocation) -> Node* {
                            return nullptr;
                        });
                }

                if (m_heap == m_heapAtTail[block])
                    continue;

                m_heapAtTail[block] = m_heap;
                changed = true;

                m_heap.assertIsValid();

                // We keep only pointers that are live, and only
                // allocations that are either live, pointed to by a
                // live pointer, or (recursively) stored in a field of
                // a live allocation.
                //
                // This means we can accidentally leak non-dominating
                // nodes into the successor. However, due to the
                // non-dominance property, we are guaranteed that the
                // successor has at least one predecessor that is not
                // dominated either: this means any reference to a
                // non-dominating allocation in the successor will
                // trigger an escape and get pruned during the merge.
                m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

                for (BasicBlock* successorBlock : block->successors())
                    m_heapAtHead[successorBlock].merge(m_heap);
            }
        } while (changed);
    }
    template<typename WriteFunctor, typename ResolveFunctor>
    void handleNode(
        Node* node,
        const WriteFunctor& heapWrite,
        const ResolveFunctor& heapResolve)
    {
        m_heap.assertIsValid();
        ASSERT(m_heap.takeEscapees().isEmpty());

        Allocation* target = nullptr;
        HashMap<PromotedLocationDescriptor, LazyNode> writes;
        PromotedLocationDescriptor exactRead;

        switch (node->op()) {
        case NewObject:
            target = &m_heap.newAllocation(node, Allocation::Kind::Object);
            target->setStructures(node->structure());
            writes.add(
                StructurePLoc, LazyNode(m_graph.freeze(node->structure())));
            break;

        case MaterializeNewObject: {
            target = &m_heap.newAllocation(node, Allocation::Kind::Object);
            target->setStructures(node->structureSet());
            writes.add(
                StructurePLoc, LazyNode(m_graph.varArgChild(node, 0).node()));
            for (unsigned i = 0; i < node->objectMaterializationData().m_properties.size(); ++i) {
                writes.add(
                    PromotedLocationDescriptor(
                        NamedPropertyPLoc,
                        node->objectMaterializationData().m_properties[i].m_identifierNumber),
                    LazyNode(m_graph.varArgChild(node, i + 1).node()));
            }
            break;
        }

        case NewFunction: {
            if (node->castOperand<FunctionExecutable*>()->singletonFunction()->isStillValid()) {
                m_heap.escape(node->child1().node());
                break;
            }
            target = &m_heap.newAllocation(node, Allocation::Kind::Function);
            writes.add(FunctionExecutablePLoc, LazyNode(node->cellOperand()));
            writes.add(FunctionActivationPLoc, LazyNode(node->child1().node()));
            break;
        }

        case CreateActivation: {
            if (node->castOperand<SymbolTable*>()->singletonScope()->isStillValid()) {
                m_heap.escape(node->child1().node());
                break;
            }
            target = &m_heap.newAllocation(node, Allocation::Kind::Activation);
            writes.add(ActivationSymbolTablePLoc, LazyNode(node->cellOperand()));
            writes.add(ActivationScopePLoc, LazyNode(node->child1().node()));
            {
                SymbolTable* symbolTable = node->castOperand<SymbolTable*>();
                ConcurrentJITLocker locker(symbolTable->m_lock);
                LazyNode undefined(m_graph.freeze(jsUndefined()));
                for (auto iter = symbolTable->begin(locker), end = symbolTable->end(locker); iter != end; ++iter) {
                    writes.add(
                        PromotedLocationDescriptor(ClosureVarPLoc, iter->value.scopeOffset().offset()),
                        undefined);
                }
            }
            break;
        }

        case MaterializeCreateActivation: {
            // We have sunk this once already - there is no way the
            // watchpoint is still valid.
            ASSERT(!node->castOperand<SymbolTable*>()->singletonScope()->isStillValid());
            target = &m_heap.newAllocation(node, Allocation::Kind::Activation);
            writes.add(ActivationSymbolTablePLoc, LazyNode(m_graph.varArgChild(node, 0).node()));
            writes.add(ActivationScopePLoc, LazyNode(m_graph.varArgChild(node, 1).node()));
            for (unsigned i = 0; i < node->objectMaterializationData().m_properties.size(); ++i) {
                writes.add(
                    PromotedLocationDescriptor(
                        ClosureVarPLoc,
                        node->objectMaterializationData().m_properties[i].m_identifierNumber),
                    LazyNode(m_graph.varArgChild(node, i + 2).node()));
            }
            break;
        }

        case PutStructure:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isObjectAllocation()) {
                writes.add(StructurePLoc, LazyNode(m_graph.freeze(JSValue(node->transition()->next))));
                target->setStructures(node->transition()->next);
            } else
                m_heap.escape(node->child1().node());
            break;

        case CheckStructure: {
            Allocation* allocation = m_heap.onlyLocalAllocation(node->child1().node());
            if (allocation && allocation->isObjectAllocation()) {
                allocation->filterStructures(node->structureSet());
                if (Node* value = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc)))
                    node->convertToCheckStructureImmediate(value);
            } else
                m_heap.escape(node->child1().node());
            break;
        }

        case GetByOffset:
        case GetGetterSetterByOffset:
            target = m_heap.onlyLocalAllocation(node->child2().node());
            if (target && target->isObjectAllocation()) {
                unsigned identifierNumber = node->storageAccessData().identifierNumber;
                exactRead = PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber);
            } else {
                m_heap.escape(node->child1().node());
                m_heap.escape(node->child2().node());
            }
            break;

        case MultiGetByOffset:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isObjectAllocation()) {
                unsigned identifierNumber = node->multiGetByOffsetData().identifierNumber;
                exactRead = PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber);
            } else
                m_heap.escape(node->child1().node());
            break;

        case PutByOffset:
            target = m_heap.onlyLocalAllocation(node->child2().node());
            if (target && target->isObjectAllocation()) {
                unsigned identifierNumber = node->storageAccessData().identifierNumber;
                writes.add(
                    PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber),
                    LazyNode(node->child3().node()));
            } else {
                m_heap.escape(node->child1().node());
                m_heap.escape(node->child2().node());
                m_heap.escape(node->child3().node());
            }
            break;

        case GetClosureVar:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isActivationAllocation()) {
                exactRead =
                    PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset());
            } else
                m_heap.escape(node->child1().node());
            break;

        case PutClosureVar:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isActivationAllocation()) {
                writes.add(
                    PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset()),
                    LazyNode(node->child2().node()));
            } else {
                m_heap.escape(node->child1().node());
                m_heap.escape(node->child2().node());
            }
            break;

        case SkipScope:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isActivationAllocation())
                exactRead = ActivationScopePLoc;
            else
                m_heap.escape(node->child1().node());
            break;

        case GetExecutable:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isFunctionAllocation())
                exactRead = FunctionExecutablePLoc;
            else
                m_heap.escape(node->child1().node());
            break;

        case GetScope:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isFunctionAllocation())
                exactRead = FunctionActivationPLoc;
            else
                m_heap.escape(node->child1().node());
            break;

        case Check:
            m_graph.doToChildren(
                node,
                [&] (Edge edge) {
                    if (edge.willNotHaveCheck())
                        return;

                    if (alreadyChecked(edge.useKind(), SpecObject))
                        return;

                    m_heap.escape(edge.node());
                });
            break;

        case MovHint:
        case PutHint:
            // Handled by OSR availability analysis
            break;

        default:
            m_graph.doToChildren(
                node,
                [&] (Edge edge) {
                    m_heap.escape(edge.node());
                });
            break;
        }

        if (exactRead) {
            ASSERT(target);
            ASSERT(writes.isEmpty());
            if (Node* value = heapResolve(PromotedHeapLocation(target->identifier(), exactRead))) {
                ASSERT(!value->replacement());
                node->replaceWith(value);
            }
            Node* identifier = target->get(exactRead);
            if (identifier)
                m_heap.newPointer(node, identifier);
        }

        for (auto entry : writes) {
            ASSERT(target);
            if (entry.value.isNode())
                target->set(entry.key, m_heap.follow(entry.value.asNode()));
            else
                target->remove(entry.key);
            heapWrite(PromotedHeapLocation(target->identifier(), entry.key), entry.value);
        }

        m_heap.assertIsValid();
    }
    bool determineSinkCandidates()
    {
        m_sinkCandidates.clear();
        m_materializationToEscapee.clear();
        m_materializationSiteToMaterializations.clear();
        m_materializationSiteToRecoveries.clear();

        // Logically we wish to consider every allocation and sink
        // it. However, it is probably not profitable to sink an
        // allocation that will always escape. So, we only sink an
        // allocation if one of the following is true:
        //
        // 1) There exists a basic block with only backwards outgoing
        //    edges (or no outgoing edges) in which the node wasn't
        //    materialized. This is meant to catch
        //    effectively-infinite loops in which we don't need to
        //    have allocated the object.
        //
        // 2) There exists a basic block at the tail of which the node
        //    is dead and not materialized.
        //
        // 3) The sum of execution counts of the materializations is
        //    less than the sum of execution counts of the original
        //    allocation.
        //
        // We currently implement only rule #2.
        // FIXME: Implement the two other rules.
        // https://bugs.webkit.org/show_bug.cgi?id=137073 (rule #1)
        // https://bugs.webkit.org/show_bug.cgi?id=137074 (rule #3)
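        //
        // To illustrate rule #2 (a sketch, not part of the original
        // comment):
        //
        // Block #0
        //  0: NewObject({})
        //  -: Branch(#1, #2)
        //
        // Block #1
        //  1: Call(f, @0)
        //
        // Block #2
        //  -: Return(undefined)
        //
        // @0 is dead and not materialized at the tail of block #2, so
        // it is a sink candidate: block #1 gets a materialization and
        // the path through block #2 never allocates at all.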
        //
        // However, these rules allow for a sunk object to be put into
        // a non-sunk one, which we don't support. We could solve this
        // by supporting PutHints on local allocations, making these
        // objects only partially correct, and we would need to adapt
        // the OSR availability analysis and OSR exit to handle
        // this. This would be totally doable, but would create a
        // super rare, and thus bug-prone, code path.
        // So, instead, we need to implement one of the following
        // closure rules:
        //
        // 1) If we put a sink candidate into a local allocation that
        //    is not a sink candidate, change our minds and don't
        //    actually sink the sink candidate.
        //
        // 2) If we put a sink candidate into a local allocation, that
        //    allocation becomes a sink candidate as well.
        //
        // We currently choose to implement closure rule #2.
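        //
        // For example (a hypothetical sketch): if sink candidate @0 is
        // stored into another local allocation via
        // PutByOffset(@1, @0, x), then @1 is added to the candidate
        // set as well, so the PutHint we will emit for @1.x never
        // targets a non-sunk object.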
        HashMap<Node*, Vector<Node*>> dependencies;
        bool hasUnescapedReads = false;
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            m_heap = m_heapAtHead[block];

            for (Node* node : *block) {
                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        if (!value.isNode())
                            return;

                        Allocation* allocation = m_heap.onlyLocalAllocation(value.asNode());
                        if (allocation && !allocation->isEscapedAllocation())
                            dependencies.add(allocation->identifier(), Vector<Node*>()).iterator->value.append(location.base());
                    },
                    [&] (PromotedHeapLocation) -> Node* {
                        hasUnescapedReads = true;
                        return nullptr;
                    });
            }

            // The sink candidates are initially the unescaped
            // allocations dying at tail of blocks.
            HashSet<Node*> allocations;
            for (const auto& entry : m_heap.allocations()) {
                if (!entry.value.isEscapedAllocation())
                    allocations.add(entry.key);
            }

            m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

            for (Node* identifier : allocations) {
                if (!m_heap.isAllocation(identifier))
                    m_sinkCandidates.add(identifier);
            }
        }

        // Ensure that the set of sink candidates is closed for put operations.
        Vector<Node*> worklist;
        worklist.appendRange(m_sinkCandidates.begin(), m_sinkCandidates.end());

        while (!worklist.isEmpty()) {
            for (Node* identifier : dependencies.get(worklist.takeLast())) {
                if (m_sinkCandidates.add(identifier).isNewEntry)
                    worklist.append(identifier);
            }
        }

        if (m_sinkCandidates.isEmpty())
            return hasUnescapedReads;

        if (verbose)
            dataLog("Candidates: ", listDump(m_sinkCandidates), "\n");

        // Create the materialization nodes.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            m_heap = m_heapAtHead[block];
            m_heap.setWantEscapees();

            for (Node* node : *block) {
                handleNode(
                    node,
                    [] (PromotedHeapLocation, LazyNode) { },
                    [] (PromotedHeapLocation) -> Node* {
                        return nullptr;
                    });
                auto escapees = m_heap.takeEscapees();
                if (!escapees.isEmpty())
                    placeMaterializations(escapees, node);
            }

            m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

            HashMap<Node*, Allocation> escapingOnEdge;
            for (const auto& entry : m_heap.allocations()) {
                if (entry.value.isEscapedAllocation())
                    continue;

                bool mustEscape = false;
                for (BasicBlock* successorBlock : block->successors()) {
                    if (!m_heapAtHead[successorBlock].isAllocation(entry.key)
                        || m_heapAtHead[successorBlock].getAllocation(entry.key).isEscapedAllocation())
                        mustEscape = true;
                }

                if (mustEscape)
                    escapingOnEdge.add(entry.key, entry.value);
            }
            placeMaterializations(WTF::move(escapingOnEdge), block->terminal());
        }

        return hasUnescapedReads || !m_sinkCandidates.isEmpty();
    }
    void placeMaterializations(HashMap<Node*, Allocation> escapees, Node* where)
    {
        // We don't create materializations if the escapee is not a
        // sink candidate.
        Vector<Node*> toRemove;
        for (const auto& entry : escapees) {
            if (!m_sinkCandidates.contains(entry.key))
                toRemove.append(entry.key);
        }
        for (Node* identifier : toRemove)
            escapees.remove(identifier);

        if (escapees.isEmpty())
            return;

        // First collect the hints that will be needed when the node
        // we materialize is still stored into other unescaped sink candidates.
        Vector<PromotedHeapLocation> hints;
        for (const auto& entry : m_heap.allocations()) {
            if (escapees.contains(entry.key))
                continue;

            for (const auto& field : entry.value.fields()) {
                ASSERT(m_sinkCandidates.contains(entry.key) || !escapees.contains(field.value));
                if (escapees.contains(field.value) && !field.key.neededForMaterialization())
                    hints.append(PromotedHeapLocation(entry.key, field.key));
            }
        }
        // Now we need to order the materializations. Any order is
        // valid (as long as we materialize a node first if it is
        // needed for the materialization of another node, e.g. a
        // function's activation must be materialized before the
        // function itself), but we want to try minimizing the number
        // of times we have to place Puts to close cycles after a
        // materialization. In other words, we are trying to find the
        // minimum number of materializations to remove from the
        // materialization graph to make it a DAG, known as the
        // feedback vertex set problem. Unfortunately, this is an
        // NP-hard problem, which we don't want to solve exactly.
        //
        // Instead, we use a simple greedy procedure, that proceeds as
        // follows:
        //
        // - While there is at least one node with no outgoing edge
        //   amongst the remaining materializations, materialize it
        //   first.
        //
        // - Similarly, while there is at least one node with no
        //   incoming edge amongst the remaining materializations,
        //   materialize it last.
        //
        // - When both previous conditions are false, we have an
        //   actual cycle, and we need to pick a node to
        //   materialize. We try greedily to remove the "pressure" on
        //   the remaining nodes by choosing the node with maximum
        //   |incoming edges| * |outgoing edges| as a measure of how
        //   "central" to the graph it is. We materialize it first,
        //   so that all the recoveries will be Puts of things into
        //   it (rather than Puts of the materialization into other
        //   objects), which means we will have a single
        //   materialization point.
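        //
        // A minimal cycle, to make the procedure concrete (sketch, not
        // from the original comment): if #@1.f == #@2 and #@2.g == #@1
        // both escape here, neither of the first two rules applies. We
        // pick one, say #@1, and materialize it first; #@2 can then be
        // materialized with its g field pointing directly at the fresh
        // object, and a single recovery Put fixes up @1.f afterwards.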
        // Compute dependencies between materializations
        HashMap<Node*, HashSet<Node*>> dependencies;
        HashMap<Node*, HashSet<Node*>> reverseDependencies;
        HashMap<Node*, HashSet<Node*>> forMaterialization;
        for (const auto& entry : escapees) {
            auto& myDependencies = dependencies.add(entry.key, HashSet<Node*>()).iterator->value;
            auto& myDependenciesForMaterialization = forMaterialization.add(entry.key, HashSet<Node*>()).iterator->value;
            reverseDependencies.add(entry.key, HashSet<Node*>());
            for (const auto& field : entry.value.fields()) {
                if (escapees.contains(field.value) && field.value != entry.key) {
                    myDependencies.add(field.value);
                    reverseDependencies.add(field.value, HashSet<Node*>()).iterator->value.add(entry.key);
                    if (field.key.neededForMaterialization())
                        myDependenciesForMaterialization.add(field.value);
                }
            }
        }

        // Helper function to update the materialized set and the
        // dependencies.
        HashSet<Node*> materialized;
        auto materialize = [&] (Node* identifier) {
            materialized.add(identifier);
            for (Node* dep : dependencies.get(identifier))
                reverseDependencies.find(dep)->value.remove(identifier);
            for (Node* rdep : reverseDependencies.get(identifier)) {
                dependencies.find(rdep)->value.remove(identifier);
                forMaterialization.find(rdep)->value.remove(identifier);
            }
            dependencies.remove(identifier);
            reverseDependencies.remove(identifier);
            forMaterialization.remove(identifier);
        };
        // Nodes without remaining unmaterialized fields will be
        // materialized first - amongst the remaining unmaterialized
        // nodes.
        std::list<Allocation> toMaterialize;
        auto firstPos = toMaterialize.begin();
        auto materializeFirst = [&] (Allocation&& allocation) {
            materialize(allocation.identifier());
            // We need to insert *after* the current position
            if (firstPos != toMaterialize.end())
                ++firstPos;
            firstPos = toMaterialize.insert(firstPos, WTF::move(allocation));
        };

        // Nodes that no other unmaterialized node points to will be
        // materialized last - amongst the remaining unmaterialized
        // nodes.
        auto lastPos = toMaterialize.end();
        auto materializeLast = [&] (Allocation&& allocation) {
            materialize(allocation.identifier());
            lastPos = toMaterialize.insert(lastPos, WTF::move(allocation));
        };

        // These are the promoted locations that contain some of the
        // allocations we are currently escaping. If they are a location on
        // some other allocation we are currently materializing, we will need
        // to "recover" their value with a real put once the corresponding
        // allocation is materialized; if they are a location on some other
        // not-yet-materialized allocation, we will need a PutHint.
        Vector<PromotedHeapLocation> toRecover;
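        // Sketch of the two flavors (illustrative names): with
        // #@0.f == #@1, if @0 is materialized here as @0m, we emit a
        // real PutByOffset(@0m, @1m, f) once @1m exists; if instead
        // #@0 remains sunk, we only emit PutHint(@0, @1m, f) so that
        // OSR exit can reconstruct @0.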
        // This loop does the actual cycle breaking
        while (!escapees.isEmpty()) {
            materialized.clear();

            // Materialize nodes that won't require recoveries if we can
            for (auto& entry : escapees) {
                if (!forMaterialization.find(entry.key)->value.isEmpty())
                    continue;

                if (dependencies.find(entry.key)->value.isEmpty()) {
                    materializeFirst(WTF::move(entry.value));
                    continue;
                }

                if (reverseDependencies.find(entry.key)->value.isEmpty()) {
                    materializeLast(WTF::move(entry.value));
                    continue;
                }
            }

            // We reach this only if there is an actual cycle that needs
            // breaking. Because we do not want to solve an NP-hard problem
            // here, we just heuristically pick a node and materialize it
            // first.
            if (materialized.isEmpty()) {
                uint64_t maxEvaluation = 0;
                Allocation* bestAllocation = nullptr;
                for (auto& entry : escapees) {
                    if (!forMaterialization.find(entry.key)->value.isEmpty())
                        continue;

                    uint64_t evaluation =
                        static_cast<uint64_t>(dependencies.get(entry.key).size()) * reverseDependencies.get(entry.key).size();
                    if (evaluation > maxEvaluation) {
                        maxEvaluation = evaluation;
                        bestAllocation = &entry.value;
                    }
                }
                RELEASE_ASSERT(maxEvaluation > 0);

                materializeFirst(WTF::move(*bestAllocation));
            }
            RELEASE_ASSERT(!materialized.isEmpty());

            for (Node* identifier : materialized)
                escapees.remove(identifier);
        }

        materialized.clear();

        HashSet<Node*> escaped;
        for (const Allocation& allocation : toMaterialize)
            escaped.add(allocation.identifier());
        for (const Allocation& allocation : toMaterialize) {
            for (const auto& field : allocation.fields()) {
                if (escaped.contains(field.value) && !materialized.contains(field.value))
                    toRecover.append(PromotedHeapLocation(allocation.identifier(), field.key));
            }
            materialized.add(allocation.identifier());
        }

        Vector<Node*>& materializations = m_materializationSiteToMaterializations.add(
            where, Vector<Node*>()).iterator->value;

        for (const Allocation& allocation : toMaterialize) {
            Node* materialization = createMaterialization(allocation, where);
            materializations.append(materialization);
            m_materializationToEscapee.add(materialization, allocation.identifier());
        }

        if (!toRecover.isEmpty()) {
            m_materializationSiteToRecoveries.add(
                where, Vector<PromotedHeapLocation>()).iterator->value.appendVector(toRecover);
        }

        // The hints need to be after the "real" recoveries so that we
        // don't hint not-yet-complete objects.
        if (!hints.isEmpty()) {
            m_materializationSiteToRecoveries.add(
                where, Vector<PromotedHeapLocation>()).iterator->value.appendVector(hints);
        }
    }
    Node* createMaterialization(const Allocation& allocation, Node* where)
    {
        // FIXME: This is the only place where we actually use the
        // fact that an allocation's identifier is indeed the node
        // that created the allocation.
        switch (allocation.kind()) {
        case Allocation::Kind::Object: {
            ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
            StructureSet* set = m_graph.addStructureSet(allocation.structures());

            return m_graph.addNode(
                allocation.identifier()->prediction(), Node::VarArg, MaterializeNewObject,
                NodeOrigin(
                    allocation.identifier()->origin.semantic,
                    where->origin.forExit),
                OpInfo(set), OpInfo(data), 0, 0);
        }

        case Allocation::Kind::Function: {
            FrozenValue* executable = allocation.identifier()->cellOperand();

            return m_graph.addNode(
                allocation.identifier()->prediction(), NewFunction,
                NodeOrigin(
                    allocation.identifier()->origin.semantic,
                    where->origin.forExit),
                OpInfo(executable));
        }

        case Allocation::Kind::Activation: {
            ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
            FrozenValue* symbolTable = allocation.identifier()->cellOperand();

            return m_graph.addNode(
                allocation.identifier()->prediction(), Node::VarArg, MaterializeCreateActivation,
                NodeOrigin(
                    allocation.identifier()->origin.semantic,
                    where->origin.forExit),
                OpInfo(symbolTable), OpInfo(data), 0, 0);
        }

        default:
            DFG_CRASH(m_graph, allocation.identifier(), "Bad allocation kind");
        }
    }
    void promoteLocalHeap()
    {
        // Collect the set of heap locations that we will be operating
        // on.
        HashSet<PromotedHeapLocation> locations;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            m_heap = m_heapAtHead[block];

            for (Node* node : *block) {
                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode) {
                        // If the location is not on a sink candidate,
                        // we only sink it if it is read
                        if (m_sinkCandidates.contains(location.base()))
                            locations.add(location);
                    },
                    [&] (PromotedHeapLocation location) -> Node* {
                        locations.add(location);
                        return nullptr;
                    });
            }
        }

        // Figure out which locations belong to which allocations.
        m_locationsForAllocation.clear();
        for (PromotedHeapLocation location : locations) {
            auto result = m_locationsForAllocation.add(
                location.base(),
                Vector<PromotedHeapLocation>());
            ASSERT(!result.iterator->value.contains(location));
            result.iterator->value.append(location);
        }

        m_pointerSSA.reset();
        m_allocationSSA.reset();

        // Collect the set of "variables" that we will be sinking.
        m_locationToVariable.clear();
        m_nodeToVariable.clear();
        Vector<Node*> indexToNode;
        Vector<PromotedHeapLocation> indexToLocation;

        for (Node* index : m_sinkCandidates) {
            SSACalculator::Variable* variable = m_allocationSSA.newVariable();
            m_nodeToVariable.add(index, variable);
            ASSERT(indexToNode.size() == variable->index());
            indexToNode.append(index);
        }

        for (PromotedHeapLocation location : locations) {
            SSACalculator::Variable* variable = m_pointerSSA.newVariable();
            m_locationToVariable.add(location, variable);
            ASSERT(indexToLocation.size() == variable->index());
            indexToLocation.append(location);
        }

        // We insert all required constants at top of block 0 so that
        // they are inserted only once and we don't clutter the graph
        // with useless constants everywhere.
        HashMap<FrozenValue*, Node*> lazyMapping;
        m_bottom = m_insertionSet.insertConstant(0, NodeOrigin(), jsNumber(1927));
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            m_heap = m_heapAtHead[block];

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                // Some named properties can be added conditionally,
                // and that would necessitate bottoms
                for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
                    if (location.kind() != NamedPropertyPLoc)
                        continue;

                    SSACalculator::Variable* variable = m_locationToVariable.get(location);
                    m_pointerSSA.newDef(variable, block, m_bottom);
                }

                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialization);
                    m_allocationSSA.newDef(m_nodeToVariable.get(escapee), block, materialization);
                }

                if (m_sinkCandidates.contains(node))
                    m_allocationSSA.newDef(m_nodeToVariable.get(node), block, node);

                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        if (!locations.contains(location))
                            return;

                        Node* nodeValue;
                        if (value.isNode())
                            nodeValue = value.asNode();
                        else {
                            auto iter = lazyMapping.find(value.asValue());
                            if (iter != lazyMapping.end())
                                nodeValue = iter->value;
                            else {
                                nodeValue = value.ensureIsNode(
                                    m_insertionSet, m_graph.block(0), 0);
                                lazyMapping.add(value.asValue(), nodeValue);
                            }
                        }

                        SSACalculator::Variable* variable = m_locationToVariable.get(location);
                        m_pointerSSA.newDef(variable, block, nodeValue);
                    },
                    [] (PromotedHeapLocation) -> Node* {
                        return nullptr;
                    });
            }
        }
        m_insertionSet.execute(m_graph.block(0));
        // Run the SSA calculators to create Phis
        m_pointerSSA.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                PromotedHeapLocation location = indexToLocation[variable->index()];

                // Don't create Phi nodes for fields of dead allocations
                if (!m_heapAtHead[block].isAllocation(location.base()))
                    return nullptr;

                // Don't create Phi nodes once we are escaped
                if (m_heapAtHead[block].getAllocation(location.base()).isEscapedAllocation())
                    return nullptr;

                // If we point to a single allocation, we will
                // directly use its materialization
                if (m_heapAtHead[block].follow(location))
                    return nullptr;

                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, NodeOrigin());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });

        m_allocationSSA.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                Node* identifier = indexToNode[variable->index()];

                // Don't create Phi nodes for dead allocations
                if (!m_heapAtHead[block].isAllocation(identifier))
                    return nullptr;

                // Don't create Phi nodes until we are escaped
                if (!m_heapAtHead[block].getAllocation(identifier).isEscapedAllocation())
                    return nullptr;

                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, NodeOrigin());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });
        // Place Phis in the right places, replace all uses of any load with the appropriate
        // value, and create the materialization nodes.
        LocalOSRAvailabilityCalculator availabilityCalculator;
        m_graph.clearReplacements();
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            m_heap = m_heapAtHead[block];
            availabilityCalculator.beginBlock(block);

            // These mapping tables are intended to be lazy. If
            // something is omitted from the table, it means that
            // there haven't been any local stores to the promoted
            // heap location (or any local materialization).
            m_localMapping.clear();
            m_escapeeToMaterialization.clear();

            // Insert the Phi functions that we had previously
            // created.
            for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(block)) {
                SSACalculator::Variable* variable = phiDef->variable();
                m_insertionSet.insert(0, phiDef->value());

                PromotedHeapLocation location = indexToLocation[variable->index()];
                m_localMapping.set(location, phiDef->value());

                if (m_sinkCandidates.contains(location.base())) {
                    m_insertionSet.insert(
                        0, location.createHint(m_graph, NodeOrigin(), phiDef->value()));
                }
            }

            for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(block)) {
                SSACalculator::Variable* variable = phiDef->variable();
                m_insertionSet.insert(0, phiDef->value());

                Node* identifier = indexToNode[variable->index()];
                m_escapeeToMaterialization.add(identifier, phiDef->value());
                insertOSRHintsForUpdate(0, NodeOrigin(), availabilityCalculator.m_availability, identifier, phiDef->value());
            }

            if (verbose) {
                dataLog("Local mapping at ", pointerDump(block), ": ", mapDump(m_localMapping), "\n");
                dataLog("Local materializations at ", pointerDump(block), ": ", mapDump(m_escapeeToMaterialization), "\n");
            }

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
                    if (location.kind() != NamedPropertyPLoc)
                        continue;

                    m_localMapping.set(location, m_bottom);

                    if (m_sinkCandidates.contains(node)) {
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            location.createHint(m_graph, node->origin, m_bottom));
                    }
                }

                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialization);
                    populateMaterialization(block, materialization, escapee);
                    m_escapeeToMaterialization.set(escapee, materialization);
                    m_insertionSet.insert(nodeIndex, materialization);
                    if (verbose)
                        dataLog("Materializing ", escapee, " => ", materialization, " at ", node, "\n");
                }

                for (PromotedHeapLocation location : m_materializationSiteToRecoveries.get(node))
                    m_insertionSet.insert(nodeIndex, createRecovery(block, location, node));

                // We need to put the OSR hints after the recoveries,
                // because we only want the hints once the object is
                // complete.
                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialization);
                    insertOSRHintsForUpdate(
                        nodeIndex, node->origin,
                        availabilityCalculator.m_availability, escapee, materialization);
                }

                if (m_sinkCandidates.contains(node))
                    m_escapeeToMaterialization.set(node, node);

                availabilityCalculator.executeNode(node);

                bool doLower = false;
                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        if (!locations.contains(location))
                            return;

                        Node* nodeValue;
                        if (value.isNode())
                            nodeValue = value.asNode();
                        else
                            nodeValue = lazyMapping.get(value.asValue());

                        nodeValue = resolve(block, nodeValue);

                        m_localMapping.set(location, nodeValue);

                        if (!m_sinkCandidates.contains(location.base()))
                            return;

                        doLower = true;

                        m_insertionSet.insert(nodeIndex + 1,
                            location.createHint(m_graph, node->origin, nodeValue));
                    },
                    [&] (PromotedHeapLocation location) -> Node* {
                        return resolve(block, location);
                    });

                if (m_sinkCandidates.contains(node) || doLower) {
                    switch (node->op()) {
                    case NewObject:
                    case MaterializeNewObject:
                        node->convertToPhantomNewObject();
                        break;

                    case NewFunction:
                        node->convertToPhantomNewFunction();
                        break;

                    case CreateActivation:
                    case MaterializeCreateActivation:
                        node->convertToPhantomCreateActivation();
                        break;

                    default:
                        node->remove();
                        break;
                    }
                }

                m_graph.doToChildren(
                    node,
                    [&] (Edge& edge) {
                        edge.setNode(resolve(block, edge.node()));
                    });
            }

            // Gotta drop some Upsilons.
            NodeAndIndex terminal = block->findTerminal();
            size_t upsilonInsertionPoint = terminal.index;
            NodeOrigin upsilonOrigin = terminal.node->origin;
            for (BasicBlock* successorBlock : block->successors()) {
                for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    PromotedHeapLocation location = indexToLocation[variable->index()];
                    Node* incoming = resolve(block, location);

                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }

                for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    Node* incoming = getMaterialization(block, indexToNode[variable->index()]);

                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }
            }

            m_insertionSet.execute(block);
        }
    }
    Node* resolve(BasicBlock* block, PromotedHeapLocation location)
    {
        // If we are currently pointing to a single local allocation,
        // simply return the associated materialization.
        if (Node* identifier = m_heap.follow(location))
            return getMaterialization(block, identifier);

        if (Node* result = m_localMapping.get(location))
            return result;

        // This implies that there is no local mapping. Find a non-local mapping.
        SSACalculator::Def* def = m_pointerSSA.nonLocalReachingDef(
            block, m_locationToVariable.get(location));
        ASSERT(def);
        ASSERT(def->value());

        Node* result = def->value();

        ASSERT(!result->replacement());

        m_localMapping.add(location, result);
        return result;
    }

    Node* resolve(BasicBlock* block, Node* node)
    {
        // If we are currently pointing to a single local allocation,
        // simply return the associated materialization.
        if (Node* identifier = m_heap.follow(node))
            return getMaterialization(block, identifier);

        if (node->replacement())
            node = node->replacement();
        ASSERT(!node->replacement());

        return node;
    }

    Node* getMaterialization(BasicBlock* block, Node* identifier)
    {
        ASSERT(m_heap.isAllocation(identifier));
        if (!m_sinkCandidates.contains(identifier))
            return identifier;

        if (Node* materialization = m_escapeeToMaterialization.get(identifier))
            return materialization;

        SSACalculator::Def* def = m_allocationSSA.nonLocalReachingDef(
            block, m_nodeToVariable.get(identifier));
        ASSERT(def && def->value());
        m_escapeeToMaterialization.add(identifier, def->value());
        ASSERT(!def->value()->replacement());
        return def->value();
    }
    void insertOSRHintsForUpdate(unsigned nodeIndex, NodeOrigin origin, AvailabilityMap& availability, Node* escapee, Node* materialization)
    {
        // We need to follow() the value in the heap.
        // Consider the following graph:
        //
        // Block #0
        //  0: NewObject({})
        //  1: NewObject({})
        //  -: PutByOffset(@0, @1, x:0)
        //  -: PutStructure(@0, {x:0})
        //  2: GetByOffset(@0, x:0)
        //  -: MovHint(@2, loc1)
        //  -: Branch(#1, #2)
        //
        // Block #1
        //  3: Call(f, @0)
        //
        // Block #2
        //  -: Return(undefined)
        //
        // We need to materialize @1 at @3, and when doing so we need
        // to insert a MovHint for the materialization into loc1 as
        // well.
        // In order to do this, we say that we need to insert an
        // update hint for any availability whose node resolve()s to
        // the materialization.
        for (auto entry : availability.m_heap) {
            if (!entry.value.hasNode())
                continue;
            if (m_heap.follow(entry.value.node()) != escapee)
                continue;

            m_insertionSet.insert(
                nodeIndex, entry.key.createHint(m_graph, origin, materialization));
        }

        for (unsigned i = availability.m_locals.size(); i--;) {
            if (!availability.m_locals[i].hasNode())
                continue;
            if (m_heap.follow(availability.m_locals[i].node()) != escapee)
                continue;

            int operand = availability.m_locals.operandForIndex(i);
            m_insertionSet.insertNode(
                nodeIndex, SpecNone, MovHint, origin, OpInfo(operand),
                materialization->defaultEdge());
        }
    }
    void populateMaterialization(BasicBlock* block, Node* node, Node* escapee)
    {
        Allocation& allocation = m_heap.getAllocation(escapee);
        switch (node->op()) {
        case MaterializeNewObject: {
            ObjectMaterializationData& data = node->objectMaterializationData();
            unsigned firstChild = m_graph.m_varArgChildren.size();

            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);

            PromotedHeapLocation structure(StructurePLoc, allocation.identifier());
            ASSERT(locations.contains(structure));

            m_graph.m_varArgChildren.append(Edge(resolve(block, structure), KnownCellUse));

            for (PromotedHeapLocation location : locations) {
                switch (location.kind()) {
                case StructurePLoc:
                    ASSERT(location == structure);
                    break;

                case NamedPropertyPLoc: {
                    ASSERT(location.base() == allocation.identifier());
                    data.m_properties.append(PhantomPropertyValue(location.info()));
                    Node* value = resolve(block, location);
                    if (m_sinkCandidates.contains(value))
                        m_graph.m_varArgChildren.append(m_bottom);
                    else
                        m_graph.m_varArgChildren.append(value);
                    break;
                }

                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }

            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }

        case MaterializeCreateActivation: {
            ObjectMaterializationData& data = node->objectMaterializationData();

            unsigned firstChild = m_graph.m_varArgChildren.size();

            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);

            PromotedHeapLocation symbolTable(ActivationSymbolTablePLoc, allocation.identifier());
            ASSERT(locations.contains(symbolTable));
            ASSERT(node->cellOperand() == resolve(block, symbolTable)->constant());
            m_graph.m_varArgChildren.append(Edge(resolve(block, symbolTable), KnownCellUse));

            PromotedHeapLocation scope(ActivationScopePLoc, allocation.identifier());
            ASSERT(locations.contains(scope));
            m_graph.m_varArgChildren.append(Edge(resolve(block, scope), KnownCellUse));

            for (PromotedHeapLocation location : locations) {
                switch (location.kind()) {
                case ActivationScopePLoc: {
                    ASSERT(location == scope);
                    break;
                }

                case ActivationSymbolTablePLoc: {
                    ASSERT(location == symbolTable);
                    break;
                }

                case ClosureVarPLoc: {
                    ASSERT(location.base() == allocation.identifier());
                    data.m_properties.append(PhantomPropertyValue(location.info()));
                    Node* value = resolve(block, location);
                    if (m_sinkCandidates.contains(value))
                        m_graph.m_varArgChildren.append(m_bottom);
                    else
                        m_graph.m_varArgChildren.append(value);
                    break;
                }

                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }

            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }

        case NewFunction: {
            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
            ASSERT(locations.size() == 2);

            PromotedHeapLocation executable(FunctionExecutablePLoc, allocation.identifier());
            ASSERT_UNUSED(executable, locations.contains(executable));

            PromotedHeapLocation activation(FunctionActivationPLoc, allocation.identifier());
            ASSERT(locations.contains(activation));

            node->child1() = Edge(resolve(block, activation), KnownCellUse);
            break;
        }

        default:
            DFG_CRASH(m_graph, node, "Bad materialize op");
        }
    }
    Node* createRecovery(BasicBlock* block, PromotedHeapLocation location, Node* where)
    {
        if (verbose)
            dataLog("Recovering ", location, " at ", where, "\n");
        ASSERT(location.base()->isPhantomAllocation());
        Node* base = getMaterialization(block, location.base());
        Node* value = resolve(block, location);

        if (verbose)
            dataLog("Base is ", base, " and value is ", value, "\n");

        if (base->isPhantomAllocation()) {
            return PromotedHeapLocation(base, location.descriptor()).createHint(
                m_graph,
                NodeOrigin(
                    base->origin.semantic,
                    where->origin.forExit),
                value);
        }

        switch (location.kind()) {
        case NamedPropertyPLoc: {
            Allocation& allocation = m_heap.getAllocation(location.base());

            Vector<Structure*> structures;
            structures.appendRange(allocation.structures().begin(), allocation.structures().end());
            unsigned identifierNumber = location.info();
            UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];

            std::sort(
                structures.begin(),
                structures.end(),
                [uid] (Structure* a, Structure* b) -> bool {
                    return a->getConcurrently(uid) < b->getConcurrently(uid);
                });

            PropertyOffset firstOffset = structures[0]->getConcurrently(uid);

            if (firstOffset == structures.last()->getConcurrently(uid)) {
                Node* storage = base;
                // FIXME: When we decide to sink objects with a
                // property storage, we should handle non-inline offsets.
                RELEASE_ASSERT(isInlineOffset(firstOffset));

                StorageAccessData* data = m_graph.m_storageAccessData.add();
                data->offset = firstOffset;
                data->identifierNumber = identifierNumber;

                return m_graph.addNode(
                    SpecNone,
                    PutByOffset,
                    NodeOrigin(
                        base->origin.semantic,
                        where->origin.forExit),
                    OpInfo(data),
                    Edge(storage, KnownCellUse),
                    Edge(base, KnownCellUse),
                    value->defaultEdge());
            }

            MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
            data->identifierNumber = identifierNumber;

            {
                PropertyOffset currentOffset = firstOffset;
                StructureSet currentSet;
                for (Structure* structure : structures) {
                    PropertyOffset offset = structure->getConcurrently(uid);
                    if (offset != currentOffset) {
                        data->variants.append(
                            PutByIdVariant::replace(currentSet, currentOffset));
                        currentOffset = offset;
                        currentSet.clear();
                    }
                    currentSet.add(structure);
                }
                data->variants.append(PutByIdVariant::replace(currentSet, currentOffset));
            }

            return m_graph.addNode(
                SpecNone,
                MultiPutByOffset,
                NodeOrigin(
                    base->origin.semantic,
                    where->origin.forExit),
                OpInfo(data),
                Edge(base, KnownCellUse),
                value->defaultEdge());
        }

        case ClosureVarPLoc: {
            return m_graph.addNode(
                SpecNone,
                PutClosureVar,
                NodeOrigin(
                    base->origin.semantic,
                    where->origin.forExit),
                OpInfo(location.info()),
                Edge(base, KnownCellUse),
                value->defaultEdge());
        }

        default:
            DFG_CRASH(m_graph, base, "Bad location kind");
        }
    }
    SSACalculator m_pointerSSA;
    SSACalculator m_allocationSSA;
    HashSet<Node*> m_sinkCandidates;
    HashMap<PromotedHeapLocation, SSACalculator::Variable*> m_locationToVariable;
    HashMap<Node*, SSACalculator::Variable*> m_nodeToVariable;
    HashMap<PromotedHeapLocation, Node*> m_localMapping;
    HashMap<Node*, Node*> m_escapeeToMaterialization;
    InsertionSet m_insertionSet;
    CombinedLiveness m_combinedLiveness;

    HashMap<Node*, Node*> m_materializationToEscapee;
    HashMap<Node*, Vector<Node*>> m_materializationSiteToMaterializations;
    HashMap<Node*, Vector<PromotedHeapLocation>> m_materializationSiteToRecoveries;

    HashMap<Node*, Vector<PromotedHeapLocation>> m_locationsForAllocation;

    BlockMap<LocalHeap> m_heapAtHead;
    BlockMap<LocalHeap> m_heapAtTail;
    LocalHeap m_heap;

    Node* m_bottom = nullptr;
};

} // anonymous namespace
bool performObjectAllocationSinking(Graph& graph)
{
    SamplingRegion samplingRegion("DFG Object Allocation Sinking Phase");
    return runPhase<ObjectAllocationSinkingPhase>(graph);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)