2 * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 #ifndef DFGClobberize_h
27 #define DFGClobberize_h
31 #include "DFGAbstractHeap.h"
32 #include "DFGEdgeUsesStructure.h"
34 #include "DFGHeapLocation.h"
35 #include "DFGLazyNode.h"
36 #include "DFGPureValue.h"
38 namespace JSC { namespace DFG {
// clobberize(): reports, through the read/write/def functors, the abstract
// heaps this node reads and writes, and the heap locations / pure values it
// defines. See the long comment below for the precise contract.
40 template<typename ReadFunctor, typename WriteFunctor, typename DefFunctor>
41 void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFunctor& write, const DefFunctor& def)
45 // - The canonical way of clobbering the world is to read world and write
46 // heap. This is because World subsumes Heap and Stack, and Stack can be
47 // read by anyone but only written to by explicit stack writing operations.
48 // Of course, claiming to also write World is not wrong; it'll just
49 // pessimise some important optimizations.
51 // - We cannot hoist, or sink, anything that has effects. This means that the
52 // easiest way of indicating that something cannot be hoisted is to claim
53 // that it side-effects some miscellaneous thing.
55 // - We cannot hoist forward-exiting nodes without some additional effort. I
56 // believe that what it comes down to is that forward-exiting generally have
57 // their NodeExitsForward cleared upon hoist, except for forward-exiting
58 // nodes that take bogus state as their input. Those are substantially
59 // harder. We disable it for now. In the future we could enable it by having
60 // versions of those nodes that backward-exit instead, but I'm not convinced
63 // - Some nodes lie, and claim that they do not read the JSCell_structureID,
64 // JSCell_typeInfoFlags, etc. These are nodes that use the structure in a way
65 // that does not depend on things that change under structure transitions.
67 // - It's implicitly understood that OSR exits read the world. This is why we
68 // generally don't move or eliminate stores. Every node can exit, so the
69 // read set does not reflect things that would be read if we exited.
70 // Instead, the read set reflects what the node will have to read if it
73 // - Broadly, we don't say that we're reading something if that something is
76 // - We try to make this work even prior to type inference, just so that we
77 // can use it for IR dumps. No promises on whether the answers are sound
78 // prior to type inference - though they probably could be if we did some
81 // - If you do read(Stack) or read(World), then make sure that readTop() in
82 // PreciseLocalClobberize is correct.
84 // While read() and write() are fairly self-explanatory - they track what sorts of things the
85 // node may read or write - the def() functor is more tricky. It tells you the heap locations
86 // (not just abstract heaps) that are defined by a node. A heap location comprises an abstract
87 // heap, some nodes, and a LocationKind. Briefly, a location defined by a node is a location
88 // whose value can be deduced from looking at the node itself. The locations returned must obey
89 // the following properties:
91 // - If someone wants to CSE a load from the heap, then a HeapLocation object should be
92 // sufficient to find a single matching node.
94 // - The abstract heap is the only abstract heap that could be clobbered to invalidate any such
95 // CSE attempt. I.e. if clobberize() reports that on every path between some node and a node
96 // that defines a HeapLocation that it wanted, there were no writes to any abstract heap that
97 // overlap the location's heap, then we have a sound match. Effectively, the semantics of
98 // write() and def() are intertwined such that for them to be sound they must agree on what
101 // read(), write(), and def() for heap locations is enough to do GCSE on effectful things. To
102 // keep things simple, this code will also def() pure things. def() must be overloaded to also
103 // accept PureValue. This way, a client of clobberize() can implement GCSE entirely using the
104 // information that clobberize() passes to write() and def(). Other clients of clobberize() can
105 // just ignore def() by using a NoOpClobberize functor.
// Edges with Structure-checking use kinds implicitly read the structure ID.
107 if (edgesUseStructure(graph, node))
108 read(JSCell_structureID);
// Dispatch on opcode: each case declares this node's read/write/def sets.
110 switch (node->op()) {
114 def(PureValue(node, node->constant()));
120 case ExtractOSREntryLocal:
121 case CheckStructureImmediate:
142 case LoadArrowFunctionThis:
144 case StringCharCodeAt:
145 case StringFromCharCode:
146 case CompareStrictEq:
157 case BooleanToNumber:
// Pure nodes: no heap traffic; they def a PureValue keyed on the node alone.
165 def(PureValue(node));
168 case HasGenericProperty:
169 case HasStructureProperty:
170 case GetEnumerableLength:
171 case GetPropertyEnumerator: {
177 case GetDirectPname: {
178 // This reads and writes heap because it can end up calling a generic getByVal
179 // if the Structure changed, which could in turn end up calling a getter.
186 case GetEnumeratorStructurePname:
187 case GetEnumeratorGenericPname: {
188 def(PureValue(node));
// Indexed probes: in-bounds accesses read the relevant length and indexed
// heap and can be CSE'd via HasIndexedPropertyLoc.
192 case HasIndexedProperty: {
193 read(JSObject_butterfly);
194 ArrayMode mode = node->arrayMode();
195 switch (mode.type()) {
197 if (mode.isInBounds()) {
198 read(Butterfly_publicLength);
199 read(IndexedInt32Properties);
200 def(HeapLocation(HasIndexedPropertyLoc, IndexedInt32Properties, node->child1(), node->child2()), LazyNode(node));
207 case Array::Double: {
208 if (mode.isInBounds()) {
209 read(Butterfly_publicLength);
210 read(IndexedDoubleProperties);
211 def(HeapLocation(HasIndexedPropertyLoc, IndexedDoubleProperties, node->child1(), node->child2()), LazyNode(node));
218 case Array::Contiguous: {
219 if (mode.isInBounds()) {
220 read(Butterfly_publicLength);
221 read(IndexedContiguousProperties);
222 def(HeapLocation(HasIndexedPropertyLoc, IndexedContiguousProperties, node->child1(), node->child2()), LazyNode(node));
229 case Array::ArrayStorage: {
230 if (mode.isInBounds()) {
231 read(Butterfly_vectorLength);
232 read(IndexedArrayStorageProperties);
245 RELEASE_ASSERT_NOT_REACHED();
// Arith nodes carry their ArithMode in the PureValue key so nodes with
// different overflow handling do not CSE with each other.
256 def(PureValue(node, node->arithMode()));
260 switch (node->binaryUseKind()) {
264 def(PureValue(node, node->arithMode()));
271 DFG_CRASH(graph, node, "Bad use kind");
275 def(PureValue(node, static_cast<uintptr_t>(node->arithRoundingMode())));
279 def(PureValue(CheckCell, AdjacencyList(AdjacencyList::Fixed, node->child1()), node->cellOperand()));
283 def(PureValue(CheckNotEmpty, AdjacencyList(AdjacencyList::Fixed, node->child1())));
287 def(PureValue(CheckIdent, AdjacencyList(AdjacencyList::Fixed, node->child1()), node->uidOperand()));
290 case ConstantStoragePointer:
291 def(PureValue(node, node->storagePointer()));
310 case CheckTierUpInLoop:
311 case CheckTierUpAtReturn:
312 case CheckTierUpAndOSREnter:
313 case CheckTierUpWithNestedTriggerAndOSREnter:
316 case ProfileWillCall:
319 case ProfileControlFlow:
325 case InvalidationPoint:
327 def(HeapLocation(InvalidationPointLoc, Watchpoint_fire), LazyNode(node));
331 read(AbstractHeap(Stack, node->local()));
336 write(Watchpoint_fire);
340 case CreateActivation: {
341 SymbolTable* table = node->castOperand<SymbolTable*>();
// Reports a Watchpoint_fire write when the singleton scope is still valid.
342 if (table->singletonScope()->isStillValid())
343 write(Watchpoint_fire);
344 read(HeapObjectCount);
345 write(HeapObjectCount);
349 case CreateDirectArguments:
350 case CreateScopedArguments:
351 case CreateClonedArguments:
// Allocations touch HeapObjectCount so distinct allocations can't be CSE'd.
353 read(HeapObjectCount);
354 write(HeapObjectCount);
357 case PhantomDirectArguments:
358 case PhantomClonedArguments:
359 // DFG backend requires that the locals that this reads are flushed. FTL backend can handle those
360 // locals being promoted.
361 if (!isFTL(graph.m_plan.mode))
364 // Even though it's phantom, it still has the property that one can't be replaced with another.
365 read(HeapObjectCount);
366 write(HeapObjectCount);
372 read(HeapObjectCount);
373 write(HeapObjectCount);
376 case VarInjectionWatchpoint:
378 def(HeapLocation(VarInjectionWatchpointLoc, MiscFields), LazyNode(node));
383 def(HeapLocation(IsObjectOrNullLoc, MiscFields, node->child1()), LazyNode(node));
388 def(HeapLocation(IsFunctionLoc, MiscFields, node->child1()), LazyNode(node));
398 case PutGetterSetterById:
404 case TailCallInlinedCaller:
407 case CallForwardVarargs:
408 case TailCallVarargsInlinedCaller:
409 case TailCallForwardVarargsInlinedCaller:
410 case ConstructVarargs:
411 case ConstructForwardVarargs:
420 case TailCallVarargs:
421 case TailCallForwardVarargs:
427 read(GetterSetter_getter);
428 def(HeapLocation(GetterLoc, GetterSetter_getter, node->child1()), LazyNode(node));
432 read(GetterSetter_setter);
433 def(HeapLocation(SetterLoc, GetterSetter_setter, node->child1()), LazyNode(node));
437 read(AbstractHeap(Stack, JSStack::Callee));
438 def(HeapLocation(StackLoc, AbstractHeap(Stack, JSStack::Callee)), LazyNode(node));
441 case GetArgumentCount:
442 read(AbstractHeap(Stack, JSStack::ArgumentCount));
443 def(HeapLocation(StackPayloadLoc, AbstractHeap(Stack, JSStack::ArgumentCount)), LazyNode(node));
451 read(AbstractHeap(Stack, node->local()));
452 def(HeapLocation(StackLoc, AbstractHeap(Stack, node->local())), LazyNode(node));
// Stack stores def the stored child so later loads of the same slot can be
// replaced with the stored value.
456 write(AbstractHeap(Stack, node->local()));
457 def(HeapLocation(StackLoc, AbstractHeap(Stack, node->local())), LazyNode(node->child1().node()));
461 AbstractHeap heap(Stack, node->stackAccessData()->local);
463 def(HeapLocation(StackLoc, heap), LazyNode(node));
468 AbstractHeap heap(Stack, node->stackAccessData()->local);
470 def(HeapLocation(StackLoc, heap), LazyNode(node->child1().node()));
// Varargs loads write the count slot plus every slot up to the static limit.
477 LoadVarargsData* data = node->loadVarargsData();
478 write(AbstractHeap(Stack, data->count.offset()));
479 for (unsigned i = data->limit; i--;)
480 write(AbstractHeap(Stack, data->start.offset() + static_cast<int>(i)));
484 case ForwardVarargs: {
485 // We could be way more precise here.
488 LoadVarargsData* data = node->loadVarargsData();
489 write(AbstractHeap(Stack, data->count.offset()));
490 for (unsigned i = data->limit; i--;)
491 write(AbstractHeap(Stack, data->start.offset() + static_cast<int>(i)));
495 case GetLocalUnlinked:
496 read(AbstractHeap(Stack, node->unlinkedLocal()));
497 def(HeapLocation(StackLoc, AbstractHeap(Stack, node->unlinkedLocal())), LazyNode(node));
// NOTE(review): this ArrayMode switch appears to belong to an indexed-load
// case (GetByVal) whose label is not visible in this view - confirm.
501 ArrayMode mode = node->arrayMode();
502 switch (mode.type()) {
503 case Array::SelectUsingPredictions:
504 case Array::Unprofiled:
505 case Array::SelectUsingArguments:
506 // Assume the worst since we don't have profiling yet.
511 case Array::ForceExit:
521 if (mode.isOutOfBounds()) {
526 // This appears to read nothing because it's only reading immutable data.
527 def(PureValue(node, mode.asWord()));
530 case Array::DirectArguments:
531 read(DirectArgumentsProperties);
532 def(HeapLocation(IndexedPropertyLoc, DirectArgumentsProperties, node->child1(), node->child2()), LazyNode(node));
535 case Array::ScopedArguments:
536 read(ScopeProperties);
537 def(HeapLocation(IndexedPropertyLoc, ScopeProperties, node->child1(), node->child2()), LazyNode(node));
541 if (mode.isInBounds()) {
542 read(Butterfly_publicLength);
543 read(IndexedInt32Properties);
544 def(HeapLocation(IndexedPropertyLoc, IndexedInt32Properties, node->child1(), node->child2()), LazyNode(node));
552 if (mode.isInBounds()) {
553 read(Butterfly_publicLength);
554 read(IndexedDoubleProperties);
555 def(HeapLocation(IndexedPropertyLoc, IndexedDoubleProperties, node->child1(), node->child2()), LazyNode(node));
562 case Array::Contiguous:
563 if (mode.isInBounds()) {
564 read(Butterfly_publicLength);
565 read(IndexedContiguousProperties);
566 def(HeapLocation(IndexedPropertyLoc, IndexedContiguousProperties, node->child1(), node->child2()), LazyNode(node));
573 case Array::Undecided:
574 def(PureValue(node));
577 case Array::ArrayStorage:
578 case Array::SlowPutArrayStorage:
579 if (mode.isInBounds()) {
580 read(Butterfly_vectorLength);
581 read(IndexedArrayStorageProperties);
588 case Array::Int8Array:
589 case Array::Int16Array:
590 case Array::Int32Array:
591 case Array::Uint8Array:
592 case Array::Uint8ClampedArray:
593 case Array::Uint16Array:
594 case Array::Uint32Array:
595 case Array::Float32Array:
596 case Array::Float64Array:
597 read(TypedArrayProperties);
599 def(HeapLocation(IndexedPropertyLoc, TypedArrayProperties, node->child1(), node->child2()), LazyNode(node));
601 // We should not get an AnyTypedArray in a GetByVal as AnyTypedArray is only created from intrinsics, which
602 // are only added from Inline Caching a GetById.
603 case Array::AnyTypedArray:
604 DFG_CRASH(graph, node, "impossible array mode for get");
607 RELEASE_ASSERT_NOT_REACHED();
611 case GetMyArgumentByVal: {
613 // FIXME: It would be trivial to have a def here.
614 // https://bugs.webkit.org/show_bug.cgi?id=143077
620 case PutByValAlias: {
// Indexed stores: a store that may grow the array also writes publicLength;
// the def records the stored value for forwarding to later loads.
621 ArrayMode mode = node->arrayMode();
622 Node* base = graph.varArgChild(node, 0).node();
623 Node* index = graph.varArgChild(node, 1).node();
624 Node* value = graph.varArgChild(node, 2).node();
625 switch (mode.modeForPut().type()) {
626 case Array::SelectUsingPredictions:
627 case Array::SelectUsingArguments:
628 case Array::Unprofiled:
629 case Array::Undecided:
630 // Assume the worst since we don't have profiling yet.
635 case Array::ForceExit:
645 if (node->arrayMode().isOutOfBounds()) {
650 read(Butterfly_publicLength);
651 read(Butterfly_vectorLength);
652 read(IndexedInt32Properties);
653 write(IndexedInt32Properties);
654 if (node->arrayMode().mayStoreToHole())
655 write(Butterfly_publicLength);
656 def(HeapLocation(IndexedPropertyLoc, IndexedInt32Properties, base, index), LazyNode(value));
660 if (node->arrayMode().isOutOfBounds()) {
665 read(Butterfly_publicLength);
666 read(Butterfly_vectorLength);
667 read(IndexedDoubleProperties);
668 write(IndexedDoubleProperties);
669 if (node->arrayMode().mayStoreToHole())
670 write(Butterfly_publicLength);
671 def(HeapLocation(IndexedPropertyLoc, IndexedDoubleProperties, base, index), LazyNode(value));
674 case Array::Contiguous:
675 if (node->arrayMode().isOutOfBounds()) {
680 read(Butterfly_publicLength);
681 read(Butterfly_vectorLength);
682 read(IndexedContiguousProperties);
683 write(IndexedContiguousProperties);
684 if (node->arrayMode().mayStoreToHole())
685 write(Butterfly_publicLength);
686 def(HeapLocation(IndexedPropertyLoc, IndexedContiguousProperties, base, index), LazyNode(value));
689 case Array::ArrayStorage:
690 case Array::SlowPutArrayStorage:
691 // Give up on life for now.
696 case Array::Int8Array:
697 case Array::Int16Array:
698 case Array::Int32Array:
699 case Array::Uint8Array:
700 case Array::Uint8ClampedArray:
701 case Array::Uint16Array:
702 case Array::Uint32Array:
703 case Array::Float32Array:
704 case Array::Float64Array:
706 write(TypedArrayProperties);
707 // FIXME: We can't def() anything here because these operations truncate their inputs.
708 // https://bugs.webkit.org/show_bug.cgi?id=134737
710 case Array::AnyTypedArray:
712 case Array::DirectArguments:
713 case Array::ScopedArguments:
714 DFG_CRASH(graph, node, "impossible array mode for put");
717 RELEASE_ASSERT_NOT_REACHED();
722 read(JSCell_structureID);
726 read(JSCell_indexingType);
727 read(JSCell_typeInfoType);
728 read(JSCell_structureID);
731 case CheckHasInstance:
732 read(JSCell_typeInfoFlags);
733 def(HeapLocation(CheckHasInstanceLoc, JSCell_typeInfoFlags, node->child1()), LazyNode(node));
737 read(JSCell_structureID);
738 def(HeapLocation(InstanceOfLoc, JSCell_structureID, node->child1(), node->child2()), LazyNode(node));
// Structure transitions rewrite all of the cell's type-describing fields.
742 write(JSCell_structureID);
743 write(JSCell_typeInfoType);
744 write(JSCell_typeInfoFlags);
745 write(JSCell_indexingType);
748 case AllocatePropertyStorage:
749 write(JSObject_butterfly);
750 def(HeapLocation(ButterflyLoc, JSObject_butterfly, node->child1()), LazyNode(node));
753 case ReallocatePropertyStorage:
754 read(JSObject_butterfly);
755 write(JSObject_butterfly);
756 def(HeapLocation(ButterflyLoc, JSObject_butterfly, node->child1()), LazyNode(node));
760 read(JSObject_butterfly);
761 def(HeapLocation(ButterflyLoc, JSObject_butterfly, node->child1()), LazyNode(node));
764 case GetButterflyReadOnly:
765 // This rule is separate to prevent CSE of GetButterfly with GetButterflyReadOnly. But in reality,
766 // this works because we don't introduce GetButterflyReadOnly until the bitter end of compilation.
767 read(JSObject_butterfly);
768 def(HeapLocation(ButterflyReadOnlyLoc, JSObject_butterfly, node->child1()), LazyNode(node));
772 case ArrayifyToStructure:
773 read(JSCell_structureID);
774 read(JSCell_indexingType);
775 read(JSObject_butterfly);
776 write(JSCell_structureID);
777 write(JSCell_indexingType);
778 write(JSObject_butterfly);
779 write(Watchpoint_fire);
782 case GetIndexedPropertyStorage:
783 if (node->arrayMode().type() == Array::String) {
784 def(PureValue(node, node->arrayMode().asWord()));
788 def(HeapLocation(IndexedPropertyStorageLoc, MiscFields, node->child1()), LazyNode(node));
791 case GetTypedArrayByteOffset:
793 def(HeapLocation(TypedArrayByteOffsetLoc, MiscFields, node->child1()), LazyNode(node));
797 case GetGetterSetterByOffset: {
// Named-property accesses key their abstract heap by identifier number so
// accesses to different identifiers never alias.
798 unsigned identifierNumber = node->storageAccessData().identifierNumber;
799 AbstractHeap heap(NamedProperties, identifierNumber);
801 def(HeapLocation(NamedPropertyLoc, heap, node->child2()), LazyNode(node));
805 case MultiGetByOffset: {
806 read(JSCell_structureID);
807 read(JSObject_butterfly);
808 AbstractHeap heap(NamedProperties, node->multiGetByOffsetData().identifierNumber);
810 def(HeapLocation(NamedPropertyLoc, heap, node->child1()), LazyNode(node));
814 case MultiPutByOffset: {
815 read(JSCell_structureID);
816 read(JSObject_butterfly);
817 AbstractHeap heap(NamedProperties, node->multiPutByOffsetData().identifierNumber);
// Only report structure/butterfly writes when this particular put can
// actually transition or reallocate.
819 if (node->multiPutByOffsetData().writesStructures())
820 write(JSCell_structureID);
821 if (node->multiPutByOffsetData().reallocatesStorage())
822 write(JSObject_butterfly);
823 def(HeapLocation(NamedPropertyLoc, heap, node->child1()), LazyNode(node->child2().node()));
828 unsigned identifierNumber = node->storageAccessData().identifierNumber;
829 AbstractHeap heap(NamedProperties, identifierNumber);
831 def(HeapLocation(NamedPropertyLoc, heap, node->child2()), LazyNode(node->child3().node()));
835 case GetArrayLength: {
836 ArrayMode mode = node->arrayMode();
837 switch (mode.type()) {
840 case Array::Contiguous:
841 case Array::ArrayStorage:
842 case Array::SlowPutArrayStorage:
843 read(Butterfly_publicLength);
844 def(HeapLocation(ArrayLengthLoc, Butterfly_publicLength, node->child1()), LazyNode(node));
848 def(PureValue(node, mode.asWord()));
851 case Array::DirectArguments:
852 case Array::ScopedArguments:
854 def(HeapLocation(ArrayLengthLoc, MiscFields, node->child1()), LazyNode(node));
858 ASSERT(mode.isSomeTypedArrayView());
860 def(HeapLocation(ArrayLengthLoc, MiscFields, node->child1()), LazyNode(node));
866 read(AbstractHeap(ScopeProperties, node->scopeOffset().offset()));
867 def(HeapLocation(ClosureVariableLoc, AbstractHeap(ScopeProperties, node->scopeOffset().offset()), node->child1()), LazyNode(node));
871 write(AbstractHeap(ScopeProperties, node->scopeOffset().offset()));
872 def(HeapLocation(ClosureVariableLoc, AbstractHeap(ScopeProperties, node->scopeOffset().offset()), node->child1()), LazyNode(node->child2().node()));
875 case GetFromArguments: {
876 AbstractHeap heap(DirectArgumentsProperties, node->capturedArgumentsOffset().offset());
878 def(HeapLocation(DirectArgumentsLoc, heap, node->child1()), LazyNode(node));
882 case PutToArguments: {
883 AbstractHeap heap(DirectArgumentsProperties, node->capturedArgumentsOffset().offset());
885 def(HeapLocation(DirectArgumentsLoc, heap, node->child1()), LazyNode(node->child2().node()));
890 case GetGlobalLexicalVariable:
891 read(AbstractHeap(Absolute, node->variablePointer()));
892 def(HeapLocation(GlobalVariableLoc, AbstractHeap(Absolute, node->variablePointer())), LazyNode(node));
895 case PutGlobalVariable:
896 write(AbstractHeap(Absolute, node->variablePointer()));
897 def(HeapLocation(GlobalVariableLoc, AbstractHeap(Absolute, node->variablePointer())), LazyNode(node->child2().node()));
900 case NewArrayWithSize:
902 read(HeapObjectCount);
903 write(HeapObjectCount);
907 read(HeapObjectCount);
908 write(HeapObjectCount);
// A freshly allocated array defs its own length and each initialized element,
// so later loads from it can be folded to the original operands.
910 unsigned numElements = node->numChildren();
912 def(HeapLocation(ArrayLengthLoc, Butterfly_publicLength, node),
913 LazyNode(graph.freeze(jsNumber(numElements))));
// Pick the indexed-property heap matching the array's indexing type.
919 switch (node->indexingType()) {
920 case ALL_DOUBLE_INDEXING_TYPES:
921 heap = IndexedDoubleProperties;
924 case ALL_INT32_INDEXING_TYPES:
925 heap = IndexedInt32Properties;
928 case ALL_CONTIGUOUS_INDEXING_TYPES:
929 heap = IndexedContiguousProperties;
// Iterate over whichever set is smaller: the array's operands, or the set of
// uint32 constants in use as potential indices.
936 if (numElements < graph.m_uint32ValuesInUse.size()) {
937 for (unsigned operandIdx = 0; operandIdx < numElements; ++operandIdx) {
938 Edge use = graph.m_varArgChildren[node->firstChild() + operandIdx];
939 def(HeapLocation(IndexedPropertyLoc, heap, node, LazyNode(graph.freeze(jsNumber(operandIdx)))),
940 LazyNode(use.node()));
943 for (uint32_t operandIdx : graph.m_uint32ValuesInUse) {
944 if (operandIdx >= numElements)
946 Edge use = graph.m_varArgChildren[node->firstChild() + operandIdx];
947 // operandIdx comes from graph.m_uint32ValuesInUse and thus is guaranteed to be already frozen
948 def(HeapLocation(IndexedPropertyLoc, heap, node, LazyNode(graph.freeze(jsNumber(operandIdx)))),
949 LazyNode(use.node()));
955 case NewArrayBuffer: {
956 read(HeapObjectCount);
957 write(HeapObjectCount);
959 unsigned numElements = node->numConstants();
960 def(HeapLocation(ArrayLengthLoc, Butterfly_publicLength, node),
961 LazyNode(graph.freeze(jsNumber(numElements))));
964 NodeType op = JSConstant;
965 switch (node->indexingType()) {
966 case ALL_DOUBLE_INDEXING_TYPES:
967 heap = IndexedDoubleProperties;
971 case ALL_INT32_INDEXING_TYPES:
972 heap = IndexedInt32Properties;
975 case ALL_CONTIGUOUS_INDEXING_TYPES:
976 heap = IndexedContiguousProperties;
// The buffer's constants are known at compile time, so each element defs a
// frozen constant.
983 JSValue* data = graph.m_codeBlock->constantBuffer(node->startConstant());
984 if (numElements < graph.m_uint32ValuesInUse.size()) {
985 for (unsigned index = 0; index < numElements; ++index) {
986 def(HeapLocation(IndexedPropertyLoc, heap, node, LazyNode(graph.freeze(jsNumber(index)))),
987 LazyNode(graph.freeze(data[index]), op));
990 Vector<uint32_t> possibleIndices;
991 for (uint32_t index : graph.m_uint32ValuesInUse) {
992 if (index >= numElements)
994 possibleIndices.append(index);
996 for (uint32_t index : possibleIndices) {
997 def(HeapLocation(IndexedPropertyLoc, heap, node, LazyNode(graph.freeze(jsNumber(index)))),
998 LazyNode(graph.freeze(data[index]), op));
1012 case NewStringObject:
1013 case PhantomNewObject:
1014 case MaterializeNewObject:
1015 case PhantomNewFunction:
1016 case PhantomCreateActivation:
1017 case MaterializeCreateActivation:
1018 read(HeapObjectCount);
1019 write(HeapObjectCount);
1022 case NewArrowFunction:
// Reports a Watchpoint_fire write when the singleton FunctionExecutable is
// still valid.
1024 if (node->castOperand<FunctionExecutable*>()->singletonFunction()->isStillValid())
1025 write(Watchpoint_fire);
1026 read(HeapObjectCount);
1027 write(HeapObjectCount);
1037 if (node->arrayMode().isOutOfBounds()) {
1042 def(PureValue(node));
1048 case CompareGreater:
1049 case CompareGreaterEq:
// Comparisons on non-Untyped use kinds def a PureValue.
1050 if (!node->isBinaryUseKind(UntypedUse)) {
1051 def(PureValue(node));
1059 case CallStringConstructor:
1060 switch (node->child1().useKind()) {
1061 case StringObjectUse:
1062 case StringOrStringObjectUse:
1063 // These don't def a pure value, unfortunately. I'll avoid load-eliminating these for
1074 RELEASE_ASSERT_NOT_REACHED();
1078 case ThrowReferenceError:
1082 case CountExecution:
1083 case CheckWatchdogTimer:
1084 read(InternalState);
1085 write(InternalState);
1089 RELEASE_ASSERT_NOT_REACHED();
1093 DFG_CRASH(graph, node, toCString("Unrecognized node type: ", Graph::opName(node->op())).data());
// A functor that accepts any read()/write()/def() callback and ignores it.
// Pass this to clobberize() for the channels a client does not care about.
1096 class NoOpClobberize {
1098 NoOpClobberize() { }
1099 template<typename... T>
// Accepts any argument list and does nothing.
1100 void operator()(T...) const { }
// A functor that records whether it was invoked at all; result() answers
// "did clobberize() report anything on this channel?".
1103 class CheckClobberize {
1110 template<typename... T>
1111 void operator()(T...) const { m_result = true; }
1113 bool result() const { return m_result; }
// mutable so the const call operator can record the hit.
1116 mutable bool m_result;
// Returns true if clobberize() reports any write for this node (defined out of line).
1119 bool doesWrites(Graph&, Node*);
// A functor that records whether any heap reported to it overlaps the heap
// of interest supplied at construction; query via result().
1121 class AbstractHeapOverlaps {
1123 AbstractHeapOverlaps(AbstractHeap heap)
1129 void operator()(AbstractHeap otherHeap) const
1133 m_result = m_heap.overlaps(otherHeap);
1136 bool result() const { return m_result; }
1139 AbstractHeap m_heap;
// mutable so the const call operator can record the overlap.
1140 mutable bool m_result;
// Convenience queries over a node's clobber sets (defined out of line),
// built from clobberize() and the functor classes above.
1143 bool accessesOverlap(Graph&, Node*, AbstractHeap);
1144 bool writesOverlap(Graph&, Node*, AbstractHeap);
1146 bool clobbersHeap(Graph&, Node*);
1148 // We would have used bind() for these, but because of the overloading that we are doing,
1149 // it's quite a bit clearer to just write this out the traditional way.
// Adapts an object exposing a read(AbstractHeap) method into a read functor.
// NOTE(review): the call operator's body is not visible in this view;
// presumably it forwards to m_value.read(heap), mirroring
// WriteMethodClobberize below - confirm against the full file.
1151 template<typename T>
1152 class ReadMethodClobberize {
1154 ReadMethodClobberize(T& value)
1159 void operator()(AbstractHeap heap) const
// Adapts an object exposing a write(AbstractHeap) method into a write functor.
1167 template<typename T>
1168 class WriteMethodClobberize {
1170 WriteMethodClobberize(T& value)
1175 void operator()(AbstractHeap heap) const
// Forward the reported heap to the wrapped object's write() method.
1177 m_value.write(heap);
// Adapts an object exposing def() overloads into a def functor; it is
// callable with either a PureValue or a HeapLocation plus LazyNode, matching
// the two def() shapes clobberize() emits.
1183 template<typename T>
1184 class DefMethodClobberize {
1186 DefMethodClobberize(T& value)
1191 void operator()(PureValue value) const
1196 void operator()(HeapLocation location, LazyNode node) const
// Forward the heap-location def to the wrapped object's def() method.
1198 m_value.def(location, node);
// Convenience overload: wraps a single adaptor object exposing read/write/def
// methods in the three Method-Clobberize adapters and forwards to the
// three-functor clobberize() above.
1205 template<typename Adaptor>
1206 void clobberize(Graph& graph, Node* node, Adaptor& adaptor)
1208 ReadMethodClobberize<Adaptor> read(adaptor);
1209 WriteMethodClobberize<Adaptor> write(adaptor);
1210 DefMethodClobberize<Adaptor> def(adaptor);
1211 clobberize(graph, node, read, write, def);
1214 } } // namespace JSC::DFG
1216 #endif // ENABLE(DFG_JIT)
1218 #endif // DFGClobberize_h