/*
 * Copyright (C) 2013-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef DFGClobberize_h
#define DFGClobberize_h

#if ENABLE(DFG_JIT)

#include "DFGAbstractHeap.h"
#include "DFGEdgeUsesStructure.h"
#include "DFGGraph.h"
#include "DFGHeapLocation.h"
#include "DFGLazyNode.h"
#include "DFGPureValue.h"

namespace JSC { namespace DFG {

template<typename ReadFunctor, typename WriteFunctor, typename DefFunctor>
void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFunctor& write, const DefFunctor& def)
{
    // Some notes:
    //
    // - The canonical way of clobbering the world is to read world and write
    //   heap. This is because World subsumes Heap and Stack, and Stack can be
    //   read by anyone but only written to by explicit stack writing operations.
    //   Of course, claiming to also write World is not wrong; it'll just
    //   pessimize some important optimizations. (See the example after these
    //   notes.)
    //
    // - We cannot hoist, or sink, anything that has effects. This means that the
    //   easiest way of indicating that something cannot be hoisted is to claim
    //   that it side-effects some miscellaneous thing.
    //
    // - We cannot hoist forward-exiting nodes without some additional effort. I
    //   believe that what it comes down to is that forward-exiting nodes generally
    //   have their NodeExitsForward cleared upon hoist, except for forward-exiting
    //   nodes that take bogus state as their input. Those are substantially
    //   harder. We disable it for now. In the future we could enable it by having
    //   versions of those nodes that backward-exit instead, but I'm not convinced
    //   that this would be worth it.
    //
    // - Some nodes lie, and claim that they do not read the JSCell_structureID,
    //   JSCell_typeInfoFlags, etc. These are nodes that use the structure in a way
    //   that does not depend on things that change under structure transitions.
    //
    // - It's implicitly understood that OSR exits read the world. This is why we
    //   generally don't move or eliminate stores. Every node can exit, so the
    //   read set does not reflect things that would be read if we exited.
    //   Instead, the read set reflects what the node will have to read if it
    //   doesn't exit.
    //
    // - Broadly, we don't say that we're reading something if that something is
    //   immutable.
    //
    // - We try to make this work even prior to type inference, just so that we
    //   can use it for IR dumps. No promises on whether the answers are sound
    //   prior to type inference - though they probably could be if we did some
    //   small hacking.
    //
    // - If you do read(Stack) or read(World), then make sure that readTop() in
    //   PreciseLocalClobberize is correct.
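    //
    // For example - a sketch of the first note above, not a rule enforced by any
    // single node - an operation that can call back into arbitrary JS, and
    // therefore clobbers the world, would report:
    //
    //     read(World);
    //     write(Heap);
    //
    // rather than write(World), since the stack is only written by explicit stack
    // writing operations.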

    // While read() and write() are fairly self-explanatory - they track what sorts of things the
    // node may read or write - the def() functor is more tricky. It tells you the heap locations
    // (not just abstract heaps) that are defined by a node. A heap location comprises an abstract
    // heap, some nodes, and a LocationKind. Briefly, a location defined by a node is a location
    // whose value can be deduced from looking at the node itself. The locations returned must obey
    // the following properties:
    //
    // - If someone wants to CSE a load from the heap, then a HeapLocation object should be
    //   sufficient to find a single matching node.
    //
    // - The abstract heap is the only abstract heap that could be clobbered to invalidate any such
    //   CSE attempt. I.e. if clobberize() reports that on every path between some node and a node
    //   that defines a HeapLocation that it wanted, there were no writes to any abstract heap that
    //   overlap the location's heap, then we have a sound match. Effectively, the semantics of
    //   write() and def() are intertwined such that for them to be sound they must agree on what
    //   the abstract heaps mean.
    //
    // read(), write(), and def() for heap locations is enough to do GCSE on effectful things. To
    // keep things simple, this code will also def() pure things. def() must be overloaded to also
    // accept PureValue. This way, a client of clobberize() can implement GCSE entirely using the
    // information that clobberize() passes to write() and def(). Other clients of clobberize() can
    // just ignore def() by using a NoOpClobberize functor.
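    //
    // As a concrete sketch of the two def() overloads (mirroring cases below, such as
    // GetClosureVar), a pure operation defs a PureValue, while a heap load defs a
    // HeapLocation keyed by the abstract heap it reads:
    //
    //     def(PureValue(node));
    //
    //     read(AbstractHeap(ScopeProperties, node->scopeOffset().offset()));
    //     def(HeapLocation(ClosureVariableLoc, AbstractHeap(ScopeProperties, node->scopeOffset().offset()), node->child1()), LazyNode(node));
    //
    // A later load that computes the same HeapLocation, with no intervening write() to an
    // overlapping abstract heap, can be CSE'd against this node.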

    if (edgesUseStructure(graph, node))
        read(JSCell_structureID);

    // We allow the runtime to perform a stack scan at any time. We don't model which nodes get
    // implemented by calls into the runtime. For debugging we might replace the implementation of
    // any node with a call to the runtime, and that call may walk the stack. Therefore, each node
    // must read() anything that a stack scan would read. That's what this does.
    for (InlineCallFrame* inlineCallFrame = node->origin.semantic.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame) {
        if (inlineCallFrame->isClosureCall)
            read(AbstractHeap(Stack, inlineCallFrame->stackOffset + CallFrameSlot::callee));
        if (inlineCallFrame->isVarargs())
            read(AbstractHeap(Stack, inlineCallFrame->stackOffset + CallFrameSlot::argumentCount));
    }

    // We don't want to specifically account for which nodes can read from the scope
    // when the debugger is enabled. It's helpful to just claim all nodes do.
    // Specifically, if a node allocates, this may call into the debugger's machinery.
    // The debugger's machinery is free to take a stack trace and try to read from
    // a scope which is expected to be flushed to the stack.
    if (graph.hasDebuggerEnabled()) {
        ASSERT(!node->origin.semantic.inlineCallFrame);
        read(AbstractHeap(Stack, graph.m_codeBlock->scopeRegister()));
    }

    switch (node->op()) {
        def(PureValue(node, node->constant()));

    case ExtractOSREntryLocal:
    case CheckStructureImmediate:

        // We should enable CSE of LazyJSConstant. It's a little annoying since LazyJSValue has
        // more bits than we currently have in PureValue.

    case GetGlobalObject:
    case StringCharCodeAt:
    case CompareStrictEq:
    case IsTypedArrayView:
    case BooleanToNumber:
        def(PureValue(node));

        if (node->child1().useKind() == DoubleRepUse)
            def(PureValue(node));

        if (node->child1().useKind() == Int32Use || node->child1().useKind() == DoubleRepUse)
            def(PureValue(node));

        if (node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use)
            def(PureValue(node));

        if (node->child1().useKind() == UntypedUse || node->child2().useKind() == UntypedUse) {

        def(PureValue(node));

        read(MathDotRandomState);
        write(MathDotRandomState);

    case HasGenericProperty:
    case HasStructureProperty:
    case GetEnumerableLength:
    case GetPropertyEnumerator: {

    case GetDirectPname: {
        // This reads and writes heap because it can end up calling a generic getByVal
        // if the Structure changed, which could in turn end up calling a getter.

    case GetEnumeratorStructurePname:
    case GetEnumeratorGenericPname: {
        def(PureValue(node));

    case HasIndexedProperty: {
        read(JSObject_butterfly);
        ArrayMode mode = node->arrayMode();
        switch (mode.type()) {
            if (mode.isInBounds()) {
                read(Butterfly_publicLength);
                read(IndexedInt32Properties);
                def(HeapLocation(HasIndexedPropertyLoc, IndexedInt32Properties, node->child1(), node->child2()), LazyNode(node));

        case Array::Double: {
            if (mode.isInBounds()) {
                read(Butterfly_publicLength);
                read(IndexedDoubleProperties);
                def(HeapLocation(HasIndexedPropertyLoc, IndexedDoubleProperties, node->child1(), node->child2()), LazyNode(node));

        case Array::Contiguous: {
            if (mode.isInBounds()) {
                read(Butterfly_publicLength);
                read(IndexedContiguousProperties);
                def(HeapLocation(HasIndexedPropertyLoc, IndexedContiguousProperties, node->child1(), node->child2()), LazyNode(node));

        case Array::ArrayStorage: {
            if (mode.isInBounds()) {
                read(Butterfly_vectorLength);
                read(IndexedArrayStorageProperties);

        RELEASE_ASSERT_NOT_REACHED();
    case StringFromCharCode:
        switch (node->child1().useKind()) {
            def(PureValue(node));

            DFG_CRASH(graph, node, "Bad use kind");

        def(PureValue(node, node->arithMode()));

        switch (node->binaryUseKind()) {
            def(PureValue(node, node->arithMode()));

            DFG_CRASH(graph, node, "Bad use kind");

        def(PureValue(node, static_cast<uintptr_t>(node->arithRoundingMode())));

        def(PureValue(CheckCell, AdjacencyList(AdjacencyList::Fixed, node->child1()), node->cellOperand()));

        def(PureValue(CheckNotEmpty, AdjacencyList(AdjacencyList::Fixed, node->child1())));

    case CheckStringIdent:
        def(PureValue(CheckStringIdent, AdjacencyList(AdjacencyList::Fixed, node->child1()), node->uidOperand()));

    case ConstantStoragePointer:
        def(PureValue(node, node->storagePointer()));

    case CheckTierUpInLoop:
    case CheckTierUpAtReturn:
    case CheckTierUpAndOSREnter:

    case ProfileControlFlow:

    case InvalidationPoint:
        def(HeapLocation(InvalidationPointLoc, Watchpoint_fire), LazyNode(node));

        read(AbstractHeap(Stack, node->local()));

        write(Watchpoint_fire);

    case CreateActivation: {
        SymbolTable* table = node->castOperand<SymbolTable*>();
        if (table->singletonScope()->isStillValid())
            write(Watchpoint_fire);
        read(HeapObjectCount);
        write(HeapObjectCount);

    case CreateDirectArguments:
    case CreateScopedArguments:
    case CreateClonedArguments:
        read(HeapObjectCount);
        write(HeapObjectCount);

    case PhantomDirectArguments:
    case PhantomClonedArguments:
        // The DFG backend requires that the locals this node reads are flushed. The FTL backend
        // can handle those locals being promoted.
        if (!isFTL(graph.m_plan.mode))
            read(Stack);

        // Even though it's phantom, it still has the property that one can't be replaced with another.
        read(HeapObjectCount);
        write(HeapObjectCount);

    case CallObjectConstructor:
        read(HeapObjectCount);
        write(HeapObjectCount);
        def(HeapLocation(IsObjectOrNullLoc, MiscFields, node->child1()), LazyNode(node));

        def(HeapLocation(IsFunctionLoc, MiscFields, node->child1()), LazyNode(node));

    case GetByIdWithThis:
    case GetByValWithThis:
    case PutByIdWithThis:
    case PutByValWithThis:
    case PutGetterSetterById:

    case TailCallInlinedCaller:
    case CallForwardVarargs:
    case TailCallVarargsInlinedCaller:
    case TailCallForwardVarargsInlinedCaller:
    case ConstructVarargs:
    case ConstructForwardVarargs:

    case SetFunctionName:

        ASSERT(!node->origin.semantic.inlineCallFrame);
        read(AbstractHeap(Stack, graph.m_codeBlock->scopeRegister()));
        read(AbstractHeap(Stack, virtualRegisterForArgument(0)));

    case TailCallVarargs:
    case TailCallForwardVarargs:

        read(GetterSetter_getter);
        def(HeapLocation(GetterLoc, GetterSetter_getter, node->child1()), LazyNode(node));

        read(GetterSetter_setter);
        def(HeapLocation(SetterLoc, GetterSetter_setter, node->child1()), LazyNode(node));

        read(AbstractHeap(Stack, CallFrameSlot::callee));
        def(HeapLocation(StackLoc, AbstractHeap(Stack, CallFrameSlot::callee)), LazyNode(node));

    case GetArgumentCountIncludingThis:
        read(AbstractHeap(Stack, CallFrameSlot::argumentCount));
        def(HeapLocation(StackPayloadLoc, AbstractHeap(Stack, CallFrameSlot::argumentCount)), LazyNode(node));

        read(AbstractHeap(Stack, node->local()));
        def(HeapLocation(StackLoc, AbstractHeap(Stack, node->local())), LazyNode(node));

        write(AbstractHeap(Stack, node->local()));
        def(HeapLocation(StackLoc, AbstractHeap(Stack, node->local())), LazyNode(node->child1().node()));

        AbstractHeap heap(Stack, node->stackAccessData()->local);
        def(HeapLocation(StackLoc, heap), LazyNode(node));

        AbstractHeap heap(Stack, node->stackAccessData()->local);
        def(HeapLocation(StackLoc, heap), LazyNode(node->child1().node()));

        LoadVarargsData* data = node->loadVarargsData();
        write(AbstractHeap(Stack, data->count.offset()));
        for (unsigned i = data->limit; i--;)
            write(AbstractHeap(Stack, data->start.offset() + static_cast<int>(i)));

    case ForwardVarargs: {
        // We could be way more precise here.
        LoadVarargsData* data = node->loadVarargsData();
        write(AbstractHeap(Stack, data->count.offset()));
        for (unsigned i = data->limit; i--;)
            write(AbstractHeap(Stack, data->start.offset() + static_cast<int>(i)));

    case GetLocalUnlinked:
        read(AbstractHeap(Stack, node->unlinkedLocal()));
        def(HeapLocation(StackLoc, AbstractHeap(Stack, node->unlinkedLocal())), LazyNode(node));
        ArrayMode mode = node->arrayMode();
        switch (mode.type()) {
        case Array::SelectUsingPredictions:
        case Array::Unprofiled:
        case Array::SelectUsingArguments:
            // Assume the worst since we don't have profiling yet.

        case Array::ForceExit:

            if (mode.isOutOfBounds()) {

            // This appears to read nothing because it's only reading immutable data.
            def(PureValue(node, mode.asWord()));

        case Array::DirectArguments:
            read(DirectArgumentsProperties);
            def(HeapLocation(IndexedPropertyLoc, DirectArgumentsProperties, node->child1(), node->child2()), LazyNode(node));

        case Array::ScopedArguments:
            read(ScopeProperties);
            def(HeapLocation(IndexedPropertyLoc, ScopeProperties, node->child1(), node->child2()), LazyNode(node));

            if (mode.isInBounds()) {
                read(Butterfly_publicLength);
                read(IndexedInt32Properties);
                def(HeapLocation(IndexedPropertyLoc, IndexedInt32Properties, node->child1(), node->child2()), LazyNode(node));

            if (mode.isInBounds()) {
                read(Butterfly_publicLength);
                read(IndexedDoubleProperties);
                def(HeapLocation(IndexedPropertyLoc, IndexedDoubleProperties, node->child1(), node->child2()), LazyNode(node));

        case Array::Contiguous:
            if (mode.isInBounds()) {
                read(Butterfly_publicLength);
                read(IndexedContiguousProperties);
                def(HeapLocation(IndexedPropertyLoc, IndexedContiguousProperties, node->child1(), node->child2()), LazyNode(node));

        case Array::Undecided:
            def(PureValue(node));

        case Array::ArrayStorage:
        case Array::SlowPutArrayStorage:
            if (mode.isInBounds()) {
                read(Butterfly_vectorLength);
                read(IndexedArrayStorageProperties);

        case Array::Int8Array:
        case Array::Int16Array:
        case Array::Int32Array:
        case Array::Uint8Array:
        case Array::Uint8ClampedArray:
        case Array::Uint16Array:
        case Array::Uint32Array:
        case Array::Float32Array:
        case Array::Float64Array:
            read(TypedArrayProperties);
            def(HeapLocation(IndexedPropertyLoc, TypedArrayProperties, node->child1(), node->child2()), LazyNode(node));

        // We should not get an AnyTypedArray in a GetByVal, since AnyTypedArray is only created
        // from intrinsics, which are only added when inline caching a GetById.
        case Array::AnyTypedArray:
            DFG_CRASH(graph, node, "impossible array mode for get");

        RELEASE_ASSERT_NOT_REACHED();

    case GetMyArgumentByVal:
    case GetMyArgumentByValOutOfBounds: {
        // FIXME: It would be trivial to have a def here.
        // https://bugs.webkit.org/show_bug.cgi?id=143077
    case PutByValAlias: {
        ArrayMode mode = node->arrayMode();
        Node* base = graph.varArgChild(node, 0).node();
        Node* index = graph.varArgChild(node, 1).node();
        Node* value = graph.varArgChild(node, 2).node();
        switch (mode.modeForPut().type()) {
        case Array::SelectUsingPredictions:
        case Array::SelectUsingArguments:
        case Array::Unprofiled:
        case Array::Undecided:
            // Assume the worst since we don't have profiling yet.

        case Array::ForceExit:

            if (node->arrayMode().isOutOfBounds()) {

            read(Butterfly_publicLength);
            read(Butterfly_vectorLength);
            read(IndexedInt32Properties);
            write(IndexedInt32Properties);
            if (node->arrayMode().mayStoreToHole())
                write(Butterfly_publicLength);
            def(HeapLocation(IndexedPropertyLoc, IndexedInt32Properties, base, index), LazyNode(value));

            if (node->arrayMode().isOutOfBounds()) {

            read(Butterfly_publicLength);
            read(Butterfly_vectorLength);
            read(IndexedDoubleProperties);
            write(IndexedDoubleProperties);
            if (node->arrayMode().mayStoreToHole())
                write(Butterfly_publicLength);
            def(HeapLocation(IndexedPropertyLoc, IndexedDoubleProperties, base, index), LazyNode(value));

        case Array::Contiguous:
            if (node->arrayMode().isOutOfBounds()) {

            read(Butterfly_publicLength);
            read(Butterfly_vectorLength);
            read(IndexedContiguousProperties);
            write(IndexedContiguousProperties);
            if (node->arrayMode().mayStoreToHole())
                write(Butterfly_publicLength);
            def(HeapLocation(IndexedPropertyLoc, IndexedContiguousProperties, base, index), LazyNode(value));

        case Array::ArrayStorage:
        case Array::SlowPutArrayStorage:
            // Give up on life for now.

        case Array::Int8Array:
        case Array::Int16Array:
        case Array::Int32Array:
        case Array::Uint8Array:
        case Array::Uint8ClampedArray:
        case Array::Uint16Array:
        case Array::Uint32Array:
        case Array::Float32Array:
        case Array::Float64Array:
            write(TypedArrayProperties);
            // FIXME: We can't def() anything here because these operations truncate their inputs.
            // https://bugs.webkit.org/show_bug.cgi?id=134737

        case Array::AnyTypedArray:
        case Array::DirectArguments:
        case Array::ScopedArguments:
            DFG_CRASH(graph, node, "impossible array mode for put");

        RELEASE_ASSERT_NOT_REACHED();
        read(JSCell_structureID);

        read(JSCell_indexingType);
        read(JSCell_typeInfoType);
        read(JSCell_structureID);

    case CheckTypeInfoFlags:
        read(JSCell_typeInfoFlags);
        def(HeapLocation(CheckTypeInfoFlagsLoc, JSCell_typeInfoFlags, node->child1()), LazyNode(node));

    case OverridesHasInstance:
        read(JSCell_typeInfoFlags);
        def(HeapLocation(OverridesHasInstanceLoc, JSCell_typeInfoFlags, node->child1()), LazyNode(node));

        read(JSCell_structureID);
        def(HeapLocation(InstanceOfLoc, JSCell_structureID, node->child1(), node->child2()), LazyNode(node));

    case InstanceOfCustom:

        write(JSCell_structureID);
        write(JSCell_typeInfoType);
        write(JSCell_typeInfoFlags);
        write(JSCell_indexingType);

    case AllocatePropertyStorage:
        write(JSObject_butterfly);
        def(HeapLocation(ButterflyLoc, JSObject_butterfly, node->child1()), LazyNode(node));

    case ReallocatePropertyStorage:
        read(JSObject_butterfly);
        write(JSObject_butterfly);
        def(HeapLocation(ButterflyLoc, JSObject_butterfly, node->child1()), LazyNode(node));

        read(JSObject_butterfly);
        def(HeapLocation(ButterflyLoc, JSObject_butterfly, node->child1()), LazyNode(node));

    case ArrayifyToStructure:
        read(JSCell_structureID);
        read(JSCell_indexingType);
        read(JSObject_butterfly);
        write(JSCell_structureID);
        write(JSCell_indexingType);
        write(JSObject_butterfly);
        write(Watchpoint_fire);

    case GetIndexedPropertyStorage:
        if (node->arrayMode().type() == Array::String) {
            def(PureValue(node, node->arrayMode().asWord()));

        def(HeapLocation(IndexedPropertyStorageLoc, MiscFields, node->child1()), LazyNode(node));

    case GetTypedArrayByteOffset:
        def(HeapLocation(TypedArrayByteOffsetLoc, MiscFields, node->child1()), LazyNode(node));

    case GetGetterSetterByOffset: {
        unsigned identifierNumber = node->storageAccessData().identifierNumber;
        AbstractHeap heap(NamedProperties, identifierNumber);
        def(HeapLocation(NamedPropertyLoc, heap, node->child2()), LazyNode(node));

    case MultiGetByOffset: {
        read(JSCell_structureID);
        read(JSObject_butterfly);
        AbstractHeap heap(NamedProperties, node->multiGetByOffsetData().identifierNumber);
        // FIXME: We cannot def() for MultiGetByOffset because CSE is not smart enough to decay it
        // to a CheckStructure.
        // https://bugs.webkit.org/show_bug.cgi?id=159859

    case MultiPutByOffset: {
        read(JSCell_structureID);
        read(JSObject_butterfly);
        AbstractHeap heap(NamedProperties, node->multiPutByOffsetData().identifierNumber);
        if (node->multiPutByOffsetData().writesStructures())
            write(JSCell_structureID);
        if (node->multiPutByOffsetData().reallocatesStorage())
            write(JSObject_butterfly);
        def(HeapLocation(NamedPropertyLoc, heap, node->child1()), LazyNode(node->child2().node()));

        unsigned identifierNumber = node->storageAccessData().identifierNumber;
        AbstractHeap heap(NamedProperties, identifierNumber);
        def(HeapLocation(NamedPropertyLoc, heap, node->child2()), LazyNode(node->child3().node()));
    case GetArrayLength: {
        ArrayMode mode = node->arrayMode();
        switch (mode.type()) {
        case Array::Contiguous:
        case Array::ArrayStorage:
        case Array::SlowPutArrayStorage:
            read(Butterfly_publicLength);
            def(HeapLocation(ArrayLengthLoc, Butterfly_publicLength, node->child1()), LazyNode(node));

            def(PureValue(node, mode.asWord()));

        case Array::DirectArguments:
        case Array::ScopedArguments:
            def(HeapLocation(ArrayLengthLoc, MiscFields, node->child1()), LazyNode(node));

            ASSERT(mode.isSomeTypedArrayView());
            def(HeapLocation(ArrayLengthLoc, MiscFields, node->child1()), LazyNode(node));

        read(AbstractHeap(ScopeProperties, node->scopeOffset().offset()));
        def(HeapLocation(ClosureVariableLoc, AbstractHeap(ScopeProperties, node->scopeOffset().offset()), node->child1()), LazyNode(node));

        write(AbstractHeap(ScopeProperties, node->scopeOffset().offset()));
        def(HeapLocation(ClosureVariableLoc, AbstractHeap(ScopeProperties, node->scopeOffset().offset()), node->child1()), LazyNode(node->child2().node()));

    case GetRegExpObjectLastIndex:
        read(RegExpObject_lastIndex);
        def(HeapLocation(RegExpObjectLastIndexLoc, RegExpObject_lastIndex, node->child1()), LazyNode(node));

    case SetRegExpObjectLastIndex:
        write(RegExpObject_lastIndex);
        def(HeapLocation(RegExpObjectLastIndexLoc, RegExpObject_lastIndex, node->child1()), LazyNode(node->child2().node()));

    case RecordRegExpCachedResult:

    case GetFromArguments: {
        AbstractHeap heap(DirectArgumentsProperties, node->capturedArgumentsOffset().offset());
        def(HeapLocation(DirectArgumentsLoc, heap, node->child1()), LazyNode(node));

    case PutToArguments: {
        AbstractHeap heap(DirectArgumentsProperties, node->capturedArgumentsOffset().offset());
        def(HeapLocation(DirectArgumentsLoc, heap, node->child1()), LazyNode(node->child2().node()));

    case GetGlobalLexicalVariable:
        read(AbstractHeap(Absolute, node->variablePointer()));
        def(HeapLocation(GlobalVariableLoc, AbstractHeap(Absolute, node->variablePointer())), LazyNode(node));

    case PutGlobalVariable:
        write(AbstractHeap(Absolute, node->variablePointer()));
        def(HeapLocation(GlobalVariableLoc, AbstractHeap(Absolute, node->variablePointer())), LazyNode(node->child2().node()));

    case NewArrayWithSize:
        read(HeapObjectCount);
        write(HeapObjectCount);

        read(HeapObjectCount);
        write(HeapObjectCount);

        unsigned numElements = node->numChildren();
        def(HeapLocation(ArrayLengthLoc, Butterfly_publicLength, node),
            LazyNode(graph.freeze(jsNumber(numElements))));
        switch (node->indexingType()) {
        case ALL_DOUBLE_INDEXING_TYPES:
            heap = IndexedDoubleProperties;

        case ALL_INT32_INDEXING_TYPES:
            heap = IndexedInt32Properties;

        case ALL_CONTIGUOUS_INDEXING_TYPES:
            heap = IndexedContiguousProperties;

        if (numElements < graph.m_uint32ValuesInUse.size()) {
            for (unsigned operandIdx = 0; operandIdx < numElements; ++operandIdx) {
                Edge use = graph.m_varArgChildren[node->firstChild() + operandIdx];
                def(HeapLocation(IndexedPropertyLoc, heap, node, LazyNode(graph.freeze(jsNumber(operandIdx)))),
                    LazyNode(use.node()));

            for (uint32_t operandIdx : graph.m_uint32ValuesInUse) {
                if (operandIdx >= numElements)
                    continue;
                Edge use = graph.m_varArgChildren[node->firstChild() + operandIdx];
                // operandIdx comes from graph.m_uint32ValuesInUse and thus is guaranteed to be
                // already frozen.
                def(HeapLocation(IndexedPropertyLoc, heap, node, LazyNode(graph.freeze(jsNumber(operandIdx)))),
                    LazyNode(use.node()));

    case NewArrayBuffer: {
        read(HeapObjectCount);
        write(HeapObjectCount);

        unsigned numElements = node->numConstants();
        def(HeapLocation(ArrayLengthLoc, Butterfly_publicLength, node),
            LazyNode(graph.freeze(jsNumber(numElements))));

        NodeType op = JSConstant;
        switch (node->indexingType()) {
        case ALL_DOUBLE_INDEXING_TYPES:
            heap = IndexedDoubleProperties;
            op = DoubleConstant;

        case ALL_INT32_INDEXING_TYPES:
            heap = IndexedInt32Properties;

        case ALL_CONTIGUOUS_INDEXING_TYPES:
            heap = IndexedContiguousProperties;

        JSValue* data = graph.m_codeBlock->constantBuffer(node->startConstant());
        if (numElements < graph.m_uint32ValuesInUse.size()) {
            for (unsigned index = 0; index < numElements; ++index) {
                def(HeapLocation(IndexedPropertyLoc, heap, node, LazyNode(graph.freeze(jsNumber(index)))),
                    LazyNode(graph.freeze(data[index]), op));

            Vector<uint32_t> possibleIndices;
            for (uint32_t index : graph.m_uint32ValuesInUse) {
                if (index >= numElements)
                    continue;
                possibleIndices.append(index);

            for (uint32_t index : possibleIndices) {
                def(HeapLocation(IndexedPropertyLoc, heap, node, LazyNode(graph.freeze(jsNumber(index)))),
                    LazyNode(graph.freeze(data[index]), op));

        if (!graph.isWatchingHavingABadTimeWatchpoint(node)) {
            // This means we're already having a bad time.

        read(HeapObjectCount);
        write(HeapObjectCount);
    case NewStringObject:
    case PhantomNewObject:
    case MaterializeNewObject:
    case PhantomNewFunction:
    case PhantomNewGeneratorFunction:
    case PhantomCreateActivation:
    case MaterializeCreateActivation:
        read(HeapObjectCount);
        write(HeapObjectCount);

    case NewGeneratorFunction:
        if (node->castOperand<FunctionExecutable*>()->singletonFunction()->isStillValid())
            write(Watchpoint_fire);
        read(HeapObjectCount);
        write(HeapObjectCount);

        if (node->child2().useKind() == RegExpObjectUse
            && node->child3().useKind() == StringUse) {
            read(RegExpObject_lastIndex);
            write(RegExpObject_lastIndex);

    case StringReplaceRegExp:
        if (node->child1().useKind() == StringUse
            && node->child2().useKind() == RegExpObjectUse
            && node->child3().useKind() == StringUse) {
            read(RegExpObject_lastIndex);
            write(RegExpObject_lastIndex);

        if (node->arrayMode().isOutOfBounds()) {

        def(PureValue(node));

    case CompareGreater:
    case CompareGreaterEq:
        if (node->isBinaryUseKind(StringUse)) {
            read(HeapObjectCount);
            write(HeapObjectCount);

        if (!node->isBinaryUseKind(UntypedUse)) {
            def(PureValue(node));

    case CallStringConstructor:
        switch (node->child1().useKind()) {
        case StringObjectUse:
        case StringOrStringObjectUse:
            // These don't def a pure value, unfortunately. I'll avoid load-eliminating these for
            // now.

            RELEASE_ASSERT_NOT_REACHED();

    case ThrowReferenceError:

    case CountExecution:
    case CheckWatchdogTimer:
        read(InternalState);
        write(InternalState);

    case LogShadowChickenPrologue:
    case LogShadowChickenTail:

        RELEASE_ASSERT_NOT_REACHED();
    }

    DFG_CRASH(graph, node, toCString("Unrecognized node type: ", Graph::opName(node->op())).data());
}

class NoOpClobberize {
public:
    NoOpClobberize() { }
    template<typename... T>
    void operator()(T...) const { }
};

class CheckClobberize {
public:
    CheckClobberize() : m_result(false) { }

    template<typename... T>
    void operator()(T...) const { m_result = true; }

    bool result() const { return m_result; }

private:
    mutable bool m_result;
};

bool doesWrites(Graph&, Node*);
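
// A sketch of how these functors compose (the actual definition is out of line):
// doesWrites() can run clobberize() with a CheckClobberize functor in the write
// position and no-ops everywhere else, so it trips only on write() calls:
//
//     bool doesWrites(Graph& graph, Node* node)
//     {
//         NoOpClobberize addRead;
//         CheckClobberize addWrite;
//         NoOpClobberize addDef;
//         clobberize(graph, node, addRead, addWrite, addDef);
//         return addWrite.result();
//     }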

class AbstractHeapOverlaps {
public:
    AbstractHeapOverlaps(AbstractHeap heap) : m_heap(heap), m_result(false) { }

    void operator()(AbstractHeap otherHeap) const
    {
        if (m_result)
            return;
        m_result = m_heap.overlaps(otherHeap);
    }

    bool result() const { return m_result; }

private:
    AbstractHeap m_heap;
    mutable bool m_result;
};

bool accessesOverlap(Graph&, Node*, AbstractHeap);
bool writesOverlap(Graph&, Node*, AbstractHeap);

bool clobbersHeap(Graph&, Node*);

// We would have used bind() for these, but because of the overloading that we are doing,
// it's quite a bit clearer to just write this out the traditional way.

template<typename T>
class ReadMethodClobberize {
public:
    ReadMethodClobberize(T& value) : m_value(value) { }

    void operator()(AbstractHeap heap) const { m_value.read(heap); }
private:
    T& m_value;
};

template<typename T>
class WriteMethodClobberize {
public:
    WriteMethodClobberize(T& value) : m_value(value) { }

    void operator()(AbstractHeap heap) const { m_value.write(heap); }
private:
    T& m_value;
};

template<typename T>
class DefMethodClobberize {
public:
    DefMethodClobberize(T& value) : m_value(value) { }

    void operator()(PureValue value) const { m_value.def(value); }

    void operator()(HeapLocation location, LazyNode node) const { m_value.def(location, node); }
private:
    T& m_value;
};

template<typename Adaptor>
void clobberize(Graph& graph, Node* node, Adaptor& adaptor)
{
    ReadMethodClobberize<Adaptor> read(adaptor);
    WriteMethodClobberize<Adaptor> write(adaptor);
    DefMethodClobberize<Adaptor> def(adaptor);
    clobberize(graph, node, read, write, def);
}
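
// A minimal sketch of an adaptor for this overload: any type exposing
// read(AbstractHeap), write(AbstractHeap), def(PureValue), and
// def(HeapLocation, LazyNode) methods works. The DumpingAdaptor name below is
// illustrative, not part of this header:
//
//     struct DumpingAdaptor {
//         void read(AbstractHeap heap) { dataLog("    read: ", heap, "\n"); }
//         void write(AbstractHeap heap) { dataLog("    write: ", heap, "\n"); }
//         void def(PureValue value) { dataLog("    def: ", value, "\n"); }
//         void def(HeapLocation location, LazyNode) { dataLog("    def: ", location, "\n"); }
//     };
//
//     DumpingAdaptor adaptor;
//     clobberize(graph, node, adaptor);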

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)

#endif // DFGClobberize_h