2 * Copyright (C) 2012 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "DFGFixupPhase.h"
32 #include "DFGInsertionSet.h"
35 namespace JSC { namespace DFG {
37 class FixupPhase : public Phase {
// The fixup phase runs after prediction propagation and rewrites nodes into
// forms the backend can compile: it selects ArrayModes, downgrades generic ops
// (e.g. GetById of "length" -> GetArrayLength), and inserts conversion nodes
// via m_insertionSet.
// NOTE(review): this listing is an elided view (the embedded original line
// numbers skip), so access specifiers, braces, and some statements between the
// visible lines are not shown.
39     FixupPhase(Graph& graph)
40         : Phase(graph, "fixup")
// run() (header elided): fix up every block in the graph, in index order.
46         for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex)
47             fixupBlock(m_graph.m_blocks[blockIndex].get());
// Fix up each node of one basic block, then flush the nodes queued in
// m_insertionSet into the block in a single pass.
52     void fixupBlock(BasicBlock* block)
// NOTE(review): a null-check of `block` and the surrounding braces are elided
// in this view (original lines 53-55 are missing).
56         ASSERT(block->isReachable);
57         for (m_indexInBlock = 0; m_indexInBlock < block->size(); ++m_indexInBlock) {
// m_compileIndex / m_indexInBlock are members so that helpers (checkArray,
// blessArrayOperation, fixDoubleEdge) know where to insert new nodes.
58             m_compileIndex = block->at(m_indexInBlock);
59             fixupNode(m_graph[m_compileIndex]);
// Execute all pending insertions accumulated while walking this block.
61         m_insertionSet.execute(*block);
// Fix up a single node. This is a large switch on the node's opcode; in this
// elided view most `case` labels and `break` statements are missing, so the
// per-opcode regions below are annotated where they visibly begin.
64     void fixupNode(Node& node)
// Dead nodes need no fixup.
66         if (!node.shouldGenerate())
69         NodeType op = node.op();
71 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
72         dataLogF("   %s @%u: ", Graph::opName(op), m_compileIndex);
// --- GetById (case label elided): convert `o.length` to GetArrayLength when
// profitable. Only done before the fixpoint, only when the result is
// predicted int32, and only for the `length` identifier.
77         if (m_graph.m_fixpointState > BeforeFixpoint)
80         Node* nodePtr = &node;
82         if (!isInt32Speculation(m_graph[m_compileIndex].prediction()))
84         if (codeBlock()->identifier(nodePtr->identifierNumber()) != globalData().propertyNames->length)
// Consult the baseline code block's array profile at this bytecode index to
// decide which ArrayMode the GetArrayLength should use.
86         ArrayProfile* arrayProfile = 
87             m_graph.baselineCodeBlockFor(nodePtr->codeOrigin)->getArrayProfile(
88                 nodePtr->codeOrigin.bytecodeIndex);
89         ArrayMode arrayMode = ArrayMode(Array::SelectUsingPredictions);
91             arrayProfile->computeUpdatedPrediction(m_graph.baselineCodeBlockFor(node.codeOrigin));
92             arrayMode = ArrayMode::fromObserved(arrayProfile, Array::Read, false);
93             arrayMode = arrayMode.refine(
94                 m_graph[node.child1()].prediction(),
95                 m_graph[m_compileIndex].prediction());
// If the profile pins down a single structure, insert a CheckStructure on the
// base so the array mode is guaranteed at runtime.
96             if (arrayMode.supportsLength() && arrayProfile->hasDefiniteStructure()) {
97                 m_graph.ref(nodePtr->child1());
98                 Node checkStructure(CheckStructure, nodePtr->codeOrigin, OpInfo(m_graph.addStructureSet(arrayProfile->expectedStructure())), nodePtr->child1().index());
100                 NodeIndex checkStructureIndex = m_graph.size();
101                 m_graph.append(checkStructure);
102                 m_insertionSet.append(m_indexInBlock, checkStructureIndex);
// append() may reallocate the graph's node table; reload the pointer.
103                 nodePtr = &m_graph[m_compileIndex];
// Else branch (elided): no profile info, refine from predictions alone.
106             arrayMode = arrayMode.refine(
107                 m_graph[node.child1()].prediction(),
108                 m_graph[m_compileIndex].prediction());
110         if (!arrayMode.supportsLength())
// Commit the conversion: GetById becomes a pure GetArrayLength.
112         nodePtr->setOp(GetArrayLength);
113         ASSERT(nodePtr->flags() & NodeMustGenerate);
114         nodePtr->clearFlags(NodeMustGenerate | NodeClobbersWorld);
// Dropping NodeMustGenerate removes an implicit ref; balance the count.
115         m_graph.deref(m_compileIndex);
116         nodePtr->setArrayMode(arrayMode);
// Attach the storage child (butterfly/typed-array vector) if this mode
// needs one; checkArray may insert checks and returns the storage node.
118         NodeIndex storage = checkArray(arrayMode, nodePtr->codeOrigin, nodePtr->child1().index(), NoNode, lengthNeedsStorage, nodePtr->shouldGenerate());
119         if (storage == NoNode)
122         nodePtr = &m_graph[m_compileIndex];
123         nodePtr->children.child2() = Edge(storage);
// --- GetIndexedPropertyStorage / GetByVal region (setArrayMode call around
// the refine() below is elided in this view).
126         case GetIndexedPropertyStorage: {
127             ASSERT(node.arrayMode().canCSEStorage());
132                 node.arrayMode().refine(
133                     m_graph[node.child1()].prediction(),
134                     m_graph[node.child2()].prediction()));
136             blessArrayOperation(node.child1(), node.child2(), 2);
138             Node* nodePtr = &m_graph[m_compileIndex];
139             ArrayMode arrayMode = nodePtr->arrayMode();
// In-bounds reads from an unmodified Double array can use SaneChain
// speculation (holes read as undefined via the sane prototype chain) —
// but not if the result is used as a non-number (NodeUsedAsOther).
140             if (arrayMode.type() == Array::Double
141                 && arrayMode.arrayClass() == Array::OriginalArray
142                 && arrayMode.speculation() == Array::InBounds
143                 && arrayMode.conversion() == Array::AsIs
144                 && m_graph.globalObjectFor(nodePtr->codeOrigin)->arrayPrototypeChainIsSane()
145                 && !(nodePtr->flags() & NodeUsedAsOther))
146                 nodePtr->setArrayMode(arrayMode.withSpeculation(Array::SaneChain));
// --- String indexing ops.
151         case StringCharCodeAt: {
152             // Currently we have no good way of refining these.
153             ASSERT(node.arrayMode() == ArrayMode(Array::String));
154             blessArrayOperation(node.child1(), node.child2(), 2);
// --- ArrayPush (case label elided):
159             // May need to refine the array mode in case the value prediction contravenes
160             // the array prediction. For example, we may have evidence showing that the
161             // array is in Int32 mode, but the value we're storing is likely to be a double.
162             // Then we should turn this into a conversion to Double array followed by the
163             // push. On the other hand, we absolutely don't want to refine based on the
164             // base prediction. If it has non-cell garbage in it, then we want that to be
165             // ignored. That's because ArrayPush can't handle any array modes that aren't
166             // array-related - so if refine() turned this into a "Generic" ArrayPush then
167             // that would break things.
169                 node.arrayMode().refine(
170                     m_graph[node.child1()].prediction() & SpecCell,
172                     m_graph[node.child2()].prediction()));
173             blessArrayOperation(node.child1(), node.child2(), 2);
175             Node* nodePtr = &m_graph[m_compileIndex];
176             switch (nodePtr->arrayMode().type()) {
// --- ArrayPop region (surrounding case labels elided).
187             blessArrayOperation(node.child1(), node.child2(), 1);
// --- ValueToInt32 region (presumably — the case label is elided): a
// number-speculated conversion is side-effect free, so drop MustGenerate.
192             if (m_graph[node.child1()].shouldSpeculateNumber()
193                 && node.mustGenerate()) {
194                 node.clearFlags(NodeMustGenerate);
195                 m_graph.deref(m_compileIndex);
// --- Bitwise/shift ops region: route int-converting children directly.
206             fixIntEdge(node.children.child1());
207             fixIntEdge(node.children.child2());
// --- Comparison ops: if both children are int-speculated, leave as ints;
// otherwise fix the operands to double edges (fixDoubleEdge calls elided).
215         case CompareGreaterEq:
216         case CompareStrictEq: {
217             if (Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child2()]))
219             if (!Node::shouldSpeculateNumber(m_graph[node.child1()], m_graph[node.child2()]))
// --- Unary-operand region (op label elided): same int-vs-double triage.
227             if (m_graph[node.child1()].shouldSpeculateInteger())
229             if (!m_graph[node.child1()].shouldSpeculateNumber())
// --- Branch: if the condition is a double-speculated non-int, fix the edge
// (elided), then try to elide a LogicalNot feeding the branch.
236             if (!m_graph[node.child1()].shouldSpeculateInteger()
237                 && m_graph[node.child1()].shouldSpeculateNumber())
240             Node& myNode = m_graph[m_compileIndex]; // reload because the graph may have changed
241             Edge logicalNotEdge = myNode.child1();
242             Node& logicalNot = m_graph[logicalNotEdge];
// Branch(LogicalNot(x)) with a single use of the LogicalNot, where x already
// has a boolean result, becomes Branch(x) with the successors swapped.
243             if (logicalNot.op() == LogicalNot
244                 && logicalNot.adjustedRefCount() == 1) {
245                 Edge newChildEdge = logicalNot.child1();
246                 if (m_graph[newChildEdge].hasBooleanResult()) {
247                     m_graph.ref(newChildEdge);
248                     m_graph.deref(logicalNotEdge);
249                     myNode.children.setChild1(newChildEdge);
251                     BlockIndex toBeTaken = myNode.notTakenBlockIndex();
252                     BlockIndex toBeNotTaken = myNode.takenBlockIndex();
253                     myNode.setTakenBlockIndex(toBeTaken);
254                     myNode.setNotTakenBlockIndex(toBeNotTaken);
// --- SetLocal region (case label elided): double-format locals get their
// stored value fixed to a double edge; captured variables are skipped.
261             if (node.variableAccessData()->isCaptured())
263             if (!node.variableAccessData()->shouldUseDoubleFormat())
// --- Arithmetic ops. Pattern throughout: if integer speculation is viable,
// keep the op on ints; otherwise convert operand edges to DoubleUse
// (fixDoubleEdge calls are elided between the visible conditions).
271             if (m_graph.addShouldSpeculateInteger(node))
273             if (!Node::shouldSpeculateNumberExpectingDefined(m_graph[node.child1()], m_graph[node.child2()]))
281             if (m_graph.addShouldSpeculateInteger(node)
282                 && node.canSpeculateInteger())
290             if (m_graph.negateShouldSpeculateInteger(node))
299             if (Node::shouldSpeculateIntegerForArithmetic(m_graph[node.child1()], m_graph[node.child2()])
300                 && node.canSpeculateInteger())
308             if (m_graph.mulShouldSpeculateInteger(node))
// --- ArithDiv: integer division (on targets without a suitable int-div,
// presumably — the guarding #if is elided) is rewritten as a double divide
// feeding a DoubleAsInt32 conversion.
316             if (Node::shouldSpeculateIntegerForArithmetic(m_graph[node.child1()], m_graph[node.child2()])
317                 && node.canSpeculateInteger()) {
323                 Node& oldDivision = m_graph[m_compileIndex];
// Clone the division as a double-producing node...
325                 Node newDivision = oldDivision;
326                 newDivision.setRefCount(2);
327                 newDivision.predict(SpecDouble);
328                 NodeIndex newDivisionIndex = m_graph.size();
// ...and repurpose the original node as the int32 conversion of the clone.
330                 oldDivision.setOp(DoubleAsInt32);
331                 oldDivision.children.initialize(Edge(newDivisionIndex, DoubleUse), Edge(), Edge());
333                 m_graph.append(newDivision);
334                 m_insertionSet.append(m_indexInBlock, newDivisionIndex);
// --- Unary arithmetic (ArithAbs/etc.; label elided): int-vs-double triage.
344             if (m_graph[node.child1()].shouldSpeculateIntegerForArithmetic()
345                 && node.canSpeculateInteger())
// --- PutByVal / PutByValAlias: var-arg node, children are base/index/value.
357         case PutByValAlias: {
358             Edge child1 = m_graph.varArgChild(node, 0);
359             Edge child2 = m_graph.varArgChild(node, 1);
360             Edge child3 = m_graph.varArgChild(node, 2);
363                 node.arrayMode().refine(
364                     m_graph[child1].prediction(),
365                     m_graph[child2].prediction(),
366                     m_graph[child3].prediction()));
368             blessArrayOperation(child1, child2, 3);
370             Node* nodePtr = &m_graph[m_compileIndex];
// Fix the stored value's edge according to the put mode: integer-typed
// arrays keep int values as-is, float arrays need a double edge
// (the fixDoubleEdge call after the Float cases is elided).
372             switch (nodePtr->arrayMode().modeForPut().type()) {
376             case Array::Int8Array:
377             case Array::Int16Array:
378             case Array::Int32Array:
379             case Array::Uint8Array:
380             case Array::Uint8ClampedArray:
381             case Array::Uint16Array:
382             case Array::Uint32Array:
383                 if (!m_graph[child3].shouldSpeculateInteger())
386             case Array::Float32Array:
387             case Array::Float64Array:
// --- NewArray (label elided): compute the least upper bound indexing type
// over all element predictions; double arrays get double edges on elements.
397             for (unsigned i = m_graph.varArgNumChildren(node); i--;) {
398                 node.setIndexingType(
399                     leastUpperBoundOfIndexingTypeAndType(
400                         node.indexingType(), m_graph[m_graph.varArgChild(node, i)].prediction()));
402             if (node.indexingType() == ArrayWithDouble) {
403                 for (unsigned i = m_graph.varArgNumChildren(node); i--;)
// --- Verbose-dump epilogue.
413 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
414         if (!(node.flags() & NodeHasVarArgs)) {
415             dataLogF("new children: ");
416             node.dumpChildren(WTF::dataFile());
// Append `node` to the graph and schedule it for insertion before the node
// currently being fixed up. Returns the new node's index (the return
// statement is elided in this view). When shouldGenerate is set, the new
// node gets an extra ref so it is treated as live.
422     NodeIndex addNode(const Node& node, bool shouldGenerate)
// The new node's index is the current graph size, since append() adds at
// the end.
424         NodeIndex nodeIndex = m_graph.size();
425         m_graph.append(node);
426         m_insertionSet.append(m_indexInBlock, nodeIndex);
// Guarded by `if (shouldGenerate)` (elided line 427).
428             m_graph[nodeIndex].ref();
// Insert the checks that guarantee `array` conforms to `arrayMode` before the
// current node, and return the index of the storage node (GetButterfly /
// GetIndexedPropertyStorage) the caller should wire in — or NoNode when the
// mode needs no separate storage child (early return elided in this view).
// `storageCheck` decides whether a storage node is needed for this mode;
// `index` is forwarded to Arrayify so it can hint the required length.
432     NodeIndex checkArray(ArrayMode arrayMode, CodeOrigin codeOrigin, NodeIndex array, NodeIndex index, bool (*storageCheck)(const ArrayMode&) = canCSEStorage, bool shouldGenerate = true)
434         ASSERT(arrayMode.isSpecific());
// Non-null only for "original array" modes whose structure is known exactly.
438         Structure* structure = arrayMode.originalArrayStructure(m_graph, codeOrigin);
440         if (arrayMode.doesConversion()) {
// Conversion path, with a known target structure (guard elided):
// ArrayifyToStructure both converts and pins the structure in one node.
445                 Node arrayify(ArrayifyToStructure, codeOrigin, OpInfo(structure), OpInfo(arrayMode.asWord()), array, index);
447                 NodeIndex arrayifyIndex = m_graph.size();
448                 m_graph.append(arrayify);
449                 m_insertionSet.append(m_indexInBlock, arrayifyIndex);
// Conversion path without a known structure: plain Arrayify.
451                 Node arrayify(Arrayify, codeOrigin, OpInfo(arrayMode.asWord()), array, index);
453                 NodeIndex arrayifyIndex = m_graph.size();
454                 m_graph.append(arrayify);
455                 m_insertionSet.append(m_indexInBlock, arrayifyIndex);
// No-conversion path with a known structure: a CheckStructure suffices.
459                 Node checkStructure(CheckStructure, codeOrigin, OpInfo(m_graph.addStructureSet(structure)), array);
460                 checkStructure.ref();
461                 NodeIndex checkStructureIndex = m_graph.size();
462                 m_graph.append(checkStructure);
463                 m_insertionSet.append(m_indexInBlock, checkStructureIndex);
// No-conversion path without a known structure: check the indexing shape.
465                 Node checkArray(CheckArray, codeOrigin, OpInfo(arrayMode.asWord()), array);
467                 NodeIndex checkArrayIndex = m_graph.size();
468                 m_graph.append(checkArray);
469                 m_insertionSet.append(m_indexInBlock, checkArrayIndex);
// Modes that keep their storage inline need no storage node (returns NoNode
// on the elided line after this check).
473         if (!storageCheck(arrayMode))
// JSArray-style storage lives behind the butterfly; typed arrays use
// GetIndexedPropertyStorage instead.
479         if (arrayMode.usesButterfly())
480             return addNode(Node(GetButterfly, codeOrigin, array), shouldGenerate);
482         return addNode(Node(GetIndexedPropertyStorage, codeOrigin, OpInfo(arrayMode.asWord()), array), shouldGenerate);
// Finalize an array-accessing node: after its ArrayMode has been chosen,
// insert the mode checks via checkArray() and attach the resulting storage
// node as child number `storageChildIdx` of the current node.
485     void blessArrayOperation(Edge base, Edge index, unsigned storageChildIdx)
// Only performed on the first (pre-fixpoint) pass.
487         if (m_graph.m_fixpointState > BeforeFixpoint)
490         Node* nodePtr = &m_graph[m_compileIndex];
492         switch (nodePtr->arrayMode().type()) {
// ForceExit mode: the profiling says this access always fails, so plant an
// unconditional OSR exit before the node.
493         case Array::ForceExit: {
494             Node forceExit(ForceOSRExit, nodePtr->codeOrigin);
496             NodeIndex forceExitIndex = m_graph.size();
497             m_graph.append(forceExit);
498             m_insertionSet.append(m_indexInBlock, forceExitIndex);
// These placeholder modes must have been resolved before this point.
502         case Array::SelectUsingPredictions:
503         case Array::Unprofiled:
504             ASSERT_NOT_REACHED();
// Default path: emit the checks and wire in the storage child, if any.
511             NodeIndex storage = checkArray(nodePtr->arrayMode(), nodePtr->codeOrigin, base.index(), index.indexUnchecked());
512             if (storage == NoNode)
515             m_graph.child(m_graph[m_compileIndex], storageChildIdx) = Edge(storage);
// If `edge` points at a ValueToInt32 whose input is already int-speculated,
// the conversion is a no-op: bypass it by re-pointing the edge at the
// conversion's own child (the early returns for the two guards are elided).
520     void fixIntEdge(Edge& edge)
522         Node& node = m_graph[edge];
523         if (node.op() != ValueToInt32)
526         if (!m_graph[node.child1()].shouldSpeculateInteger())
530         Edge newEdge = node.child1();
// Transfer the reference from the ValueToInt32 to its input, then redirect
// (the assignment of `edge = newEdge` is elided in this view).
532         m_graph.ref(newEdge);
533         m_graph.deref(oldEdge);
// Ensure child number `childIndex` of the current node is consumed as a
// double. If the child is not int-speculated it can be used as a double
// directly (just set DoubleUse); otherwise splice in an Int32ToDouble
// conversion node between child and parent.
538     void fixDoubleEdge(unsigned childIndex)
540         Node& source = m_graph[m_compileIndex];
541         Edge& edge = m_graph.child(source, childIndex);
543         if (!m_graph[edge].shouldSpeculateInteger()) {
544             edge.setUseKind(DoubleUse);
// The conversion node will be appended at the end of the node table.
548         NodeIndex resultIndex = (NodeIndex)m_graph.size();
550 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
551         dataLogF("(replacing @%u->@%u with @%u->@%u) ",
552             m_compileIndex, edge.index(), m_compileIndex, resultIndex);
555         // Fix the edge up here because it's a reference that will be clobbered by
556         // the append() below.
557         NodeIndex oldIndex = edge.index();
558         edge = Edge(resultIndex, DoubleUse);
560         m_graph.append(Node(Int32ToDouble, source.codeOrigin, oldIndex));
561         m_insertionSet.append(m_indexInBlock, resultIndex);
563         Node& int32ToDouble = m_graph[resultIndex];
564         int32ToDouble.predict(SpecDouble);
// Position of the node currently being fixed up within its block; used as
// the insertion point for nodes queued in m_insertionSet.
568     unsigned m_indexInBlock;
// Graph index of the node currently being fixed up; reloaded after any
// m_graph.append() since appends can invalidate node references.
569     NodeIndex m_compileIndex;
// Nodes created during fixup, flushed into the block by fixupBlock().
570     InsertionSet<NodeIndex> m_insertionSet;
// Public entry point: run the fixup phase over `graph`. Returns whether the
// phase changed anything (via the generic runPhase driver).
573 bool performFixup(Graph& graph)
// Scoped sampling-profiler marker for this phase.
575     SamplingRegion samplingRegion("DFG Fixup Phase");
576     return runPhase<FixupPhase>(graph);
579 } } // namespace JSC::DFG
581 #endif // ENABLE(DFG_JIT)