/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "DFGByteCodeParser.h"

#if ENABLE(DFG_JIT)

#include "DFGByteCodeCache.h"
#include "DFGCapabilities.h"
#include "CodeBlock.h"
#include <wtf/HashMap.h>
#include <wtf/MathExtras.h>
namespace JSC { namespace DFG {

// === ByteCodeParser ===
//
// This class is used to compile the dataflow graph from a CodeBlock.
class ByteCodeParser {
public:
    ByteCodeParser(JSGlobalData* globalData, CodeBlock* codeBlock, CodeBlock* profiledBlock, Graph& graph)
        : m_globalData(globalData)
        , m_codeBlock(codeBlock)
        , m_profiledBlock(profiledBlock)
        , m_graph(graph)
        , m_currentIndex(0)
        , m_constantUndefined(UINT_MAX)
        , m_constantNull(UINT_MAX)
        , m_constantNaN(UINT_MAX)
        , m_constant1(UINT_MAX)
        , m_constants(codeBlock->numberOfConstantRegisters())
        , m_numArguments(codeBlock->numParameters())
        , m_numLocals(codeBlock->m_numCalleeRegisters)
        , m_preservedVars(codeBlock->m_numVars)
        , m_parameterSlots(0)
        , m_numPassedVarArgs(0)
        , m_globalResolveNumber(0)
        , m_inlineStackTop(0)
        , m_haveBuiltOperandMaps(false)
    {
        ASSERT(m_profiledBlock);

        for (int i = 0; i < codeBlock->m_numVars; ++i)
            m_preservedVars.set(i);
    }
    // Parse a full CodeBlock of bytecode.
    bool parse();

private:
    // Just parse from m_currentIndex to the end of the current CodeBlock.
    void parseCodeBlock();

    // Helper for min and max.
    bool handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis);

    // Handle calls. This resolves issues surrounding inlining and intrinsics.
    void handleCall(Interpreter*, Instruction* currentInstruction, NodeType op, CodeSpecializationKind);
    void emitFunctionCheck(JSFunction* expectedFunction, NodeIndex callTarget, int registerOffset, CodeSpecializationKind);
    // Handle inlining. Return true if it succeeded, false if we need to plant a call.
    bool handleInlining(bool usesResult, int callTarget, NodeIndex callTargetNodeIndex, int resultOperand, bool certainAboutExpectedFunction, JSFunction*, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind);
    // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
    bool handleIntrinsic(bool usesResult, int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, PredictedType prediction);
    // Prepare to parse a block.
    void prepareToParseBlock();
    // Parse a single basic block of bytecode instructions.
    bool parseBlock(unsigned limit);
    // Find reachable code and setup predecessor links in the graph's BasicBlocks.
    void determineReachability();
    // Enqueue a block onto the worklist, if necessary.
    void handleSuccessor(Vector<BlockIndex, 16>& worklist, BlockIndex, BlockIndex successor);
    // Link block successors.
    void linkBlock(BasicBlock*, Vector<BlockIndex>& possibleTargets);
    void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets);
    // Link GetLocal & SetLocal nodes, to ensure live values are generated.
    enum PhiStackType {
        LocalPhiStack,
        ArgumentPhiStack
    };
    template<PhiStackType stackType>
    void processPhiStack();
    // Add spill locations to nodes.
    void allocateVirtualRegisters();

    VariableAccessData* newVariableAccessData(int operand)
    {
        ASSERT(operand < FirstConstantRegisterIndex);

        m_graph.m_variableAccessData.append(VariableAccessData(static_cast<VirtualRegister>(operand)));
        return &m_graph.m_variableAccessData.last();
    }
    // Get/Set the operands/result of a bytecode instruction.
    NodeIndex getDirect(int operand)
    {
        // Is this a constant?
        if (operand >= FirstConstantRegisterIndex) {
            unsigned constant = operand - FirstConstantRegisterIndex;
            ASSERT(constant < m_constants.size());
            return getJSConstant(constant);
        }

        // Is this an argument?
        if (operandIsArgument(operand))
            return getArgument(operand);

        // Must be a local.
        return getLocal((unsigned)operand);
    }
    NodeIndex get(int operand)
    {
        return getDirect(m_inlineStackTop->remapOperand(operand));
    }
    void setDirect(int operand, NodeIndex value)
    {
        // Is this an argument?
        if (operandIsArgument(operand)) {
            setArgument(operand, value);
            return;
        }

        // Must be a local.
        setLocal((unsigned)operand, value);
    }
    void set(int operand, NodeIndex value)
    {
        setDirect(m_inlineStackTop->remapOperand(operand), value);
    }
    // Used in implementing get/set, above, where the operand is a local variable.
    NodeIndex getLocal(unsigned operand)
    {
        NodeIndex nodeIndex = m_currentBlock->variablesAtTail.local(operand);

        if (nodeIndex != NoNode) {
            Node* nodePtr = &m_graph[nodeIndex];
            if (nodePtr->op == Flush) {
                // Two possibilities: either the block wants the local to be live
                // but has not loaded its value, or it has loaded its value, in
                // which case we're done.
                Node& flushChild = m_graph[nodePtr->child1()];
                if (flushChild.op == Phi) {
                    VariableAccessData* variableAccessData = flushChild.variableAccessData();
                    nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), nodePtr->child1());
                    m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
                    return nodeIndex;
                }
                nodePtr = &flushChild;
            }
            if (nodePtr->op == GetLocal)
                return nodeIndex;
            ASSERT(nodePtr->op == SetLocal);
            return nodePtr->child1();
        }

        // Check for reads of temporaries from prior blocks,
        // expand m_preservedVars to cover these.
        m_preservedVars.set(operand);

        VariableAccessData* variableAccessData = newVariableAccessData(operand);

        NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
        m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, operand));
        nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), phi);
        m_currentBlock->variablesAtTail.local(operand) = nodeIndex;

        m_currentBlock->variablesAtHead.setLocalFirstTime(operand, nodeIndex);

        return nodeIndex;
    }
    void setLocal(unsigned operand, NodeIndex value)
    {
        m_currentBlock->variablesAtTail.local(operand) = addToGraph(SetLocal, OpInfo(newVariableAccessData(operand)), value);
    }
    // Used in implementing get/set, above, where the operand is an argument.
    NodeIndex getArgument(unsigned operand)
    {
        unsigned argument = operandToArgument(operand);
        ASSERT(argument < m_numArguments);

        NodeIndex nodeIndex = m_currentBlock->variablesAtTail.argument(argument);

        if (nodeIndex != NoNode) {
            Node* nodePtr = &m_graph[nodeIndex];
            if (nodePtr->op == Flush) {
                // Two possibilities: either the block wants the local to be live
                // but has not loaded its value, or it has loaded its value, in
                // which case we're done.
                Node& flushChild = m_graph[nodePtr->child1()];
                if (flushChild.op == Phi) {
                    VariableAccessData* variableAccessData = flushChild.variableAccessData();
                    nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), nodePtr->child1());
                    m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
                    return nodeIndex;
                }
                nodePtr = &flushChild;
            }
            if (nodePtr->op == SetArgument) {
                // We're getting an argument in the first basic block; link
                // the GetLocal to the SetArgument.
                ASSERT(nodePtr->local() == static_cast<VirtualRegister>(operand));
                nodeIndex = addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex);
                m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
                return nodeIndex;
            }

            if (nodePtr->op == GetLocal)
                return nodeIndex;

            ASSERT(nodePtr->op == SetLocal);
            return nodePtr->child1();
        }

        VariableAccessData* variableAccessData = newVariableAccessData(operand);

        NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
        m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, argument));
        nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), phi);
        m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;

        m_currentBlock->variablesAtHead.setArgumentFirstTime(argument, nodeIndex);

        return nodeIndex;
    }
    void setArgument(int operand, NodeIndex value)
    {
        unsigned argument = operandToArgument(operand);
        ASSERT(argument < m_numArguments);

        m_currentBlock->variablesAtTail.argument(argument) = addToGraph(SetLocal, OpInfo(newVariableAccessData(operand)), value);
    }
    void flush(int operand)
    {
        // FIXME: This should check if the same operand had already been flushed to
        // some other local variable.

        operand = m_inlineStackTop->remapOperand(operand);

        ASSERT(operand < FirstConstantRegisterIndex);

        NodeIndex nodeIndex;
        int index;
        if (operandIsArgument(operand)) {
            index = operandToArgument(operand);
            nodeIndex = m_currentBlock->variablesAtTail.argument(index);
        } else {
            index = operand;
            nodeIndex = m_currentBlock->variablesAtTail.local(index);
            m_preservedVars.set(operand);
        }

        if (nodeIndex != NoNode) {
            Node& node = m_graph[nodeIndex];
            if (node.op == Flush || node.op == SetArgument) {
                // If a local has already been flushed, or if it's an argument in the
                // first basic block, then there is really no need to flush it. In fact
                // emitting a Flush instruction could just confuse things, since the
                // getArgument() code assumes that we never see a Flush of a SetArgument.
                return;
            }

            addToGraph(Flush, OpInfo(node.variableAccessData()), nodeIndex);
            return;
        }

        VariableAccessData* variableAccessData = newVariableAccessData(operand);
        NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
        nodeIndex = addToGraph(Flush, OpInfo(variableAccessData), phi);
        if (operandIsArgument(operand)) {
            m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, index));
            m_currentBlock->variablesAtTail.argument(index) = nodeIndex;
            m_currentBlock->variablesAtHead.setArgumentFirstTime(index, nodeIndex);
        } else {
            m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, index));
            m_currentBlock->variablesAtTail.local(index) = nodeIndex;
            m_currentBlock->variablesAtHead.setLocalFirstTime(index, nodeIndex);
        }
    }
    // Get an operand, and perform a ToInt32/ToNumber conversion on it.
    NodeIndex getToInt32(int operand)
    {
        return toInt32(get(operand));
    }
    NodeIndex getToNumber(int operand)
    {
        return toNumber(get(operand));
    }

    // Perform an ES5 ToInt32 operation - returns a node of type NodeResultInt32.
    NodeIndex toInt32(NodeIndex index)
    {
        Node& node = m_graph[index];

        if (node.hasInt32Result())
            return index;

        if (node.op == UInt32ToNumber)
            return node.child1();

        // Check for numeric constants boxed as JSValues.
        if (node.op == JSConstant) {
            JSValue v = valueOfJSConstant(index);
            if (v.isInt32())
                return getJSConstant(node.constantNumber());
            // FIXME: We could convert the double ToInteger at this point.
        }

        return addToGraph(ValueToInt32, index);
    }

    // Perform an ES5 ToNumber operation - returns a node of type NodeResultDouble.
    NodeIndex toNumber(NodeIndex index)
    {
        Node& node = m_graph[index];

        if (node.hasNumberResult())
            return index;

        if (node.op == JSConstant) {
            JSValue v = valueOfJSConstant(index);
            if (v.isNumber())
                return getJSConstant(node.constantNumber());
        }

        return addToGraph(ValueToNumber, OpInfo(NodeUseBottom), index);
    }

    NodeIndex getJSConstant(unsigned constant)
    {
        NodeIndex index = m_constants[constant].asJSValue;
        if (index != NoNode)
            return index;

        NodeIndex resultIndex = addToGraph(JSConstant, OpInfo(constant));
        m_constants[constant].asJSValue = resultIndex;
        return resultIndex;
    }
    // Helper functions to get/set the this value.
    NodeIndex getThis()
    {
        return get(m_inlineStackTop->m_codeBlock->thisRegister());
    }
    void setThis(NodeIndex value)
    {
        set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
    }

    // Convenience methods for checking nodes for constants.
    bool isJSConstant(NodeIndex index)
    {
        return m_graph[index].op == JSConstant;
    }
    bool isInt32Constant(NodeIndex nodeIndex)
    {
        return isJSConstant(nodeIndex) && valueOfJSConstant(nodeIndex).isInt32();
    }
    bool isSmallInt32Constant(NodeIndex nodeIndex)
    {
        if (!isJSConstant(nodeIndex))
            return false;
        JSValue value = valueOfJSConstant(nodeIndex);
        if (!value.isInt32())
            return false;
        int32_t intValue = value.asInt32();
        return intValue >= -5 && intValue <= 5;
    }
    // Convenience methods for getting constant values.
    JSValue valueOfJSConstant(NodeIndex index)
    {
        ASSERT(isJSConstant(index));
        return m_codeBlock->getConstant(FirstConstantRegisterIndex + m_graph[index].constantNumber());
    }
    int32_t valueOfInt32Constant(NodeIndex nodeIndex)
    {
        ASSERT(isInt32Constant(nodeIndex));
        return valueOfJSConstant(nodeIndex).asInt32();
    }
    // This method returns a JSConstant with the value 'undefined'.
    NodeIndex constantUndefined()
    {
        // Has m_constantUndefined been set up yet?
        if (m_constantUndefined == UINT_MAX) {
            // Search the constant pool for undefined, if we find it, we can just reuse this!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constantUndefined = 0; m_constantUndefined < numberOfConstants; ++m_constantUndefined) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined);
                if (testMe.isUndefined())
                    return getJSConstant(m_constantUndefined);
            }

            // Add undefined to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(jsUndefined());
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constantUndefined must refer to an entry in the CodeBlock's constant pool that has the value 'undefined'.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined).isUndefined());
        return getJSConstant(m_constantUndefined);
    }

    // This method returns a JSConstant with the value 'null'.
    NodeIndex constantNull()
    {
        // Has m_constantNull been set up yet?
        if (m_constantNull == UINT_MAX) {
            // Search the constant pool for null, if we find it, we can just reuse this!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constantNull = 0; m_constantNull < numberOfConstants; ++m_constantNull) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull);
                if (testMe.isNull())
                    return getJSConstant(m_constantNull);
            }

            // Add null to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(jsNull());
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constantNull must refer to an entry in the CodeBlock's constant pool that has the value 'null'.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull).isNull());
        return getJSConstant(m_constantNull);
    }

    // This method returns a DoubleConstant with the value 1.
    NodeIndex one()
    {
        // Has m_constant1 been set up yet?
        if (m_constant1 == UINT_MAX) {
            // Search the constant pool for the value 1, if we find it, we can just reuse this!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constant1 = 0; m_constant1 < numberOfConstants; ++m_constant1) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1);
                if (testMe.isInt32() && testMe.asInt32() == 1)
                    return getJSConstant(m_constant1);
            }

            // Add the value 1 to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(jsNumber(1));
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constant1 must refer to an entry in the CodeBlock's constant pool that has the integer value 1.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).isInt32());
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).asInt32() == 1);
        return getJSConstant(m_constant1);
    }

    // This method returns a DoubleConstant with the value NaN.
    NodeIndex constantNaN()
    {
        JSValue nan = jsNaN();

        // Has m_constantNaN been set up yet?
        if (m_constantNaN == UINT_MAX) {
            // Search the constant pool for the value NaN, if we find it, we can just reuse this!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constantNaN = 0; m_constantNaN < numberOfConstants; ++m_constantNaN) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN);
                if (JSValue::encode(testMe) == JSValue::encode(nan))
                    return getJSConstant(m_constantNaN);
            }

            // Add the value nan to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(nan);
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constantNaN must refer to an entry in the CodeBlock's constant pool that has the value nan.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).isDouble());
        ASSERT(isnan(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).asDouble()));
        return getJSConstant(m_constantNaN);
    }
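
    // Get a node representing a weak reference to the given cell. The WeakJSConstant
    // node is created on first use and cached in m_cellConstantNodes thereafter.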
    NodeIndex cellConstant(JSCell* cell)
    {
        pair<HashMap<JSCell*, NodeIndex>::iterator, bool> iter = m_cellConstantNodes.add(cell, NoNode);
        if (iter.second)
            iter.first->second = addToGraph(WeakJSConstant, OpInfo(cell));

        return iter.first->second;
    }

    CodeOrigin currentCodeOrigin()
    {
        return CodeOrigin(m_currentIndex, m_inlineStackTop->m_inlineCallFrame);
    }
    // These methods create a node and add it to the graph. If nodes of this type are
    // 'mustGenerate' then the node will implicitly be ref'ed to ensure generation.
    NodeIndex addToGraph(NodeType op, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
    {
        NodeIndex resultIndex = (NodeIndex)m_graph.size();
        m_graph.append(Node(op, currentCodeOrigin(), child1, child2, child3));

        if (op & NodeMustGenerate)
            m_graph.ref(resultIndex);
        return resultIndex;
    }
    NodeIndex addToGraph(NodeType op, OpInfo info, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
    {
        NodeIndex resultIndex = (NodeIndex)m_graph.size();
        m_graph.append(Node(op, currentCodeOrigin(), info, child1, child2, child3));

        if (op & NodeMustGenerate)
            m_graph.ref(resultIndex);
        return resultIndex;
    }
    NodeIndex addToGraph(NodeType op, OpInfo info1, OpInfo info2, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
    {
        NodeIndex resultIndex = (NodeIndex)m_graph.size();
        m_graph.append(Node(op, currentCodeOrigin(), info1, info2, child1, child2, child3));

        if (op & NodeMustGenerate)
            m_graph.ref(resultIndex);
        return resultIndex;
    }

    NodeIndex addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
    {
        NodeIndex resultIndex = (NodeIndex)m_graph.size();
        m_graph.append(Node(Node::VarArg, op, currentCodeOrigin(), info1, info2, m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs));

        m_numPassedVarArgs = 0;

        if (op & NodeMustGenerate)
            m_graph.ref(resultIndex);
        return resultIndex;
    }

    void addVarArgChild(NodeIndex child)
    {
        m_graph.m_varArgChildren.append(child);
        m_numPassedVarArgs++;
    }
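
    // Build a varargs Call/Construct node for an op_call/op_construct instruction: the
    // callee and arguments become vararg children, the parameter-slot high-water mark is
    // updated, and the result (if op_call_put_result follows) gets a value prediction.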
    NodeIndex addCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op)
    {
        Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);

        PredictedType prediction = PredictNone;
        if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
            prediction = getPrediction(m_graph.size(), m_currentIndex + OPCODE_LENGTH(op_call));

        addVarArgChild(get(currentInstruction[1].u.operand));
        int argCount = currentInstruction[2].u.operand;
        if (RegisterFile::CallFrameHeaderSize + (unsigned)argCount > m_parameterSlots)
            m_parameterSlots = RegisterFile::CallFrameHeaderSize + argCount;

        int registerOffset = currentInstruction[3].u.operand;
        int dummyThisArgument = op == Call ? 0 : 1;
        for (int i = 0 + dummyThisArgument; i < argCount; ++i)
            addVarArgChild(get(registerOffset + argumentToOperand(i)));

        NodeIndex call = addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction));
        if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
            set(putInstruction[1].u.operand, call);
        return call;
    }
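
    // Value-profile queries. The "WithoutOSRExit" variants just report the profiled
    // prediction for the current (or given) bytecode offset; the getPrediction()
    // variants additionally plant a ForceOSRExit when no profiling data is available.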
    PredictedType getPredictionWithoutOSRExit(NodeIndex nodeIndex, unsigned bytecodeIndex)
    {
        UNUSED_PARAM(nodeIndex);

        ValueProfile* profile = m_inlineStackTop->m_profiledBlock->valueProfileForBytecodeOffset(bytecodeIndex);
        PredictedType prediction = profile->computeUpdatedPrediction();
#if DFG_ENABLE(DEBUG_VERBOSE)
        printf("Dynamic [@%u, bc#%u] prediction: %s\n", nodeIndex, bytecodeIndex, predictionToString(prediction));
#endif

        return prediction;
    }

    PredictedType getPrediction(NodeIndex nodeIndex, unsigned bytecodeIndex)
    {
        PredictedType prediction = getPredictionWithoutOSRExit(nodeIndex, bytecodeIndex);

        if (prediction == PredictNone) {
            // We have no information about what values this node generates. Give up
            // on executing this code, since we're likely to do more damage than good.
            addToGraph(ForceOSRExit);
        }

        return prediction;
    }

    PredictedType getPredictionWithoutOSRExit()
    {
        return getPredictionWithoutOSRExit(m_graph.size(), m_currentIndex);
    }

    PredictedType getPrediction()
    {
        return getPrediction(m_graph.size(), m_currentIndex);
    }
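
    // Decorate an arithmetic node with NodeMayOverflow/NodeMayNegZero flags when the
    // profiled block's slow-case counters or prior OSR exit sites indicate that the
    // speculative integer fast path is likely to fail.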
    NodeIndex makeSafe(NodeIndex nodeIndex)
    {
        if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
            return nodeIndex;

#if DFG_ENABLE(DEBUG_VERBOSE)
        printf("Making %s @%u safe at bc#%u because slow-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
#endif

        switch (m_graph[nodeIndex].op) {
        case UInt32ToNumber:
        case ArithAdd:
        case ArithSub:
        case ValueAdd:
        case ArithMod: // for ArithMode "MayOverflow" means we tried to divide by zero, or we saw double.
            m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow);
            break;

        case ArithMul:
            if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) {
#if DFG_ENABLE(DEBUG_VERBOSE)
                printf("Making ArithMul @%u take deepest slow case.\n", nodeIndex);
#endif
                m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow | NodeMayNegZero);
            } else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
                       || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) {
#if DFG_ENABLE(DEBUG_VERBOSE)
                printf("Making ArithMul @%u take faster slow case.\n", nodeIndex);
#endif
                m_graph[nodeIndex].mergeArithNodeFlags(NodeMayNegZero);
            }
            break;

        default:
            ASSERT_NOT_REACHED();
            break;
        }

        return nodeIndex;
    }
    NodeIndex makeDivSafe(NodeIndex nodeIndex)
    {
        ASSERT(m_graph[nodeIndex].op == ArithDiv);

        // The main slow case counter for op_div in the old JIT counts only when
        // the operands are not numbers. We don't care about that since we already
        // have speculations in place that take care of that separately. We only
        // care about when the outcome of the division is not an integer, which
        // is what the special fast case counter tells us.

        if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSpecialFastCase(m_currentIndex)
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
            return nodeIndex;

#if DFG_ENABLE(DEBUG_VERBOSE)
        printf("Making %s @%u safe at bc#%u because special fast-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->specialFastCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
#endif

        // FIXME: It might be possible to make this more granular. The DFG certainly can
        // distinguish between negative zero and overflow in its exit profiles.
        m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow | NodeMayNegZero);

        return nodeIndex;
    }
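
    // Check that a cached structure chain still matches the prototype structures it was
    // recorded against, by walking the chain and comparing each stored prototype's structure.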
    bool structureChainIsStillValid(bool direct, Structure* previousStructure, StructureChain* chain)
    {
        if (direct)
            return true;

        if (!previousStructure->storedPrototype().isNull() && previousStructure->storedPrototype().asCell()->structure() != chain->head()->get())
            return false;

        for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) {
            if (!(*it)->storedPrototype().isNull() && (*it)->storedPrototype().asCell()->structure() != it[1].get())
                return false;
        }

        return true;
    }

    void buildOperandMapsIfNecessary();
    JSGlobalData* m_globalData;
    CodeBlock* m_codeBlock;
    CodeBlock* m_profiledBlock;
    Graph& m_graph;

    // The current block being generated.
    BasicBlock* m_currentBlock;
    // The bytecode index of the current instruction being generated.
    unsigned m_currentIndex;

    // We use these values during code generation, and to avoid the need for
    // special handling we make sure they are available as constants in the
    // CodeBlock's constant pool. These variables are initialized to
    // UINT_MAX, and lazily updated to hold an index into the CodeBlock's
    // constant pool, as necessary.
    unsigned m_constantUndefined;
    unsigned m_constantNull;
    unsigned m_constantNaN;
    unsigned m_constant1;
    HashMap<JSCell*, unsigned> m_cellConstants;
    HashMap<JSCell*, NodeIndex> m_cellConstantNodes;

    // A constant in the constant pool may be represented by more than one
    // node in the graph, depending on the context in which it is being used.
    struct ConstantRecord {
        ConstantRecord()
            : asInt32(NoNode)
            , asNumeric(NoNode)
            , asJSValue(NoNode)
        {
        }

        NodeIndex asInt32;
        NodeIndex asNumeric;
        NodeIndex asJSValue;
    };
    // Track the index of the node whose result is the current value for every
    // register value in the bytecode - argument, local, and temporary.
    Vector<ConstantRecord, 16> m_constants;

    // The number of arguments passed to the function.
    unsigned m_numArguments;
    // The number of locals (vars + temporaries) used in the function.
    unsigned m_numLocals;
    // The set of registers we need to preserve across BasicBlock boundaries;
    // typically equal to the set of vars, but we expand this to cover all
    // temporaries that persist across blocks (due to ?:, &&, ||, etc).
    BitVector m_preservedVars;
    // The number of slots (in units of sizeof(Register)) that we need to
    // preallocate for calls emanating from this frame. This includes the
    // size of the CallFrame, only if this is not a leaf function. (I.e.
    // this is 0 if and only if this function is a leaf.)
    unsigned m_parameterSlots;
    // The number of var args passed to the next var arg node.
    unsigned m_numPassedVarArgs;
    // The index in the global resolve info.
    unsigned m_globalResolveNumber;

    struct PhiStackEntry {
        PhiStackEntry(BasicBlock* block, NodeIndex phi, unsigned varNo)
            : m_block(block)
            , m_phi(phi)
            , m_varNo(varNo)
        {
        }

        BasicBlock* m_block;
        NodeIndex m_phi;
        unsigned m_varNo;
    };
    Vector<PhiStackEntry, 16> m_argumentPhiStack;
    Vector<PhiStackEntry, 16> m_localPhiStack;
    struct InlineStackEntry {
        ByteCodeParser* m_byteCodeParser;

        CodeBlock* m_codeBlock;
        CodeBlock* m_profiledBlock;
        InlineCallFrame* m_inlineCallFrame;
        VirtualRegister m_calleeVR; // absolute virtual register, not relative to call frame

        ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }

        QueryableExitProfile m_exitProfile;

        // Remapping of identifier and constant numbers from the code block being
        // inlined (inline callee) to the code block that we're inlining into
        // (the machine code block, which is the transitive, though not necessarily
        // direct, caller).
        Vector<unsigned> m_identifierRemap;
        Vector<unsigned> m_constantRemap;

        // Blocks introduced by this code block, which need successor linking.
        // May include up to one basic block that includes the continuation after
        // the callsite in the caller. These must be appended in the order that they
        // are created, but their bytecodeBegin values need not be in order as they
        // are ignored.
        Vector<UnlinkedBlock> m_unlinkedBlocks;

        // Potential block linking targets. Must be sorted by bytecodeBegin, and
        // cannot have two blocks that have the same bytecodeBegin. For this very
        // reason, this is not equivalent to
        Vector<BlockIndex> m_blockLinkingTargets;

        // If the callsite's basic block was split into two, then this will be
        // the head of the callsite block. It needs its successors linked to the
        // m_unlinkedBlocks, but not the other way around: there's no way for
        // any blocks in m_unlinkedBlocks to jump back into this block.
        BlockIndex m_callsiteBlockHead;

        // Does the callsite block head need linking? This is typically true
        // but will be false for the machine code block's inline stack entry
        // (since that one is not inlined) and for cases where an inline callee
        // did the linking for us.
        bool m_callsiteBlockHeadNeedsLinking;

        VirtualRegister m_returnValue;

        // Did we see any returns? We need to handle the (uncommon but necessary)
        // case where a procedure that does not return was inlined.
        bool m_didReturn;

        // Did we have any early returns?
        bool m_didEarlyReturn;

        InlineStackEntry* m_caller;

        InlineStackEntry(ByteCodeParser*, CodeBlock*, CodeBlock* profiledBlock, BlockIndex callsiteBlockHead, VirtualRegister calleeVR, JSFunction* callee, VirtualRegister returnValueVR, VirtualRegister inlineCallFrameStart, CodeSpecializationKind);

        ~InlineStackEntry()
        {
            m_byteCodeParser->m_inlineStackTop = m_caller;
        }

        int remapOperand(int operand) const
        {
            if (!m_inlineCallFrame)
                return operand;

            if (operand >= FirstConstantRegisterIndex) {
                int result = m_constantRemap[operand - FirstConstantRegisterIndex];
                ASSERT(result >= FirstConstantRegisterIndex);
                return result;
            }

            return operand + m_inlineCallFrame->stackOffset;
        }
    };

    InlineStackEntry* m_inlineStackTop;

    // Have we built operand maps? We initialize them lazily, and only when doing
    // inlining.
    bool m_haveBuiltOperandMaps;
    // Mapping between identifier names and numbers.
    IdentifierMap m_identifierMap;
    // Mapping between values and constant numbers.
    JSValueMap m_jsValueMap;

    // Cache of code blocks that we've generated bytecode for.
    ByteCodeCache<canInlineFunctionFor> m_codeBlockCache;
};
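
// Every bytecode op handled in parseBlock() ends with one of these macros: NEXT_OPCODE
// advances past the instruction and continues the parsing loop, while LAST_OPCODE
// advances and returns from parseBlock() with the current shouldContinueParsing value.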
#define NEXT_OPCODE(name) \
    m_currentIndex += OPCODE_LENGTH(name); \
    continue

#define LAST_OPCODE(name) \
    m_currentIndex += OPCODE_LENGTH(name); \
    return shouldContinueParsing
void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op, CodeSpecializationKind kind)
{
    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));

    NodeIndex callTarget = get(currentInstruction[1].u.operand);
    enum { ConstantFunction, LinkedFunction, UnknownFunction } callType;

#if DFG_ENABLE(DEBUG_VERBOSE)
    printf("Slow case count for call at @%lu bc#%u: %u/%u; exit profile: %d.\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_profiledBlock->executionEntryCount(), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
#endif

    if (m_graph.isFunctionConstant(m_codeBlock, callTarget))
        callType = ConstantFunction;
    else if (!!m_inlineStackTop->m_profiledBlock->getCallLinkInfo(m_currentIndex).lastSeenCallee
             && !m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
             && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
        callType = LinkedFunction;
    else
        callType = UnknownFunction;
    if (callType != UnknownFunction) {
        int argumentCountIncludingThis = currentInstruction[2].u.operand;
        int registerOffset = currentInstruction[3].u.operand;

        // Do we have a result?
        bool usesResult = false;
        int resultOperand = 0; // make compiler happy
        unsigned nextOffset = m_currentIndex + OPCODE_LENGTH(op_call);
        Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
        PredictedType prediction = PredictNone;
        if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
            resultOperand = putInstruction[1].u.operand;
            usesResult = true;
            prediction = getPrediction(m_graph.size(), nextOffset);
            nextOffset += OPCODE_LENGTH(op_call_put_result);
        }
        JSFunction* expectedFunction;
        Intrinsic intrinsic;
        bool certainAboutExpectedFunction;
        if (callType == ConstantFunction) {
            expectedFunction = m_graph.valueOfFunctionConstant(m_codeBlock, callTarget);
            intrinsic = expectedFunction->executable()->intrinsicFor(kind);
            certainAboutExpectedFunction = true;
        } else {
            ASSERT(callType == LinkedFunction);
            expectedFunction = m_inlineStackTop->m_profiledBlock->getCallLinkInfo(m_currentIndex).lastSeenCallee.get();
            intrinsic = expectedFunction->executable()->intrinsicFor(kind);
            certainAboutExpectedFunction = false;
        }

        if (intrinsic != NoIntrinsic) {
            if (!certainAboutExpectedFunction)
                emitFunctionCheck(expectedFunction, callTarget, registerOffset, kind);

            if (handleIntrinsic(usesResult, resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
                if (!certainAboutExpectedFunction) {
                    // Need to keep the call target alive for OSR. We could easily optimize this out if we wanted
                    // to, since at this point we know that the call target is a constant. It's just that OSR isn't
                    // smart enough to figure that out, since it doesn't understand CheckFunction.
                    addToGraph(Phantom, callTarget);
                }

                return;
            }
        } else if (handleInlining(usesResult, currentInstruction[1].u.operand, callTarget, resultOperand, certainAboutExpectedFunction, expectedFunction, registerOffset, argumentCountIncludingThis, nextOffset, kind))
            return;
    }

    addCall(interpreter, currentInstruction, op);
}
void ByteCodeParser::emitFunctionCheck(JSFunction* expectedFunction, NodeIndex callTarget, int registerOffset, CodeSpecializationKind kind)
{
    NodeIndex thisArgument;
    if (kind == CodeForCall)
        thisArgument = get(registerOffset + argumentToOperand(0));
    else
        thisArgument = NoNode;
    addToGraph(CheckFunction, OpInfo(expectedFunction), callTarget, thisArgument);
}
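
// Attempt to inline a call. Returns false (so the caller plants a normal Call node) if
// the target is unsuitable: not a JSFunction, a host function, an arity mismatch,
// excessive inline depth, recursion, or a code block that fails the inlining heuristics.
// On success, the callee's bytecode is parsed in place and any new blocks are linked up.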
bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex callTargetNodeIndex, int resultOperand, bool certainAboutExpectedFunction, JSFunction* expectedFunction, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind kind)
{
    // First, the really simple checks: do we have an actual JS function?
    if (!expectedFunction)
        return false;
    if (expectedFunction->isHostFunction())
        return false;

    FunctionExecutable* executable = expectedFunction->jsExecutable();

    // Does the number of arguments we're passing match the arity of the target? We could
    // inline arity check failures, but for simplicity we currently don't.
    if (static_cast<int>(executable->parameterCount()) + 1 != argumentCountIncludingThis)
        return false;

    // Have we exceeded inline stack depth, or are we trying to inline a recursive call?
    // If either of these are detected, then don't inline.
    unsigned depth = 0;
    for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
        ++depth;
        if (depth >= Options::maximumInliningDepth)
            return false; // Depth exceeded.

        if (entry->executable() == executable)
            return false; // Recursion detected.
    }

    // Does the code block's size match the heuristics/requirements for being
    // an inline candidate?
    CodeBlock* profiledBlock = executable->profiledCodeBlockFor(kind);
    if (!mightInlineFunctionFor(profiledBlock, kind))
        return false;

    // If we get here then it looks like we should definitely inline this code. Proceed
    // with parsing the code to get bytecode, so that we can then parse the bytecode.
    CodeBlock* codeBlock = m_codeBlockCache.get(CodeBlockKey(executable, kind), expectedFunction->scope());
    if (!codeBlock)
        return false;

    ASSERT(canInlineFunctionFor(codeBlock, kind));

#if DFG_ENABLE(DEBUG_VERBOSE)
    printf("Inlining executable %p.\n", executable);
#endif

    // Now we know without a doubt that we are committed to inlining. So begin the process
    // by checking the callee (if necessary) and making sure that arguments and the callee
    // are flushed.
    if (!certainAboutExpectedFunction)
        emitFunctionCheck(expectedFunction, callTargetNodeIndex, registerOffset, kind);

    // FIXME: Don't flush constants!

    for (int i = 1; i < argumentCountIncludingThis; ++i)
        flush(registerOffset + argumentToOperand(i));

    int inlineCallFrameStart = m_inlineStackTop->remapOperand(registerOffset) - RegisterFile::CallFrameHeaderSize;

    // Make sure that the area used by the call frame is reserved.
    for (int arg = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize + codeBlock->m_numVars; arg-- > inlineCallFrameStart + 1;)
        m_preservedVars.set(m_inlineStackTop->remapOperand(arg));

    // Make sure that we have enough locals.
    unsigned newNumLocals = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters;
    if (newNumLocals > m_numLocals) {
        m_numLocals = newNumLocals;
        for (size_t i = 0; i < m_graph.m_blocks.size(); ++i)
            m_graph.m_blocks[i]->ensureLocals(newNumLocals);
    }

    InlineStackEntry inlineStackEntry(this, codeBlock, profiledBlock, m_graph.m_blocks.size() - 1, (VirtualRegister)m_inlineStackTop->remapOperand(callTarget), expectedFunction, (VirtualRegister)m_inlineStackTop->remapOperand(usesResult ? resultOperand : InvalidVirtualRegister), (VirtualRegister)inlineCallFrameStart, kind);

    // This is where the actual inlining really happens.
    unsigned oldIndex = m_currentIndex;
    m_currentIndex = 0;

    addToGraph(InlineStart);

    parseCodeBlock();

    m_currentIndex = oldIndex;

    // If the inlined code created some new basic blocks, then we have linking to do.
    if (inlineStackEntry.m_callsiteBlockHead != m_graph.m_blocks.size() - 1) {

        ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
        if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
            linkBlock(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead].get(), inlineStackEntry.m_blockLinkingTargets);
        else
            ASSERT(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead]->isLinked);

        // It's possible that the callsite block head is not owned by the caller.
        if (!inlineStackEntry.m_caller->m_unlinkedBlocks.isEmpty()) {
            // It's definitely owned by the caller, because the caller created new blocks.
            // Assert that this all adds up.
            ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_blockIndex == inlineStackEntry.m_callsiteBlockHead);
            ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking);
            inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking = false;
        } else {
            // It's definitely not owned by the caller. Tell the caller that he does not
            // need to link his callsite block head, because we did it for him.
            ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking);
            ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHead == inlineStackEntry.m_callsiteBlockHead);
            inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking = false;
        }

        linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
    } else
        ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());

    // If there was a return, but no early returns, then we're done. We allow parsing of
    // the caller to continue in whatever basic block we're in right now.
    if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
        BasicBlock* lastBlock = m_graph.m_blocks.last().get();
        ASSERT(lastBlock->begin == lastBlock->end || !m_graph.last().isTerminal());

        // If we created new blocks then the last block needs linking, but in the
        // caller. It doesn't need to be linked to, but it needs outgoing links.
        if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
#if DFG_ENABLE(DEBUG_VERBOSE)
            printf("Reascribing bytecode index of block %p from bc#%u to bc#%u (inline return case).\n", lastBlock, lastBlock->bytecodeBegin, m_currentIndex);
#endif
            // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
            // for release builds because this block will never serve as a potential target
            // in the linker's binary search.
            lastBlock->bytecodeBegin = m_currentIndex;
            m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size() - 1));
        }

        m_currentBlock = m_graph.m_blocks.last().get();

#if DFG_ENABLE(DEBUG_VERBOSE)
        printf("Done inlining executable %p, continuing code generation at epilogue.\n", executable);
#endif
        return true;
    }

    // If we get to this point then all blocks must end in some sort of terminals.
    ASSERT(m_graph.last().isTerminal());

    // Link the early returns to the basic block we're about to create.
    for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
        if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
            continue;
        BasicBlock* block = m_graph.m_blocks[inlineStackEntry.m_unlinkedBlocks[i].m_blockIndex].get();
        ASSERT(!block->isLinked);
        Node& node = m_graph[block->end - 1];
        ASSERT(node.op == Jump);
        ASSERT(node.takenBlockIndex() == NoBlock);
        node.setTakenBlockIndex(m_graph.m_blocks.size());
        inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
#if !ASSERT_DISABLED
        block->isLinked = true;
#endif
    }

    // Need to create a new basic block for the continuation at the caller.
    OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(nextOffset, m_graph.size(), m_numArguments, m_numLocals));
#if DFG_ENABLE(DEBUG_VERBOSE)
    printf("Creating inline epilogue basic block %p, #%lu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
#endif
    m_currentBlock = block.get();
    ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_caller->m_blockLinkingTargets.last()]->bytecodeBegin < nextOffset);
    m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
    m_inlineStackTop->m_caller->m_blockLinkingTargets.append(m_graph.m_blocks.size());
    m_graph.m_blocks.append(block.release());
    prepareToParseBlock();

    // At this point we return and continue to generate code for the caller, but
    // in the new basic block.
#if DFG_ENABLE(DEBUG_VERBOSE)
    printf("Done inlining executable %p, continuing code generation in new block.\n", executable);
#endif
    return true;
}
bool ByteCodeParser::handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
{
    if (!usesResult)
        return true;

    if (argumentCountIncludingThis == 1) { // Math.min()
        set(resultOperand, constantNaN());
        return true;
    }

    if (argumentCountIncludingThis == 2) { // Math.min(x)
        set(resultOperand, getToNumber(registerOffset + argumentToOperand(1)));
        return true;
    }

    if (argumentCountIncludingThis == 3) { // Math.min(x, y)
        set(resultOperand, addToGraph(op, OpInfo(NodeUseBottom), getToNumber(registerOffset + argumentToOperand(1)), getToNumber(registerOffset + argumentToOperand(2))));
        return true;
    }

    // Don't handle >=3 arguments for now.
    return false;
}
// FIXME: We dead-code-eliminate unused Math intrinsics, but that's invalid because
// they need to perform the ToNumber conversion, which can have side-effects.
bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, PredictedType prediction)
{
    switch (intrinsic) {
    case AbsIntrinsic: {
        if (!usesResult) {
            // There is no such thing as executing abs for effect, so this
            // is dead code.
            return true;
        }

        if (argumentCountIncludingThis == 1) { // Math.abs()
            set(resultOperand, constantNaN());
            return true;
        }

        if (!MacroAssembler::supportsFloatingPointAbs())
            return false;

        NodeIndex nodeIndex = addToGraph(ArithAbs, OpInfo(NodeUseBottom), getToNumber(registerOffset + argumentToOperand(1)));
        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
            m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow);
        set(resultOperand, nodeIndex);
        return true;
    }

    case MinIntrinsic:
        return handleMinMax(usesResult, resultOperand, ArithMin, registerOffset, argumentCountIncludingThis);

    case MaxIntrinsic:
        return handleMinMax(usesResult, resultOperand, ArithMax, registerOffset, argumentCountIncludingThis);

    case SqrtIntrinsic: {
        if (!usesResult)
            return true;

        if (argumentCountIncludingThis == 1) { // Math.sqrt()
            set(resultOperand, constantNaN());
            return true;
        }

        if (!MacroAssembler::supportsFloatingPointSqrt())
            return false;

        set(resultOperand, addToGraph(ArithSqrt, getToNumber(registerOffset + argumentToOperand(1))));
        return true;
    }

    case ArrayPushIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        NodeIndex arrayPush = addToGraph(ArrayPush, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
        if (usesResult)
            set(resultOperand, arrayPush);

        return true;
    }

    case ArrayPopIntrinsic: {
        if (argumentCountIncludingThis != 1)
            return false;

        NodeIndex arrayPop = addToGraph(ArrayPop, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)));
        if (usesResult)
            set(resultOperand, arrayPop);
        return true;
    }

    case CharCodeAtIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        int thisOperand = registerOffset + argumentToOperand(0);
        if (!(m_graph[get(thisOperand)].prediction() & PredictString))
            return false;

        int indexOperand = registerOffset + argumentToOperand(1);
        NodeIndex storage = addToGraph(GetIndexedPropertyStorage, get(thisOperand), getToInt32(indexOperand));
        NodeIndex charCode = addToGraph(StringCharCodeAt, get(thisOperand), getToInt32(indexOperand), storage);

        if (usesResult)
            set(resultOperand, charCode);
        return true;
    }

    case CharAtIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        int thisOperand = registerOffset + argumentToOperand(0);
        if (!(m_graph[get(thisOperand)].prediction() & PredictString))
            return false;

        int indexOperand = registerOffset + argumentToOperand(1);
        NodeIndex storage = addToGraph(GetIndexedPropertyStorage, get(thisOperand), getToInt32(indexOperand));
        NodeIndex charCode = addToGraph(StringCharAt, get(thisOperand), getToInt32(indexOperand), storage);

        if (usesResult)
            set(resultOperand, charCode);
        return true;
    }

    default:
        return false;
    }
}
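
// Reset per-block caches: constant node records and weak cell constant nodes must not
// be reused across basic block boundaries.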
void ByteCodeParser::prepareToParseBlock()
{
    for (unsigned i = 0; i < m_constants.size(); ++i)
        m_constants[i] = ConstantRecord();
    m_cellConstantNodes.clear();
}
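
// Parse bytecode into DFG nodes, starting at m_currentIndex and stopping at the given
// limit (the next jump target) or at a terminal. Returns whether the caller should
// keep parsing subsequent blocks.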
bool ByteCodeParser::parseBlock(unsigned limit)
{
    bool shouldContinueParsing = true;

    Interpreter* interpreter = m_globalData->interpreter;
    Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
    unsigned blockBegin = m_currentIndex;

    // If we are the first basic block, introduce markers for arguments. This allows
    // us to track if a use of an argument may use the actual argument passed, as
    // opposed to using a value we set explicitly.
    if (m_currentBlock == m_graph.m_blocks[0].get() && !m_inlineStackTop->m_inlineCallFrame) {
        m_graph.m_arguments.resize(m_numArguments);
        for (unsigned argument = 0; argument < m_numArguments; ++argument) {
            NodeIndex setArgument = addToGraph(SetArgument, OpInfo(newVariableAccessData(argumentToOperand(argument))));
            m_graph.m_arguments[argument] = setArgument;
            m_currentBlock->variablesAtHead.setArgumentFirstTime(argument, setArgument);
            m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
        }
    }

    while (true) {
        // Don't extend over jump destinations.
        if (m_currentIndex == limit) {
            // Ordinarily we want to plant a jump. But refuse to do this if the block is
            // empty. This is a special case for inlining, which might otherwise create
            // some empty blocks in some cases. When parseBlock() returns with an empty
            // block, it will get repurposed instead of creating a new one. Note that this
            // logic relies on every bytecode resulting in one or more nodes, which would
            // be true anyway except for op_loop_hint, which emits a Phantom to force this
            // to be true.
            if (m_currentBlock->begin != m_graph.size())
                addToGraph(Jump, OpInfo(m_currentIndex));
            else {
#if DFG_ENABLE(DEBUG_VERBOSE)
                printf("Refusing to plant jump at limit %u because block %p is empty.\n", limit, m_currentBlock);
#endif
            }
            return shouldContinueParsing;
        }
        // Switch on the current bytecode opcode.
        Instruction* currentInstruction = instructionsBegin + m_currentIndex;
        OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);
        switch (opcodeID) {

        // === Function entry opcodes ===

        case op_enter:
            // Initialize all locals to undefined.
            for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
                set(i, constantUndefined());
            NEXT_OPCODE(op_enter);

        case op_convert_this: {
            NodeIndex op1 = getThis();
            if (m_graph[op1].op == ConvertThis)
                setThis(op1);
            else
                setThis(addToGraph(ConvertThis, op1));
            NEXT_OPCODE(op_convert_this);
        }

        case op_create_this: {
            NodeIndex op1 = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(CreateThis, op1));
            NEXT_OPCODE(op_create_this);
        }

        case op_new_object: {
            set(currentInstruction[1].u.operand, addToGraph(NewObject));
            NEXT_OPCODE(op_new_object);
        }

        case op_new_array: {
            int startOperand = currentInstruction[2].u.operand;
            int numOperands = currentInstruction[3].u.operand;
            for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
                addVarArgChild(get(operandIdx));
            set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, NewArray, OpInfo(0), OpInfo(0)));
            NEXT_OPCODE(op_new_array);
        }

        case op_new_array_buffer: {
            int startConstant = currentInstruction[2].u.operand;
            int numConstants = currentInstruction[3].u.operand;
            set(currentInstruction[1].u.operand, addToGraph(NewArrayBuffer, OpInfo(startConstant), OpInfo(numConstants)));
            NEXT_OPCODE(op_new_array_buffer);
        }

        case op_new_regexp: {
            set(currentInstruction[1].u.operand, addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
            NEXT_OPCODE(op_new_regexp);
        }

        case op_get_callee: {
            if (m_inlineStackTop->m_inlineCallFrame)
                set(currentInstruction[1].u.operand, getDirect(m_inlineStackTop->m_calleeVR));
            else
                set(currentInstruction[1].u.operand, addToGraph(GetCallee));
            NEXT_OPCODE(op_get_callee);
        }
        // === Bitwise operations ===

        case op_bitand: {
            NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
            NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(BitAnd, op1, op2));
            NEXT_OPCODE(op_bitand);
        }

        case op_bitor: {
            NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
            NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(BitOr, op1, op2));
            NEXT_OPCODE(op_bitor);
        }

        case op_bitxor: {
            NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
            NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(BitXor, op1, op2));
            NEXT_OPCODE(op_bitxor);
        }

        case op_rshift: {
            NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
            NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
            NodeIndex result;
            // Optimize out shifts by zero.
            if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
                result = op1;
            else
                result = addToGraph(BitRShift, op1, op2);
            set(currentInstruction[1].u.operand, result);
            NEXT_OPCODE(op_rshift);
        }

        case op_lshift: {
            NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
            NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
            NodeIndex result;
            // Optimize out shifts by zero.
            if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
                result = op1;
            else
                result = addToGraph(BitLShift, op1, op2);
            set(currentInstruction[1].u.operand, result);
            NEXT_OPCODE(op_lshift);
        }

        case op_urshift: {
            NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
            NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
            NodeIndex result;
            // The result of a zero-extending right shift is treated as an unsigned value.
            // This means that if the top bit is set, the result is not in the int32 range,
            // and as such must be stored as a double. If the shift amount is a constant,
            // we may be able to optimize.
            if (isInt32Constant(op2)) {
                // If we know we are shifting by a non-zero amount, then since the operation
                // zero fills we know the top bit of the result must be zero, and as such the
                // result must be within the int32 range. Conversely, if this is a shift by
                // zero, then the result may be changed by the conversion to unsigned, but it
                // is not necessary to perform the shift!
                if (valueOfInt32Constant(op2) & 0x1f)
                    result = addToGraph(BitURShift, op1, op2);
                else
                    result = makeSafe(addToGraph(UInt32ToNumber, OpInfo(NodeUseBottom), op1));
            } else {
                // Cannot optimize at this stage; shift & potentially rebox as a double.
                result = addToGraph(BitURShift, op1, op2);
                result = makeSafe(addToGraph(UInt32ToNumber, OpInfo(NodeUseBottom), result));
            }
            set(currentInstruction[1].u.operand, result);
            NEXT_OPCODE(op_urshift);
        }
        // === Increment/Decrement opcodes ===

        case op_pre_inc: {
            unsigned srcDst = currentInstruction[1].u.operand;
            NodeIndex op = getToNumber(srcDst);
            set(srcDst, makeSafe(addToGraph(ArithAdd, OpInfo(NodeUseBottom), op, one())));
            NEXT_OPCODE(op_pre_inc);
        }

        case op_post_inc: {
            unsigned result = currentInstruction[1].u.operand;
            unsigned srcDst = currentInstruction[2].u.operand;
            ASSERT(result != srcDst); // Required for assumptions we make during OSR.
            NodeIndex op = getToNumber(srcDst);
            set(result, op);
            set(srcDst, makeSafe(addToGraph(ArithAdd, OpInfo(NodeUseBottom), op, one())));
            NEXT_OPCODE(op_post_inc);
        }

        case op_pre_dec: {
            unsigned srcDst = currentInstruction[1].u.operand;
            NodeIndex op = getToNumber(srcDst);
            set(srcDst, makeSafe(addToGraph(ArithSub, OpInfo(NodeUseBottom), op, one())));
            NEXT_OPCODE(op_pre_dec);
        }

        case op_post_dec: {
            unsigned result = currentInstruction[1].u.operand;
            unsigned srcDst = currentInstruction[2].u.operand;
            NodeIndex op = getToNumber(srcDst);
            set(result, op);
            set(srcDst, makeSafe(addToGraph(ArithSub, OpInfo(NodeUseBottom), op, one())));
            NEXT_OPCODE(op_post_dec);
        }
        // === Arithmetic operations ===

        case op_add: {
            NodeIndex op1 = get(currentInstruction[2].u.operand);
            NodeIndex op2 = get(currentInstruction[3].u.operand);
            if (m_graph[op1].hasNumberResult() && m_graph[op2].hasNumberResult())
                set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithAdd, OpInfo(NodeUseBottom), toNumber(op1), toNumber(op2))));
            else
                set(currentInstruction[1].u.operand, makeSafe(addToGraph(ValueAdd, OpInfo(NodeUseBottom), op1, op2)));
            NEXT_OPCODE(op_add);
        }

        case op_sub: {
            NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
            NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithSub, OpInfo(NodeUseBottom), op1, op2)));
            NEXT_OPCODE(op_sub);
        }

        case op_mul: {
            // Multiply requires that the inputs are not truncated, unfortunately.
            NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
            NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMul, OpInfo(NodeUseBottom), op1, op2)));
            NEXT_OPCODE(op_mul);
        }

        case op_mod: {
            NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
            NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMod, OpInfo(NodeUseBottom), op1, op2)));
            NEXT_OPCODE(op_mod);
        }

        case op_div: {
            NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
            NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, makeDivSafe(addToGraph(ArithDiv, OpInfo(NodeUseBottom), op1, op2)));
            NEXT_OPCODE(op_div);
        }
        // === Misc operations ===

#if ENABLE(DEBUG_WITH_BREAKPOINT)
        case op_debug:
            addToGraph(Breakpoint);
            NEXT_OPCODE(op_debug);
#endif
        case op_mov: {
            NodeIndex op = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, op);
            NEXT_OPCODE(op_mov);
        }

        case op_check_has_instance:
            addToGraph(CheckHasInstance, get(currentInstruction[1].u.operand));
            NEXT_OPCODE(op_check_has_instance);

        case op_instanceof: {
            NodeIndex value = get(currentInstruction[2].u.operand);
            NodeIndex baseValue = get(currentInstruction[3].u.operand);
            NodeIndex prototype = get(currentInstruction[4].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(InstanceOf, value, baseValue, prototype));
            NEXT_OPCODE(op_instanceof);
        }

        case op_not: {
            NodeIndex value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(LogicalNot, value));
            NEXT_OPCODE(op_not);
        }

        case op_to_primitive: {
            NodeIndex value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(ToPrimitive, value));
            NEXT_OPCODE(op_to_primitive);
        }

        case op_strcat: {
            int startOperand = currentInstruction[2].u.operand;
            int numOperands = currentInstruction[3].u.operand;
            for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
                addVarArgChild(get(operandIdx));
            set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, StrCat, OpInfo(0), OpInfo(0)));
            NEXT_OPCODE(op_strcat);
        }
1591 NodeIndex op1 = get(currentInstruction[2].u.operand);
1592 NodeIndex op2 = get(currentInstruction[3].u.operand);
1593 set(currentInstruction[1].u.operand, addToGraph(CompareLess, op1, op2));
1594 NEXT_OPCODE(op_less);
1598 NodeIndex op1 = get(currentInstruction[2].u.operand);
1599 NodeIndex op2 = get(currentInstruction[3].u.operand);
1600 set(currentInstruction[1].u.operand, addToGraph(CompareLessEq, op1, op2));
1601 NEXT_OPCODE(op_lesseq);
1605 NodeIndex op1 = get(currentInstruction[2].u.operand);
1606 NodeIndex op2 = get(currentInstruction[3].u.operand);
1607 set(currentInstruction[1].u.operand, addToGraph(CompareGreater, op1, op2));
1608 NEXT_OPCODE(op_greater);
1611 case op_greatereq: {
1612 NodeIndex op1 = get(currentInstruction[2].u.operand);
1613 NodeIndex op2 = get(currentInstruction[3].u.operand);
1614 set(currentInstruction[1].u.operand, addToGraph(CompareGreaterEq, op1, op2));
1615 NEXT_OPCODE(op_greatereq);
1619 NodeIndex op1 = get(currentInstruction[2].u.operand);
1620 NodeIndex op2 = get(currentInstruction[3].u.operand);
1621 set(currentInstruction[1].u.operand, addToGraph(CompareEq, op1, op2));
1626 NodeIndex value = get(currentInstruction[2].u.operand);
1627 set(currentInstruction[1].u.operand, addToGraph(CompareEq, value, constantNull()));
1628 NEXT_OPCODE(op_eq_null);
1632 NodeIndex op1 = get(currentInstruction[2].u.operand);
1633 NodeIndex op2 = get(currentInstruction[3].u.operand);
1634 set(currentInstruction[1].u.operand, addToGraph(CompareStrictEq, op1, op2));
1635 NEXT_OPCODE(op_stricteq);
1639 NodeIndex op1 = get(currentInstruction[2].u.operand);
1640 NodeIndex op2 = get(currentInstruction[3].u.operand);
1641 set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
1642 NEXT_OPCODE(op_neq);
1646 NodeIndex value = get(currentInstruction[2].u.operand);
1647 set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, value, constantNull())));
1648 NEXT_OPCODE(op_neq_null);
1651 case op_nstricteq: {
1652 NodeIndex op1 = get(currentInstruction[2].u.operand);
1653 NodeIndex op2 = get(currentInstruction[3].u.operand);
1654 set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareStrictEq, op1, op2)));
1655 NEXT_OPCODE(op_nstricteq);
1658 // === Property access operations ===
1660 case op_get_by_val: {
1661 PredictedType prediction = getPrediction();
1663 NodeIndex base = get(currentInstruction[2].u.operand);
1664 NodeIndex property = get(currentInstruction[3].u.operand);
1665 NodeIndex propertyStorage = addToGraph(GetIndexedPropertyStorage, base, property);
1666 NodeIndex getByVal = addToGraph(GetByVal, OpInfo(0), OpInfo(prediction), base, property, propertyStorage);
1667 set(currentInstruction[1].u.operand, getByVal);
1669 NEXT_OPCODE(op_get_by_val);
1672 case op_put_by_val: {
1673 NodeIndex base = get(currentInstruction[1].u.operand);
1674 NodeIndex property = get(currentInstruction[2].u.operand);
1675 NodeIndex value = get(currentInstruction[3].u.operand);
1677 addToGraph(PutByVal, base, property, value);
1679 NEXT_OPCODE(op_put_by_val);
1682 case op_method_check: {
1683 Instruction* getInstruction = currentInstruction + OPCODE_LENGTH(op_method_check);
1685 PredictedType prediction = getPrediction();
1687 ASSERT(interpreter->getOpcodeID(getInstruction->u.opcode) == op_get_by_id);
1689 NodeIndex base = get(getInstruction[2].u.operand);
1690 unsigned identifier = m_inlineStackTop->m_identifierRemap[getInstruction[3].u.operand];
1692 // Check if the method_check was monomorphic. If so, emit structure checks plus a
1693 // constant for the cached function, which is a lot more efficient than a generic GetById.
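// Roughly: for a call site like o.f() that has only ever seen one receiver structure,
// the cached link info lets us plant CheckStructure on the base (and, if needed, on the
// cached prototype) and then treat the loaded method as the constant cached function.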
1694 StructureStubInfo& stubInfo = m_inlineStackTop->m_profiledBlock->getStubInfo(m_currentIndex);
1695 MethodCallLinkInfo& methodCall = m_inlineStackTop->m_profiledBlock->getMethodCallLinkInfo(m_currentIndex);
1698 && !!methodCall.cachedStructure
1700 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
1701 // It's monomorphic as far as we can tell, since the method_check was linked
1702 // but the slow path (i.e. the normal get_by_id) never fired.
1704 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(methodCall.cachedStructure.get())), base);
1705 if (methodCall.cachedPrototype.get() != m_inlineStackTop->m_profiledBlock->globalObject()->methodCallDummy())
1706 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(methodCall.cachedPrototypeStructure.get())), cellConstant(methodCall.cachedPrototype.get()));
1708 set(getInstruction[1].u.operand, cellConstant(methodCall.cachedFunction.get()));
1710 set(getInstruction[1].u.operand, addToGraph(GetById, OpInfo(identifier), OpInfo(prediction), base));
1712 m_currentIndex += OPCODE_LENGTH(op_method_check) + OPCODE_LENGTH(op_get_by_id);
1715 case op_get_scoped_var: {
1716 PredictedType prediction = getPrediction();
1717 int dst = currentInstruction[1].u.operand;
1718 int slot = currentInstruction[2].u.operand;
1719 int depth = currentInstruction[3].u.operand;
1720 NodeIndex getScopeChain = addToGraph(GetScopeChain, OpInfo(depth));
1721 NodeIndex getScopedVar = addToGraph(GetScopedVar, OpInfo(slot), OpInfo(prediction), getScopeChain);
1722 set(dst, getScopedVar);
1723 NEXT_OPCODE(op_get_scoped_var);
1725 case op_put_scoped_var: {
1726 int slot = currentInstruction[1].u.operand;
1727 int depth = currentInstruction[2].u.operand;
1728 int source = currentInstruction[3].u.operand;
1729 NodeIndex getScopeChain = addToGraph(GetScopeChain, OpInfo(depth));
1730 addToGraph(PutScopedVar, OpInfo(slot), getScopeChain, get(source));
1731 NEXT_OPCODE(op_put_scoped_var);
1733 case op_get_by_id: {
1734 PredictedType prediction = getPredictionWithoutOSRExit();
1736 NodeIndex base = get(currentInstruction[2].u.operand);
1737 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
1739 Identifier identifier = m_codeBlock->identifier(identifierNumber);
1740 StructureStubInfo& stubInfo = m_inlineStackTop->m_profiledBlock->getStubInfo(m_currentIndex);
1742 #if DFG_ENABLE(DEBUG_VERBOSE)
1743 printf("Slow case count for GetById @%lu bc#%u: %u; exit profile: %d\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
1746 size_t offset = notFound;
1747 StructureSet structureSet;
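// If the baseline JIT's inline cache saw this access hit a single structure (or a small
// list of structures that all store the property at the same offset), and no bad-cache
// OSR exit has been recorded here, we can speculate: plant a CheckStructure and load the
// value directly with GetByOffset instead of emitting a generic GetById.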
1749 && !m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
1750 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
1751 switch (stubInfo.accessType) {
1752 case access_get_by_id_self: {
1753 Structure* structure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
1754 offset = structure->get(*m_globalData, identifier);
1756 if (offset != notFound)
1757 structureSet.add(structure);
1759 if (offset != notFound)
1760 ASSERT(structureSet.size());
1764 case access_get_by_id_self_list: {
1765 PolymorphicAccessStructureList* list = stubInfo.u.getByIdProtoList.structureList;
1766 unsigned size = stubInfo.u.getByIdProtoList.listSize;
1767 for (unsigned i = 0; i < size; ++i) {
1768 if (!list->list[i].isDirect) {
1773 Structure* structure = list->list[i].base.get();
1774 if (structureSet.contains(structure))
1777 size_t myOffset = structure->get(*m_globalData, identifier);
1779 if (myOffset == notFound) {
1786 else if (offset != myOffset) {
1791 structureSet.add(structure);
1794 if (offset != notFound)
1795 ASSERT(structureSet.size());
1800 ASSERT(offset == notFound);
1805 if (offset != notFound) {
1806 ASSERT(structureSet.size());
1808 // The implementation of GetByOffset does not know to terminate speculative
1809 // execution if it doesn't have a prediction, so we do it manually.
1810 if (prediction == PredictNone)
1811 addToGraph(ForceOSRExit);
1813 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structureSet)), base);
1814 set(currentInstruction[1].u.operand, addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), addToGraph(GetPropertyStorage, base)));
1816 StorageAccessData storageAccessData;
1817 storageAccessData.offset = offset;
1818 storageAccessData.identifierNumber = identifierNumber;
1819 m_graph.m_storageAccessData.append(storageAccessData);
1821 set(currentInstruction[1].u.operand, addToGraph(GetById, OpInfo(identifierNumber), OpInfo(prediction), base));
1823 NEXT_OPCODE(op_get_by_id);
1826 case op_put_by_id: {
1827 NodeIndex value = get(currentInstruction[3].u.operand);
1828 NodeIndex base = get(currentInstruction[1].u.operand);
1829 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
1830 bool direct = currentInstruction[8].u.operand;
1832 StructureStubInfo& stubInfo = m_inlineStackTop->m_profiledBlock->getStubInfo(m_currentIndex);
1834 addToGraph(ForceOSRExit);
1836 bool alreadyGenerated = false;
1838 #if DFG_ENABLE(DEBUG_VERBOSE)
1839 printf("Slow case count for PutById @%lu bc#%u: %u; exit profile: %d\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
1843 && !m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
1844 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
1845 switch (stubInfo.accessType) {
1846 case access_put_by_id_replace: {
1847 Structure* structure = stubInfo.u.putByIdReplace.baseObjectStructure.get();
1848 Identifier identifier = m_codeBlock->identifier(identifierNumber);
1849 size_t offset = structure->get(*m_globalData, identifier);
1851 if (offset != notFound) {
1852 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), base);
1853 addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), base, addToGraph(GetPropertyStorage, base), value);
1855 StorageAccessData storageAccessData;
1856 storageAccessData.offset = offset;
1857 storageAccessData.identifierNumber = identifierNumber;
1858 m_graph.m_storageAccessData.append(storageAccessData);
1860 alreadyGenerated = true;
1865 case access_put_by_id_transition_normal:
1866 case access_put_by_id_transition_direct: {
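// A cached structure transition lets the put be inlined: check the old structure,
// re-check each structure along the cached prototype chain (so the chain still looks as
// it did when the stub was generated), plant PutStructure for the transition, and store
// the value at the known offset with PutByOffset.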
1867 Structure* previousStructure = stubInfo.u.putByIdTransition.previousStructure.get();
1868 Structure* newStructure = stubInfo.u.putByIdTransition.structure.get();
1870 if (previousStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity())
1873 StructureChain* structureChain = stubInfo.u.putByIdTransition.chain.get();
1875 Identifier identifier = m_codeBlock->identifier(identifierNumber);
1876 size_t offset = newStructure->get(*m_globalData, identifier);
1878 if (offset != notFound && structureChainIsStillValid(direct, previousStructure, structureChain)) {
1879 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(previousStructure)), base);
1881 if (!previousStructure->storedPrototype().isNull())
1882 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(previousStructure->storedPrototype().asCell()->structure())), cellConstant(previousStructure->storedPrototype().asCell()));
1884 for (WriteBarrier<Structure>* it = structureChain->head(); *it; ++it) {
1885 JSValue prototype = (*it)->storedPrototype();
1886 if (prototype.isNull())
1888 ASSERT(prototype.isCell());
1889 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(prototype.asCell()->structure())), cellConstant(prototype.asCell()));
1892 addToGraph(PutStructure, OpInfo(m_graph.addStructureTransitionData(StructureTransitionData(previousStructure, newStructure))), base);
1894 addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), base, addToGraph(GetPropertyStorage, base), value);
1896 StorageAccessData storageAccessData;
1897 storageAccessData.offset = offset;
1898 storageAccessData.identifierNumber = identifierNumber;
1899 m_graph.m_storageAccessData.append(storageAccessData);
1901 alreadyGenerated = true;
1911 if (!alreadyGenerated) {
1913 addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
1915 addToGraph(PutById, OpInfo(identifierNumber), base, value);
1918 NEXT_OPCODE(op_put_by_id);
1921 case op_get_global_var: {
1922 PredictedType prediction = getPrediction();
1924 NodeIndex getGlobalVar = addToGraph(GetGlobalVar, OpInfo(currentInstruction[2].u.operand));
1925 set(currentInstruction[1].u.operand, getGlobalVar);
1926 m_graph.predictGlobalVar(currentInstruction[2].u.operand, prediction);
1927 NEXT_OPCODE(op_get_global_var);
1930 case op_put_global_var: {
1931 NodeIndex value = get(currentInstruction[2].u.operand);
1932 addToGraph(PutGlobalVar, OpInfo(currentInstruction[1].u.operand), value);
1933 NEXT_OPCODE(op_put_global_var);
1936 // === Block terminators. ===
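// Branch nodes take two OpInfo operands: the bytecode offset of the taken target first,
// then the not-taken (fall-through) target. Opcodes that branch when the condition is
// false (op_jfalse, op_jnless, and friends) therefore pass the fall-through offset as
// the taken target and the jump offset as the not-taken target.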
1939 unsigned relativeOffset = currentInstruction[1].u.operand;
1940 addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
1941 LAST_OPCODE(op_jmp);
1945 unsigned relativeOffset = currentInstruction[1].u.operand;
1946 addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
1947 LAST_OPCODE(op_loop);
1951 unsigned relativeOffset = currentInstruction[2].u.operand;
1952 NodeIndex condition = get(currentInstruction[1].u.operand);
1953 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jtrue)), condition);
1954 LAST_OPCODE(op_jtrue);
1958 unsigned relativeOffset = currentInstruction[2].u.operand;
1959 NodeIndex condition = get(currentInstruction[1].u.operand);
1960 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jfalse)), OpInfo(m_currentIndex + relativeOffset), condition);
1961 LAST_OPCODE(op_jfalse);
1964 case op_loop_if_true: {
1965 unsigned relativeOffset = currentInstruction[2].u.operand;
1966 NodeIndex condition = get(currentInstruction[1].u.operand);
1967 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_true)), condition);
1968 LAST_OPCODE(op_loop_if_true);
1971 case op_loop_if_false: {
1972 unsigned relativeOffset = currentInstruction[2].u.operand;
1973 NodeIndex condition = get(currentInstruction[1].u.operand);
1974 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_false)), OpInfo(m_currentIndex + relativeOffset), condition);
1975 LAST_OPCODE(op_loop_if_false);
1979 unsigned relativeOffset = currentInstruction[2].u.operand;
1980 NodeIndex value = get(currentInstruction[1].u.operand);
1981 NodeIndex condition = addToGraph(CompareEq, value, constantNull());
1982 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jeq_null)), condition);
1983 LAST_OPCODE(op_jeq_null);
1986 case op_jneq_null: {
1987 unsigned relativeOffset = currentInstruction[2].u.operand;
1988 NodeIndex value = get(currentInstruction[1].u.operand);
1989 NodeIndex condition = addToGraph(CompareEq, value, constantNull());
1990 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_null)), OpInfo(m_currentIndex + relativeOffset), condition);
1991 LAST_OPCODE(op_jneq_null);
1995 unsigned relativeOffset = currentInstruction[3].u.operand;
1996 NodeIndex op1 = get(currentInstruction[1].u.operand);
1997 NodeIndex op2 = get(currentInstruction[2].u.operand);
1998 NodeIndex condition = addToGraph(CompareLess, op1, op2);
1999 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jless)), condition);
2000 LAST_OPCODE(op_jless);
2004 unsigned relativeOffset = currentInstruction[3].u.operand;
2005 NodeIndex op1 = get(currentInstruction[1].u.operand);
2006 NodeIndex op2 = get(currentInstruction[2].u.operand);
2007 NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
2008 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jlesseq)), condition);
2009 LAST_OPCODE(op_jlesseq);
2013 unsigned relativeOffset = currentInstruction[3].u.operand;
2014 NodeIndex op1 = get(currentInstruction[1].u.operand);
2015 NodeIndex op2 = get(currentInstruction[2].u.operand);
2016 NodeIndex condition = addToGraph(CompareGreater, op1, op2);
2017 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreater)), condition);
2018 LAST_OPCODE(op_jgreater);
2021 case op_jgreatereq: {
2022 unsigned relativeOffset = currentInstruction[3].u.operand;
2023 NodeIndex op1 = get(currentInstruction[1].u.operand);
2024 NodeIndex op2 = get(currentInstruction[2].u.operand);
2025 NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
2026 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreatereq)), condition);
2027 LAST_OPCODE(op_jgreatereq);
2031 unsigned relativeOffset = currentInstruction[3].u.operand;
2032 NodeIndex op1 = get(currentInstruction[1].u.operand);
2033 NodeIndex op2 = get(currentInstruction[2].u.operand);
2034 NodeIndex condition = addToGraph(CompareLess, op1, op2);
2035 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnless)), OpInfo(m_currentIndex + relativeOffset), condition);
2036 LAST_OPCODE(op_jnless);
2040 unsigned relativeOffset = currentInstruction[3].u.operand;
2041 NodeIndex op1 = get(currentInstruction[1].u.operand);
2042 NodeIndex op2 = get(currentInstruction[2].u.operand);
2043 NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
2044 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnlesseq)), OpInfo(m_currentIndex + relativeOffset), condition);
2045 LAST_OPCODE(op_jnlesseq);
2048 case op_jngreater: {
2049 unsigned relativeOffset = currentInstruction[3].u.operand;
2050 NodeIndex op1 = get(currentInstruction[1].u.operand);
2051 NodeIndex op2 = get(currentInstruction[2].u.operand);
2052 NodeIndex condition = addToGraph(CompareGreater, op1, op2);
2053 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreater)), OpInfo(m_currentIndex + relativeOffset), condition);
2054 LAST_OPCODE(op_jngreater);
2057 case op_jngreatereq: {
2058 unsigned relativeOffset = currentInstruction[3].u.operand;
2059 NodeIndex op1 = get(currentInstruction[1].u.operand);
2060 NodeIndex op2 = get(currentInstruction[2].u.operand);
2061 NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
2062 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreatereq)), OpInfo(m_currentIndex + relativeOffset), condition);
2063 LAST_OPCODE(op_jngreatereq);
2066 case op_loop_if_less: {
2067 unsigned relativeOffset = currentInstruction[3].u.operand;
2068 NodeIndex op1 = get(currentInstruction[1].u.operand);
2069 NodeIndex op2 = get(currentInstruction[2].u.operand);
2070 NodeIndex condition = addToGraph(CompareLess, op1, op2);
2071 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_less)), condition);
2072 LAST_OPCODE(op_loop_if_less);
2075 case op_loop_if_lesseq: {
2076 unsigned relativeOffset = currentInstruction[3].u.operand;
2077 NodeIndex op1 = get(currentInstruction[1].u.operand);
2078 NodeIndex op2 = get(currentInstruction[2].u.operand);
2079 NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
2080 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_lesseq)), condition);
2081 LAST_OPCODE(op_loop_if_lesseq);
2084 case op_loop_if_greater: {
2085 unsigned relativeOffset = currentInstruction[3].u.operand;
2086 NodeIndex op1 = get(currentInstruction[1].u.operand);
2087 NodeIndex op2 = get(currentInstruction[2].u.operand);
2088 NodeIndex condition = addToGraph(CompareGreater, op1, op2);
2089 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_greater)), condition);
2090 LAST_OPCODE(op_loop_if_greater);
2093 case op_loop_if_greatereq: {
2094 unsigned relativeOffset = currentInstruction[3].u.operand;
2095 NodeIndex op1 = get(currentInstruction[1].u.operand);
2096 NodeIndex op2 = get(currentInstruction[2].u.operand);
2097 NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
2098 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_greatereq)), condition);
2099 LAST_OPCODE(op_loop_if_greatereq);
2103 if (m_inlineStackTop->m_inlineCallFrame) {
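// Returning from an inlined frame plants no Return node. The return value, if any, is
// written directly into the caller's result register; if this return is not at the very
// end of the inlined code block (or an early return has already been seen), we plant a
// Jump whose target is filled in later by early-return linking.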
2104 if (m_inlineStackTop->m_returnValue != InvalidVirtualRegister)
2105 setDirect(m_inlineStackTop->m_returnValue, get(currentInstruction[1].u.operand));
2106 m_inlineStackTop->m_didReturn = true;
2107 if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
2108 // If we're returning from the first block, then we're done parsing.
2109 ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.m_blocks.size() - 1);
2110 shouldContinueParsing = false;
2111 LAST_OPCODE(op_ret);
2113 // If inlining created blocks, and we're doing a return, then we need some special linking.
2115 ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1);
2116 m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false;
2118 if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) {
2119 ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size());
2120 addToGraph(Jump, OpInfo(NoBlock));
2121 m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true;
2122 m_inlineStackTop->m_didEarlyReturn = true;
2124 LAST_OPCODE(op_ret);
2126 addToGraph(Return, get(currentInstruction[1].u.operand));
2127 LAST_OPCODE(op_ret);
2130 ASSERT(!m_inlineStackTop->m_inlineCallFrame);
2131 addToGraph(Return, get(currentInstruction[1].u.operand));
2132 LAST_OPCODE(op_end);
2135 addToGraph(Throw, get(currentInstruction[1].u.operand));
2136 LAST_OPCODE(op_throw);
2138 case op_throw_reference_error:
2139 addToGraph(ThrowReferenceError);
2140 LAST_OPCODE(op_throw_reference_error);
2143 handleCall(interpreter, currentInstruction, Call, CodeForCall);
2144 NEXT_OPCODE(op_call);
2147 handleCall(interpreter, currentInstruction, Construct, CodeForConstruct);
2148 NEXT_OPCODE(op_construct);
2150 case op_call_put_result:
2151 NEXT_OPCODE(op_call_put_result);
2154 PredictedType prediction = getPrediction();
2156 unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2158 NodeIndex resolve = addToGraph(Resolve, OpInfo(identifier), OpInfo(prediction));
2159 set(currentInstruction[1].u.operand, resolve);
2161 NEXT_OPCODE(op_resolve);
2164 case op_resolve_base: {
2165 PredictedType prediction = getPrediction();
2167 unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2169 NodeIndex resolve = addToGraph(currentInstruction[3].u.operand ? ResolveBaseStrictPut : ResolveBase, OpInfo(identifier), OpInfo(prediction));
2170 set(currentInstruction[1].u.operand, resolve);
2172 NEXT_OPCODE(op_resolve_base);
2175 case op_resolve_global: {
2176 PredictedType prediction = getPrediction();
2178 NodeIndex resolve = addToGraph(ResolveGlobal, OpInfo(m_graph.m_resolveGlobalData.size()), OpInfo(prediction));
2179 m_graph.m_resolveGlobalData.append(ResolveGlobalData());
2180 ResolveGlobalData& data = m_graph.m_resolveGlobalData.last();
2181 data.identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2182 data.resolveInfoIndex = m_globalResolveNumber++;
2183 set(currentInstruction[1].u.operand, resolve);
2185 NEXT_OPCODE(op_resolve_global);
2188 case op_loop_hint: {
2189 // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
2190 // OSR can only happen at basic block boundaries. Assert that these two statements agree.
2192 ASSERT_UNUSED(blockBegin, m_currentIndex == blockBegin);
2194 // We never do OSR into an inlined code block. That could not happen, since OSR
2195 // looks up the code block that is the replacement for the baseline JIT code
2196 // block. Hence, machine code block = true code block = not inline code block.
2197 if (!m_inlineStackTop->m_caller)
2198 m_currentBlock->isOSRTarget = true;
2200 // Emit a phantom node to ensure that there is a placeholder node for this bytecode op.
2202 addToGraph(Phantom);
2204 NEXT_OPCODE(op_loop_hint);
2208 // Parse failed! This should not happen because the capabilities checker
2209 // should have caught it.
2210 ASSERT_NOT_REACHED();
2214 ASSERT(canCompileOpcode(opcodeID));
2218 template<ByteCodeParser::PhiStackType stackType>
2219 void ByteCodeParser::processPhiStack()
2221 Vector<PhiStackEntry, 16>& phiStack = (stackType == ArgumentPhiStack) ? m_argumentPhiStack : m_localPhiStack;
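// Work-list algorithm: for each pending (block, phi, variable) entry, inspect every
// predecessor block's value for that variable at its tail. If a predecessor has no value
// yet, plant a fresh Phi there and queue it; otherwise unify the two VariableAccessDatas
// and wire the predecessor's value into a free child slot of this phi, splitting the phi
// into a chain once all three fixed child slots are occupied.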
2223 while (!phiStack.isEmpty()) {
2224 PhiStackEntry entry = phiStack.last();
2225 phiStack.removeLast();
2227 PredecessorList& predecessors = entry.m_block->m_predecessors;
2228 unsigned varNo = entry.m_varNo;
2229 VariableAccessData* dataForPhi = m_graph[entry.m_phi].variableAccessData();
2231 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2232 printf(" Handling phi entry for var %u, phi @%u.\n", entry.m_varNo, entry.m_phi);
2235 for (size_t i = 0; i < predecessors.size(); ++i) {
2236 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2237 printf(" Dealing with predecessor block %u.\n", predecessors[i]);
2240 BasicBlock* predecessorBlock = m_graph.m_blocks[predecessors[i]].get();
2242 NodeIndex& var = (stackType == ArgumentPhiStack) ? predecessorBlock->variablesAtTail.argument(varNo) : predecessorBlock->variablesAtTail.local(varNo);
2244 NodeIndex valueInPredecessor = var;
2245 if (valueInPredecessor == NoNode) {
2246 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2247 printf(" Did not find node, adding phi.\n");
2250 valueInPredecessor = addToGraph(Phi, OpInfo(newVariableAccessData(stackType == ArgumentPhiStack ? argumentToOperand(varNo) : static_cast<int>(varNo))));
2251 var = valueInPredecessor;
2252 if (stackType == ArgumentPhiStack)
2253 predecessorBlock->variablesAtHead.setArgumentFirstTime(varNo, valueInPredecessor);
2255 predecessorBlock->variablesAtHead.setLocalFirstTime(varNo, valueInPredecessor);
2256 phiStack.append(PhiStackEntry(predecessorBlock, valueInPredecessor, varNo));
2257 } else if (m_graph[valueInPredecessor].op == GetLocal) {
2258 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2259 printf(" Found GetLocal @%u.\n", valueInPredecessor);
2262 // We want to ensure that the VariableAccessDatas are identical between the
2263 // GetLocal and its block-local Phi. Strictly speaking we only need the two
2264 // to be unified. But for efficiency, we want the code that creates GetLocals
2265 // and Phis to try to reuse VariableAccessDatas as much as possible.
2266 ASSERT(m_graph[valueInPredecessor].variableAccessData() == m_graph[m_graph[valueInPredecessor].child1()].variableAccessData());
2268 valueInPredecessor = m_graph[valueInPredecessor].child1();
2270 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2271 printf(" Found @%u.\n", valueInPredecessor);
2274 ASSERT(m_graph[valueInPredecessor].op == SetLocal || m_graph[valueInPredecessor].op == Phi || m_graph[valueInPredecessor].op == Flush || (m_graph[valueInPredecessor].op == SetArgument && stackType == ArgumentPhiStack));
2276 VariableAccessData* dataForPredecessor = m_graph[valueInPredecessor].variableAccessData();
2278 dataForPredecessor->unify(dataForPhi);
2280 Node* phiNode = &m_graph[entry.m_phi];
2281 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2282 printf(" Ref count of @%u = %u.\n", entry.m_phi, phiNode->refCount());
2284 if (phiNode->refCount()) {
2285 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2286 printf(" Reffing @%u.\n", valueInPredecessor);
2288 m_graph.ref(valueInPredecessor);
2291 if (phiNode->child1() == NoNode) {
2292 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2293 printf(" Setting @%u->child1 = @%u.\n", entry.m_phi, valueInPredecessor);
2295 phiNode->children.fixed.child1 = valueInPredecessor;
2296 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2297 printf(" Children of @%u: ", entry.m_phi);
2298 phiNode->dumpChildren(stdout);
2303 if (phiNode->child2() == NoNode) {
2304 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2305 printf(" Setting @%u->child2 = @%u.\n", entry.m_phi, valueInPredecessor);
2307 phiNode->children.fixed.child2 = valueInPredecessor;
2308 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2309 printf(" Children of @%u: ", entry.m_phi);
2310 phiNode->dumpChildren(stdout);
2315 if (phiNode->child3() == NoNode) {
2316 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2317 printf(" Setting @%u->child3 = @%u.\n", entry.m_phi, valueInPredecessor);
2319 phiNode->children.fixed.child3 = valueInPredecessor;
2320 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2321 printf(" Children of @%u: ", entry.m_phi);
2322 phiNode->dumpChildren(stdout);
2328 NodeIndex newPhi = addToGraph(Phi, OpInfo(dataForPhi));
2330 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2331 printf(" Splitting @%u, created @%u.\n", entry.m_phi, newPhi);
2334 phiNode = &m_graph[entry.m_phi]; // reload after vector resize
2335 Node& newPhiNode = m_graph[newPhi];
2336 if (phiNode->refCount())
2337 m_graph.ref(newPhi);
2339 newPhiNode.children.fixed.child1 = phiNode->child1();
2340 newPhiNode.children.fixed.child2 = phiNode->child2();
2341 newPhiNode.children.fixed.child3 = phiNode->child3();
2343 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2344 printf(" Children of @%u: ", newPhi);
2345 newPhiNode.dumpChildren(stdout);
2349 phiNode->children.fixed.child1 = newPhi;
2350 phiNode->children.fixed.child2 = valueInPredecessor;
2351 phiNode->children.fixed.child3 = NoNode;
2353 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2354 printf(" Children of @%u: ", entry.m_phi);
2355 phiNode->dumpChildren(stdout);
2362 void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BlockIndex>& possibleTargets)
2364 ASSERT(block->end != NoNode);
2365 ASSERT(!block->isLinked);
2366 ASSERT(block->end > block->begin);
2367 Node& node = m_graph[block->end - 1];
2368 ASSERT(node.isTerminal());
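// During parsing, terminal nodes record their targets as bytecode offsets; now that all
// blocks exist, translate those offsets into block indices.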
2372 node.setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.takenBytecodeOffsetDuringParsing()));
2373 #if DFG_ENABLE(DEBUG_VERBOSE)
2374 printf("Linked basic block %p to %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex());
2379 node.setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.takenBytecodeOffsetDuringParsing()));
2380 node.setNotTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.notTakenBytecodeOffsetDuringParsing()));
2381 #if DFG_ENABLE(DEBUG_VERBOSE)
2382 printf("Linked basic block %p to %p, #%u and %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex(), m_graph.m_blocks[node.notTakenBlockIndex()].get(), node.notTakenBlockIndex());
2387 #if DFG_ENABLE(DEBUG_VERBOSE)
2388 printf("Marking basic block %p as linked.\n", block);
2393 #if !ASSERT_DISABLED
2394 block->isLinked = true;
2398 void ByteCodeParser::linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets)
2400 for (size_t i = 0; i < unlinkedBlocks.size(); ++i) {
2401 if (unlinkedBlocks[i].m_needsNormalLinking) {
2402 linkBlock(m_graph.m_blocks[unlinkedBlocks[i].m_blockIndex].get(), possibleTargets);
2403 unlinkedBlocks[i].m_needsNormalLinking = false;
2408 void ByteCodeParser::handleSuccessor(Vector<BlockIndex, 16>& worklist, BlockIndex blockIndex, BlockIndex successorIndex)
2410 BasicBlock* successor = m_graph.m_blocks[successorIndex].get();
2411 if (!successor->isReachable) {
2412 successor->isReachable = true;
2413 worklist.append(successorIndex);
2416 successor->m_predecessors.append(blockIndex);
2419 void ByteCodeParser::determineReachability()
2421 Vector<BlockIndex, 16> worklist;
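// Simple forward traversal from block 0: every block reachable through taken/not-taken
// edges is marked reachable, and handleSuccessor records the predecessor edges that
// processPhiStack relies on.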
2423 m_graph.m_blocks[0]->isReachable = true;
2424 while (!worklist.isEmpty()) {
2425 BlockIndex index = worklist.last();
2426 worklist.removeLast();
2428 BasicBlock* block = m_graph.m_blocks[index].get();
2429 ASSERT(block->isLinked);
2431 Node& node = m_graph[block->end - 1];
2432 ASSERT(node.isTerminal());
2435 handleSuccessor(worklist, index, node.takenBlockIndex());
2436 else if (node.isBranch()) {
2437 handleSuccessor(worklist, index, node.takenBlockIndex());
2438 handleSuccessor(worklist, index, node.notTakenBlockIndex());
2443 void ByteCodeParser::buildOperandMapsIfNecessary()
2445 if (m_haveBuiltOperandMaps)
2448 for (size_t i = 0; i < m_codeBlock->numberOfIdentifiers(); ++i)
2449 m_identifierMap.add(m_codeBlock->identifier(i).impl(), i);
2450 for (size_t i = 0; i < m_codeBlock->numberOfConstantRegisters(); ++i)
2451 m_jsValueMap.add(JSValue::encode(m_codeBlock->getConstant(i + FirstConstantRegisterIndex)), i + FirstConstantRegisterIndex);
2453 m_haveBuiltOperandMaps = true;
2456 ByteCodeParser::InlineStackEntry::InlineStackEntry(ByteCodeParser* byteCodeParser, CodeBlock* codeBlock, CodeBlock* profiledBlock, BlockIndex callsiteBlockHead, VirtualRegister calleeVR, JSFunction* callee, VirtualRegister returnValueVR, VirtualRegister inlineCallFrameStart, CodeSpecializationKind kind)
2457 : m_byteCodeParser(byteCodeParser)
2458 , m_codeBlock(codeBlock)
2459 , m_profiledBlock(profiledBlock)
2460 , m_calleeVR(calleeVR)
2461 , m_exitProfile(profiledBlock->exitProfile())
2462 , m_callsiteBlockHead(callsiteBlockHead)
2463 , m_returnValue(returnValueVR)
2464 , m_didReturn(false)
2465 , m_didEarlyReturn(false)
2466 , m_caller(byteCodeParser->m_inlineStackTop)
2470 ASSERT(codeBlock != byteCodeParser->m_codeBlock);
2472 ASSERT(calleeVR != InvalidVirtualRegister);
2473 ASSERT(inlineCallFrameStart != InvalidVirtualRegister);
2474 ASSERT(callsiteBlockHead != NoBlock);
2476 InlineCallFrame inlineCallFrame;
2477 inlineCallFrame.executable.set(*byteCodeParser->m_globalData, byteCodeParser->m_codeBlock->ownerExecutable(), codeBlock->ownerExecutable());
2478 inlineCallFrame.stackOffset = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize;
2479 inlineCallFrame.callee.set(*byteCodeParser->m_globalData, byteCodeParser->m_codeBlock->ownerExecutable(), callee);
2480 inlineCallFrame.caller = byteCodeParser->currentCodeOrigin();
2481 inlineCallFrame.arguments.resize(codeBlock->numParameters()); // Set the number of arguments including this, but don't configure the value recoveries, yet.
2482 inlineCallFrame.isCall = isCall(kind);
2483 byteCodeParser->m_codeBlock->inlineCallFrames().append(inlineCallFrame);
2484 m_inlineCallFrame = &byteCodeParser->m_codeBlock->inlineCallFrames().last();
2486 byteCodeParser->buildOperandMapsIfNecessary();
2488 m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
2489 m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
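// An inlined code block has its own identifier and constant tables. Remap each of its
// indices into the machine (root) code block's tables, appending new entries the first
// time an identifier or constant value is seen, so that every DFG node indexes a single
// shared table regardless of which code block it was parsed from.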
2491 for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) {
2492 StringImpl* rep = codeBlock->identifier(i).impl();
2493 pair<IdentifierMap::iterator, bool> result = byteCodeParser->m_identifierMap.add(rep, byteCodeParser->m_codeBlock->numberOfIdentifiers());
2495 byteCodeParser->m_codeBlock->addIdentifier(Identifier(byteCodeParser->m_globalData, rep));
2496 m_identifierRemap[i] = result.first->second;
2498 for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i) {
2499 JSValue value = codeBlock->getConstant(i + FirstConstantRegisterIndex);
2500 pair<JSValueMap::iterator, bool> result = byteCodeParser->m_jsValueMap.add(JSValue::encode(value), byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex);
2501 if (result.second) {
2502 byteCodeParser->m_codeBlock->addConstant(value);
2503 byteCodeParser->m_constants.append(ConstantRecord());
2505 m_constantRemap[i] = result.first->second;
2508 m_callsiteBlockHeadNeedsLinking = true;
2510 // Machine code block case.
2511 ASSERT(codeBlock == byteCodeParser->m_codeBlock);
2513 ASSERT(calleeVR == InvalidVirtualRegister);
2514 ASSERT(returnValueVR == InvalidVirtualRegister);
2515 ASSERT(inlineCallFrameStart == InvalidVirtualRegister);
2516 ASSERT(callsiteBlockHead == NoBlock);
2518 m_inlineCallFrame = 0;
2520 m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
2521 m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
2523 for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i)
2524 m_identifierRemap[i] = i;
2525 for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i)
2526 m_constantRemap[i] = i + FirstConstantRegisterIndex;
2528 m_callsiteBlockHeadNeedsLinking = false;
2531 for (size_t i = 0; i < m_constantRemap.size(); ++i)
2532 ASSERT(m_constantRemap[i] >= static_cast<unsigned>(FirstConstantRegisterIndex));
2534 byteCodeParser->m_inlineStackTop = this;
2537 void ByteCodeParser::parseCodeBlock()
2539 CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
2541 for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= codeBlock->numberOfJumpTargets(); ++jumpTargetIndex) {
2542 // The maximum bytecode offset to go into the current basic block is either the next jump target, or the end of the instructions.
2543 unsigned limit = jumpTargetIndex < codeBlock->numberOfJumpTargets() ? codeBlock->jumpTarget(jumpTargetIndex) : codeBlock->instructions().size();
2544 #if DFG_ENABLE(DEBUG_VERBOSE)
2545 printf("Parsing bytecode with limit %p bc#%u at inline depth %u.\n", m_inlineStackTop->executable(), limit, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
2547 ASSERT(m_currentIndex < limit);
2549 // Loop until we reach the current limit (i.e. next jump target).
2551 if (!m_currentBlock) {
2552 // Check if we can use the last block.
2553 if (!m_graph.m_blocks.isEmpty() && m_graph.m_blocks.last()->begin == m_graph.m_blocks.last()->end) {
2554 // This must be a block belonging to us.
2555 ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1);
2556 // Either the block is linkable or it isn't. If it's linkable then it's the last
2557 // block in the blockLinkingTargets list. If it's not then the last block will
2558 // have a lower bytecode index than the one we're about to give to this block.
2559 if (m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_blockLinkingTargets.last()]->bytecodeBegin != m_currentIndex) {
2560 // Make the block linkable.
2561 ASSERT(m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_blockLinkingTargets.last()]->bytecodeBegin < m_currentIndex);
2562 m_inlineStackTop->m_blockLinkingTargets.append(m_graph.m_blocks.size() - 1);
2564 // Change its bytecode begin and continue.
2565 m_currentBlock = m_graph.m_blocks.last().get();
2566 #if DFG_ENABLE(DEBUG_VERBOSE)
2567 printf("Reascribing bytecode index of block %p from bc#%u to bc#%u (peephole case).\n", m_currentBlock, m_currentBlock->bytecodeBegin, m_currentIndex);
2569 m_currentBlock->bytecodeBegin = m_currentIndex;
2571 OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(m_currentIndex, m_graph.size(), m_numArguments, m_numLocals));
2572 #if DFG_ENABLE(DEBUG_VERBOSE)
2573 printf("Creating basic block %p, #%lu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
2575 m_currentBlock = block.get();
2576 ASSERT(m_inlineStackTop->m_unlinkedBlocks.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex]->bytecodeBegin < m_currentIndex);
2577 m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
2578 m_inlineStackTop->m_blockLinkingTargets.append(m_graph.m_blocks.size());
2579 m_graph.m_blocks.append(block.release());
2580 prepareToParseBlock();
2584 bool shouldContinueParsing = parseBlock(limit);
2586 // We should not have gone beyond the limit.
2587 ASSERT(m_currentIndex <= limit);
2589 // We should have planted a terminal, or we just gave up because
2590 // we realized that the jump target information is imprecise, or we
2591 // are at the end of an inline function, or we realized that we
2592 // should stop parsing because there was a return in the first basic block.
2594 ASSERT(m_currentBlock->begin == m_graph.size() || m_graph.last().isTerminal() || (m_currentIndex == codeBlock->instructions().size() && m_inlineStackTop->m_inlineCallFrame) || !shouldContinueParsing);
2596 m_currentBlock->end = m_graph.size();
2598 if (!shouldContinueParsing)
2602 } while (m_currentIndex < limit);
2605 // Should have reached the end of the instructions.
2606 ASSERT(m_currentIndex == codeBlock->instructions().size());
2609 bool ByteCodeParser::parse()
2611 // Set during construction.
2612 ASSERT(!m_currentIndex);
2614 InlineStackEntry inlineStackEntry(this, m_codeBlock, m_profiledBlock, NoBlock, InvalidVirtualRegister, 0, InvalidVirtualRegister, InvalidVirtualRegister, CodeForCall);
2618 linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
2619 determineReachability();
2620 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2621 printf("Processing local variable phis.\n");
2623 processPhiStack<LocalPhiStack>();
2624 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
2625 printf("Processing argument phis.\n");
2627 processPhiStack<ArgumentPhiStack>();
2629 m_graph.m_preservedVars = m_preservedVars;
2630 m_graph.m_localVars = m_numLocals;
2631 m_graph.m_parameterSlots = m_parameterSlots;
2636 bool parse(Graph& graph, JSGlobalData* globalData, CodeBlock* codeBlock)
2638 #if DFG_DEBUG_LOCAL_DISABLE
2639 UNUSED_PARAM(graph);
2640 UNUSED_PARAM(globalData);
2641 UNUSED_PARAM(codeBlock);
2644 return ByteCodeParser(globalData, codeBlock, codeBlock->alternative(), graph).parse();
2648 } } // namespace JSC::DFG