/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "DFGByteCodeParser.h"

#include "DFGCapabilities.h"
#include "CodeBlock.h"
#include <wtf/HashMap.h>
#include <wtf/MathExtras.h>
namespace JSC { namespace DFG {

// === ByteCodeParser ===
//
// This class is used to compile the dataflow graph from a CodeBlock.
class ByteCodeParser {
public:
    ByteCodeParser(JSGlobalData* globalData, CodeBlock* codeBlock, CodeBlock* profiledBlock, Graph& graph)
        : m_globalData(globalData)
        , m_codeBlock(codeBlock)
        , m_profiledBlock(profiledBlock)
        , m_graph(graph)
        , m_currentIndex(0)
        , m_parseFailed(false)
        , m_constantUndefined(UINT_MAX)
        , m_constantNull(UINT_MAX)
        , m_constantNaN(UINT_MAX)
        , m_constant1(UINT_MAX)
        , m_constants(codeBlock->numberOfConstantRegisters())
        , m_numArguments(codeBlock->m_numParameters)
        , m_numLocals(codeBlock->m_numCalleeRegisters)
        , m_preservedVars(codeBlock->m_numVars)
        , m_parameterSlots(0)
        , m_numPassedVarArgs(0)
        , m_globalResolveNumber(0)
    {
        ASSERT(m_profiledBlock);
    }
    // Parse a full CodeBlock of bytecode.
    bool parse();

private:
    // Helper for min and max.
    bool handleMinMax(bool usesResult, int resultOperand, NodeType op, int firstArg, int lastArg);

    // Handle intrinsic functions.
    bool handleIntrinsic(bool usesResult, int resultOperand, Intrinsic, int firstArg, int lastArg, PredictedType prediction);
    // Parse a single basic block of bytecode instructions.
    bool parseBlock(unsigned limit);
    // Set up predecessor links in the graph's BasicBlocks.
    void setupPredecessors();
    // Link GetLocal & SetLocal nodes, to ensure live values are generated.
    enum PhiStackType {
        LocalPhiStack,
        ArgumentPhiStack
    };
    template<PhiStackType stackType>
    void processPhiStack();
    // Add spill locations to nodes.
    void allocateVirtualRegisters();

    VariableAccessData* newVariableAccessData(int operand)
    {
        ASSERT(operand < FirstConstantRegisterIndex);

        m_graph.m_variableAccessData.append(VariableAccessData(static_cast<VirtualRegister>(operand)));
        return &m_graph.m_variableAccessData.last();
    }
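
    // Operand dispatch used by get()/set() below: operands at or above
    // FirstConstantRegisterIndex name entries in the CodeBlock's constant pool,
    // operandIsArgument() identifies argument registers, and everything else is
    // treated as a local (including temporaries, see getLocal()).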
    // Get/Set the operands/result of a bytecode instruction.
    NodeIndex get(int operand)
    {
        // Is this a constant?
        if (operand >= FirstConstantRegisterIndex) {
            unsigned constant = operand - FirstConstantRegisterIndex;
            ASSERT(constant < m_constants.size());
            return getJSConstant(constant);
        }

        // Is this an argument?
        if (operandIsArgument(operand))
            return getArgument(operand);

        // Must be a local.
        return getLocal((unsigned)operand);
    }
    void set(int operand, NodeIndex value)
    {
        // Is this an argument?
        if (operandIsArgument(operand)) {
            setArgument(operand, value);
            return;
        }

        // Must be a local.
        setLocal((unsigned)operand, value);
    }
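
    // getLocal()/setLocal() keep a per-block table of the node currently holding each
    // local at the block's tail. A read of a local that has no value yet in this block
    // creates a Phi, records it on m_localPhiStack, and lets processPhiStack() wire the
    // Phi to the values flowing out of the predecessor blocks once parsing is complete.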
    // Used in implementing get/set, above, where the operand is a local variable.
    NodeIndex getLocal(unsigned operand)
    {
        NodeIndex nodeIndex = m_currentBlock->m_localsAtTail[operand].value;

        if (nodeIndex != NoNode) {
            Node& node = m_graph[nodeIndex];
            if (node.op == GetLocal)
                return nodeIndex;
            ASSERT(node.op == SetLocal);
            return node.child1();
        }

        // Check for reads of temporaries from prior blocks,
        // expand m_preservedVars to cover these.
        m_preservedVars = std::max(m_preservedVars, operand + 1);

        VariableAccessData* variableAccessData = newVariableAccessData(operand);

        NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
        m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, operand));
        nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), phi);
        m_currentBlock->m_localsAtTail[operand].value = nodeIndex;

        m_currentBlock->m_localsAtHead[operand].setFirstTime(nodeIndex);

        return nodeIndex;
    }
    void setLocal(unsigned operand, NodeIndex value)
    {
        m_currentBlock->m_localsAtTail[operand].value = addToGraph(SetLocal, OpInfo(newVariableAccessData(operand)), value);
    }
    // Used in implementing get/set, above, where the operand is an argument.
    NodeIndex getArgument(unsigned operand)
    {
        unsigned argument = operand + m_codeBlock->m_numParameters + RegisterFile::CallFrameHeaderSize;
        ASSERT(argument < m_numArguments);

        NodeIndex nodeIndex = m_currentBlock->m_argumentsAtTail[argument].value;

        if (nodeIndex != NoNode) {
            Node& node = m_graph[nodeIndex];
            if (node.op == SetArgument) {
                // We're getting an argument in the first basic block; link
                // the GetLocal to the SetArgument.
                ASSERT(node.local() == static_cast<VirtualRegister>(operand));
                nodeIndex = addToGraph(GetLocal, OpInfo(node.variableAccessData()), nodeIndex);
                m_currentBlock->m_argumentsAtTail[argument].value = nodeIndex;
                return nodeIndex;
            }

            if (node.op == GetLocal)
                return nodeIndex;

            ASSERT(node.op == SetLocal);
            return node.child1();
        }

        VariableAccessData* variableAccessData = newVariableAccessData(operand);

        NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
        m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, argument));
        nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), phi);
        m_currentBlock->m_argumentsAtTail[argument].value = nodeIndex;

        m_currentBlock->m_argumentsAtHead[argument].setFirstTime(nodeIndex);

        return nodeIndex;
    }
    void setArgument(int operand, NodeIndex value)
    {
        unsigned argument = operand + m_codeBlock->m_numParameters + RegisterFile::CallFrameHeaderSize;
        ASSERT(argument < m_numArguments);

        m_currentBlock->m_argumentsAtTail[argument].value = addToGraph(SetLocal, OpInfo(newVariableAccessData(operand)), value);
    }
    // Get an operand, and perform a ToInt32/ToNumber conversion on it.
    NodeIndex getToInt32(int operand)
    {
        return toInt32(get(operand));
    }
    NodeIndex getToNumber(int operand)
    {
        return toNumber(get(operand));
    }

    // Perform an ES5 ToInt32 operation - returns a node of type NodeResultInt32.
    NodeIndex toInt32(NodeIndex index)
    {
        Node& node = m_graph[index];

        if (node.hasInt32Result())
            return index;

        if (node.op == UInt32ToNumber)
            return node.child1();

        // Check for numeric constants boxed as JSValues.
        if (node.op == JSConstant) {
            JSValue v = valueOfJSConstant(index);
            if (v.isInt32())
                return getJSConstant(node.constantNumber());
            // FIXME: We could convert the double ToInteger at this point.
        }

        return addToGraph(ValueToInt32, index);
    }

    // Perform an ES5 ToNumber operation - returns a node of type NodeResultDouble.
    NodeIndex toNumber(NodeIndex index)
    {
        Node& node = m_graph[index];

        if (node.hasNumberResult())
            return index;

        if (node.op == JSConstant) {
            JSValue v = valueOfJSConstant(index);
            if (v.isNumber())
                return getJSConstant(node.constantNumber());
        }

        return addToGraph(ValueToNumber, OpInfo(NodeUseBottom), index);
    }
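
    // JSConstant nodes are cached per constant-pool entry in m_constants (which parseBlock()
    // resets at each block boundary), so repeated uses of the same constant within a block
    // reuse one node rather than growing the graph.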
    NodeIndex getJSConstant(unsigned constant)
    {
        NodeIndex index = m_constants[constant].asJSValue;
        if (index != NoNode)
            return index;

        NodeIndex resultIndex = addToGraph(JSConstant, OpInfo(constant));
        m_constants[constant].asJSValue = resultIndex;
        return resultIndex;
    }

    // Helper functions to get/set the this value.
    NodeIndex getThis()
    {
        return getArgument(m_codeBlock->thisRegister());
    }
    void setThis(NodeIndex value)
    {
        setArgument(m_codeBlock->thisRegister(), value);
    }
    // Convenience methods for checking nodes for constants.
    bool isJSConstant(NodeIndex index)
    {
        return m_graph[index].op == JSConstant;
    }
    bool isInt32Constant(NodeIndex nodeIndex)
    {
        return isJSConstant(nodeIndex) && valueOfJSConstant(nodeIndex).isInt32();
    }
    bool isSmallInt32Constant(NodeIndex nodeIndex)
    {
        if (!isJSConstant(nodeIndex))
            return false;
        JSValue value = valueOfJSConstant(nodeIndex);
        if (!value.isInt32())
            return false;
        int32_t intValue = value.asInt32();
        return intValue >= -5 && intValue <= 5;
    }
    // Convenience methods for getting constant values.
    JSValue valueOfJSConstant(NodeIndex index)
    {
        ASSERT(isJSConstant(index));
        return m_codeBlock->getConstant(FirstConstantRegisterIndex + m_graph[index].constantNumber());
    }
    int32_t valueOfInt32Constant(NodeIndex nodeIndex)
    {
        ASSERT(isInt32Constant(nodeIndex));
        return valueOfJSConstant(nodeIndex).asInt32();
    }
    // This method returns a JSConstant with the value 'undefined'.
    NodeIndex constantUndefined()
    {
        // Has m_constantUndefined been set up yet?
        if (m_constantUndefined == UINT_MAX) {
            // Search the constant pool for undefined; if we find it, we can just reuse it!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constantUndefined = 0; m_constantUndefined < numberOfConstants; ++m_constantUndefined) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined);
                if (testMe.isUndefined())
                    return getJSConstant(m_constantUndefined);
            }

            // Add undefined to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(jsUndefined());
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constantUndefined must refer to an entry in the CodeBlock's constant pool that has the value 'undefined'.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined).isUndefined());
        return getJSConstant(m_constantUndefined);
    }
    // This method returns a JSConstant with the value 'null'.
    NodeIndex constantNull()
    {
        // Has m_constantNull been set up yet?
        if (m_constantNull == UINT_MAX) {
            // Search the constant pool for null; if we find it, we can just reuse it!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constantNull = 0; m_constantNull < numberOfConstants; ++m_constantNull) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull);
                if (testMe.isNull())
                    return getJSConstant(m_constantNull);
            }

            // Add null to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(jsNull());
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constantNull must refer to an entry in the CodeBlock's constant pool that has the value 'null'.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull).isNull());
        return getJSConstant(m_constantNull);
    }
    // This method returns a JSConstant with the value 1.
    NodeIndex one()
    {
        // Has m_constant1 been set up yet?
        if (m_constant1 == UINT_MAX) {
            // Search the constant pool for the value 1; if we find it, we can just reuse it!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constant1 = 0; m_constant1 < numberOfConstants; ++m_constant1) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1);
                if (testMe.isInt32() && testMe.asInt32() == 1)
                    return getJSConstant(m_constant1);
            }

            // Add the value 1 to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(jsNumber(1));
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constant1 must refer to an entry in the CodeBlock's constant pool that has the integer value 1.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).isInt32());
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).asInt32() == 1);
        return getJSConstant(m_constant1);
    }
    // This method returns a JSConstant with the value NaN.
    NodeIndex constantNaN()
    {
        JSValue nan = jsNaN();

        // Has m_constantNaN been set up yet?
        if (m_constantNaN == UINT_MAX) {
            // Search the constant pool for the value NaN; if we find it, we can just reuse it!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constantNaN = 0; m_constantNaN < numberOfConstants; ++m_constantNaN) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN);
                if (JSValue::encode(testMe) == JSValue::encode(nan))
                    return getJSConstant(m_constantNaN);
            }

            // Add the value NaN to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(nan);
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constantNaN must refer to an entry in the CodeBlock's constant pool that has the value NaN.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).isDouble());
        ASSERT(isnan(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).asDouble()));
        return getJSConstant(m_constantNaN);
    }
    NodeIndex cellConstant(JSCell* cell)
    {
        HashMap<JSCell*, unsigned>::iterator iter = m_cellConstants.find(cell);
        if (iter != m_cellConstants.end())
            return getJSConstant(iter->second);

        m_codeBlock->addConstant(cell);
        m_constants.append(ConstantRecord());
        ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());

        m_cellConstants.add(cell, m_codeBlock->numberOfConstantRegisters() - 1);

        return getJSConstant(m_codeBlock->numberOfConstantRegisters() - 1);
    }

    CodeOrigin currentCodeOrigin()
    {
        return CodeOrigin(m_currentIndex);
    }
    // These methods create a node and add it to the graph. If nodes of this type are
    // 'mustGenerate' then the node will implicitly be ref'ed to ensure generation.
    NodeIndex addToGraph(NodeType op, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
    {
        NodeIndex resultIndex = (NodeIndex)m_graph.size();
        m_graph.append(Node(op, currentCodeOrigin(), child1, child2, child3));

        if (op & NodeMustGenerate)
            m_graph.ref(resultIndex);
        return resultIndex;
    }
    NodeIndex addToGraph(NodeType op, OpInfo info, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
    {
        NodeIndex resultIndex = (NodeIndex)m_graph.size();
        m_graph.append(Node(op, currentCodeOrigin(), info, child1, child2, child3));

        if (op & NodeMustGenerate)
            m_graph.ref(resultIndex);
        return resultIndex;
    }
    NodeIndex addToGraph(NodeType op, OpInfo info1, OpInfo info2, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
    {
        NodeIndex resultIndex = (NodeIndex)m_graph.size();
        m_graph.append(Node(op, currentCodeOrigin(), info1, info2, child1, child2, child3));

        if (op & NodeMustGenerate)
            m_graph.ref(resultIndex);
        return resultIndex;
    }

    NodeIndex addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
    {
        NodeIndex resultIndex = (NodeIndex)m_graph.size();
        m_graph.append(Node(Node::VarArg, op, currentCodeOrigin(), info1, info2, m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs));

        m_numPassedVarArgs = 0;

        if (op & NodeMustGenerate)
            m_graph.ref(resultIndex);
        return resultIndex;
    }

    void addVarArgChild(NodeIndex child)
    {
        m_graph.m_varArgChildren.append(child);
        m_numPassedVarArgs++;
    }
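
    // addCall() packs the callee followed by the arguments (skipping the 'this' slot for
    // Construct) into m_varArgChildren via addVarArgChild(), then emits a single VarArg
    // Call/Construct node. If the next instruction is op_call_put_result, its value profile
    // supplies the result prediction and its operand receives the call result.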
    NodeIndex addCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op)
    {
        Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);

        PredictedType prediction = PredictNone;
        if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
            prediction = getPrediction(m_graph.size(), m_currentIndex + OPCODE_LENGTH(op_call));

        addVarArgChild(get(currentInstruction[1].u.operand));
        int argCount = currentInstruction[2].u.operand;
        int registerOffset = currentInstruction[3].u.operand;
        int firstArg = registerOffset - argCount - RegisterFile::CallFrameHeaderSize;
        for (int argIdx = firstArg + (op == Construct ? 1 : 0); argIdx < firstArg + argCount; argIdx++)
            addVarArgChild(get(argIdx));
        NodeIndex call = addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction));
        if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
            set(putInstruction[1].u.operand, call);
        if (RegisterFile::CallFrameHeaderSize + (unsigned)argCount > m_parameterSlots)
            m_parameterSlots = RegisterFile::CallFrameHeaderSize + argCount;
        return call;
    }
    PredictedType getPrediction(NodeIndex nodeIndex, unsigned bytecodeIndex)
    {
        UNUSED_PARAM(nodeIndex);

        ValueProfile* profile = m_profiledBlock->valueProfileForBytecodeOffset(bytecodeIndex);
        PredictedType prediction = profile->computeUpdatedPrediction();
#if ENABLE(DFG_DEBUG_VERBOSE)
        printf("Dynamic [@%u, bc#%u] prediction: %s\n", nodeIndex, bytecodeIndex, predictionToString(prediction));
#endif

        if (prediction == PredictNone) {
            // We have no information about what values this node generates. Give up
            // on executing this code, since we're likely to do more damage than good.
            addToGraph(ForceOSRExit);
        }

        return prediction;
    }

    PredictedType getPrediction()
    {
        return getPrediction(m_graph.size(), m_currentIndex);
    }
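
    // makeSafe() consults the baseline CodeBlock's slow-case counters for the current
    // bytecode index and, when the slow case was likely taken, merges conservative flags
    // (NodeMayOverflow / NodeMayNegZero) into the arithmetic node so later phases do not
    // speculate an integer-only fast path that profiling says tends to fail.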
    NodeIndex makeSafe(NodeIndex nodeIndex)
    {
        if (!m_profiledBlock->likelyToTakeSlowCase(m_currentIndex))
            return nodeIndex;

#if ENABLE(DFG_DEBUG_VERBOSE)
        printf("Making %s @%u safe at bc#%u because slow-case counter is at %u\n", Graph::opName(m_graph[nodeIndex].op), nodeIndex, m_currentIndex, m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter);
#endif

        switch (m_graph[nodeIndex].op) {
        case UInt32ToNumber:
        case ArithAdd:
        case ArithSub:
        case ValueAdd:
        case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
            m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow);
            break;

        case ArithMul:
            if (m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex))
                m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow | NodeMayNegZero);
            else
                m_graph[nodeIndex].mergeArithNodeFlags(NodeMayNegZero);
            break;

        default:
            ASSERT_NOT_REACHED();
            break;
        }

        return nodeIndex;
    }

    NodeIndex makeDivSafe(NodeIndex nodeIndex)
    {
        ASSERT(m_graph[nodeIndex].op == ArithDiv);

        // The main slow case counter for op_div in the old JIT counts only when
        // the operands are not numbers. We don't care about that since we already
        // have speculations in place that take care of that separately. We only
        // care about when the outcome of the division is not an integer, which
        // is what the special fast case counter tells us.

        if (!m_profiledBlock->likelyToTakeSpecialFastCase(m_currentIndex))
            return nodeIndex;

        m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow | NodeMayNegZero);

        return nodeIndex;
    }
    JSGlobalData* m_globalData;
    CodeBlock* m_codeBlock;
    CodeBlock* m_profiledBlock;
    Graph& m_graph;

    // The current block being generated.
    BasicBlock* m_currentBlock;
    // The bytecode index of the current instruction being generated.
    unsigned m_currentIndex;

    // Record failures due to unimplemented functionality or regressions.
    bool m_parseFailed;

    // We use these values during code generation, and to avoid the need for
    // special handling we make sure they are available as constants in the
    // CodeBlock's constant pool. These variables are initialized to
    // UINT_MAX, and lazily updated to hold an index into the CodeBlock's
    // constant pool, as necessary.
    unsigned m_constantUndefined;
    unsigned m_constantNull;
    unsigned m_constantNaN;
    unsigned m_constant1;
    HashMap<JSCell*, unsigned> m_cellConstants;

    // A constant in the constant pool may be represented by more than one
    // node in the graph, depending on the context in which it is being used.
    struct ConstantRecord {
        ConstantRecord()
            : asInt32(NoNode)
            , asNumeric(NoNode)
            , asJSValue(NoNode)
        {
        }

        NodeIndex asInt32;
        NodeIndex asNumeric;
        NodeIndex asJSValue;
    };

    // Track the index of the node whose result is the current value for every
    // register value in the bytecode - argument, local, and temporary.
    Vector<ConstantRecord, 16> m_constants;

    // The number of arguments passed to the function.
    unsigned m_numArguments;
    // The number of locals (vars + temporaries) used in the function.
    unsigned m_numLocals;
    // The number of registers we need to preserve across BasicBlock boundaries;
    // typically equal to the number of vars, but we expand this to cover all
    // temporaries that persist across blocks (due to ?:, &&, ||, etc).
    unsigned m_preservedVars;
    // The number of slots (in units of sizeof(Register)) that we need to
    // preallocate for calls emanating from this frame. This includes the
    // size of the CallFrame, only if this is not a leaf function. (I.e.
    // this is 0 if and only if this function is a leaf.)
    unsigned m_parameterSlots;
    // The number of var args passed to the next var arg node.
    unsigned m_numPassedVarArgs;
    // The index in the global resolve info.
    unsigned m_globalResolveNumber;

    struct PhiStackEntry {
        PhiStackEntry(BasicBlock* block, NodeIndex phi, unsigned varNo)
            : m_block(block)
            , m_phi(phi)
            , m_varNo(varNo)
        {
        }

        BasicBlock* m_block;
        NodeIndex m_phi;
        unsigned m_varNo;
    };
    Vector<PhiStackEntry, 16> m_argumentPhiStack;
    Vector<PhiStackEntry, 16> m_localPhiStack;

    Vector<Node*, 16> m_reusableNodeStack;
};
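
// NEXT_OPCODE() advances past the current instruction and continues parsing the current
// block; LAST_OPCODE() advances past a block terminator and returns from parseBlock(),
// reporting success unless m_parseFailed has been set.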
#define NEXT_OPCODE(name) \
    m_currentIndex += OPCODE_LENGTH(name); \
    continue

#define LAST_OPCODE(name) \
    m_currentIndex += OPCODE_LENGTH(name); \
    return !m_parseFailed
bool ByteCodeParser::handleMinMax(bool usesResult, int resultOperand, NodeType op, int firstArg, int lastArg)
{
    if (!usesResult)
        return true;

    if (lastArg == firstArg) {
        set(resultOperand, constantNaN());
        return true;
    }

    if (lastArg == firstArg + 1) {
        set(resultOperand, getToNumber(firstArg + 1));
        return true;
    }

    if (lastArg == firstArg + 2) {
        set(resultOperand, addToGraph(op, OpInfo(NodeUseBottom), getToNumber(firstArg + 1), getToNumber(firstArg + 2)));
        return true;
    }

    // Don't handle >=3 arguments for now.
    return false;
}
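
// handleIntrinsic() is given the argument range of an op_call whose callee is a known
// function constant. It returns true if it replaced the call with dedicated DFG nodes
// (ArithAbs, ArithMin/Max, ArithSqrt, ArrayPush, ArrayPop); returning false makes the
// caller fall back to emitting a generic Call node.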
bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrinsic intrinsic, int firstArg, int lastArg, PredictedType prediction)
{
    switch (intrinsic) {
    case AbsIntrinsic: {
        if (!usesResult) {
            // There is no such thing as executing abs for effect, so this
            // is dead code.
            return true;
        }

        // We don't care about the this argument. If we don't have a first
        // argument then make this JSConstant(NaN).
        int absArg = firstArg + 1;
        if (absArg > lastArg)
            set(resultOperand, constantNaN());
        else
            set(resultOperand, addToGraph(ArithAbs, OpInfo(NodeUseBottom), getToNumber(absArg)));
        return true;
    }

    case MinIntrinsic:
        return handleMinMax(usesResult, resultOperand, ArithMin, firstArg, lastArg);

    case MaxIntrinsic:
        return handleMinMax(usesResult, resultOperand, ArithMax, firstArg, lastArg);

    case SqrtIntrinsic: {
        if (!usesResult)
            return true;

        if (firstArg == lastArg) {
            set(resultOperand, constantNaN());
            return true;
        }

        set(resultOperand, addToGraph(ArithSqrt, getToNumber(firstArg + 1)));
        return true;
    }

    case ArrayPushIntrinsic: {
        if (firstArg + 1 != lastArg)
            return false;

        NodeIndex arrayPush = addToGraph(ArrayPush, OpInfo(0), OpInfo(prediction), get(firstArg), get(firstArg + 1));
        if (usesResult)
            set(resultOperand, arrayPush);

        return true;
    }

    case ArrayPopIntrinsic: {
        if (firstArg != lastArg)
            return false;

        NodeIndex arrayPop = addToGraph(ArrayPop, OpInfo(0), OpInfo(prediction), get(firstArg));
        if (usesResult)
            set(resultOperand, arrayPop);
        return true;
    }

    default:
        ASSERT(intrinsic == NoIntrinsic);
        return false;
    }
}
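
// parseBlock() parses bytecode for the current basic block, starting at m_currentIndex and
// never extending past 'limit' (the next jump target). Each case below appends DFG nodes for
// one opcode and then either continues with NEXT_OPCODE() or terminates the block with
// LAST_OPCODE().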
bool ByteCodeParser::parseBlock(unsigned limit)
{
    // No need to reset state initially, since it has been set by the constructor.
    if (m_currentIndex) {
        for (unsigned i = 0; i < m_constants.size(); ++i)
            m_constants[i] = ConstantRecord();
    }

    // If we are the first basic block, introduce markers for arguments. This allows
    // us to track if a use of an argument may use the actual argument passed, as
    // opposed to using a value we set explicitly.
    if (!m_currentIndex) {
        m_graph.m_arguments.resize(m_numArguments);
        for (unsigned argument = 0; argument < m_numArguments; ++argument) {
            NodeIndex setArgument = addToGraph(SetArgument, OpInfo(newVariableAccessData(argument - m_codeBlock->m_numParameters - RegisterFile::CallFrameHeaderSize)));
            m_graph.m_arguments[argument] = setArgument;
            m_currentBlock->m_argumentsAtHead[argument].setFirstTime(setArgument);
            m_currentBlock->m_argumentsAtTail[argument].setFirstTime(setArgument);
        }
    }

    Interpreter* interpreter = m_globalData->interpreter;
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    unsigned blockBegin = m_currentIndex;
    while (true) {
        // Don't extend over jump destinations.
        if (m_currentIndex == limit) {
            addToGraph(Jump, OpInfo(m_currentIndex));
            return !m_parseFailed;
        }

        // Switch on the current bytecode opcode.
        Instruction* currentInstruction = instructionsBegin + m_currentIndex;
        OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);
        switch (opcodeID) {

        // === Function entry opcodes ===

        case op_enter:
            // Initialize all locals to undefined.
            for (int i = 0; i < m_codeBlock->m_numVars; ++i)
                set(i, constantUndefined());
            NEXT_OPCODE(op_enter);

        case op_convert_this: {
            NodeIndex op1 = getThis();
            setThis(addToGraph(ConvertThis, op1));
            NEXT_OPCODE(op_convert_this);
        }

        case op_create_this: {
            NodeIndex op1 = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(CreateThis, op1));
            NEXT_OPCODE(op_create_this);
        }

        case op_new_object: {
            set(currentInstruction[1].u.operand, addToGraph(NewObject));
            NEXT_OPCODE(op_new_object);
        }

        case op_new_array: {
            int startOperand = currentInstruction[2].u.operand;
            int numOperands = currentInstruction[3].u.operand;
            for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
                addVarArgChild(get(operandIdx));
            set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, NewArray, OpInfo(0), OpInfo(0)));
            NEXT_OPCODE(op_new_array);
        }

        case op_new_array_buffer: {
            int startConstant = currentInstruction[2].u.operand;
            int numConstants = currentInstruction[3].u.operand;
            set(currentInstruction[1].u.operand, addToGraph(NewArrayBuffer, OpInfo(startConstant), OpInfo(numConstants)));
            NEXT_OPCODE(op_new_array_buffer);
        }

        case op_new_regexp: {
            set(currentInstruction[1].u.operand, addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
            NEXT_OPCODE(op_new_regexp);
        }

        case op_get_callee: {
            set(currentInstruction[1].u.operand, addToGraph(GetCallee));
            NEXT_OPCODE(op_get_callee);
        }

        // === Bitwise operations ===

        case op_bitand: {
            NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
            NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(BitAnd, op1, op2));
            NEXT_OPCODE(op_bitand);
        }

        case op_bitor: {
            NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
            NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(BitOr, op1, op2));
            NEXT_OPCODE(op_bitor);
        }

        case op_bitxor: {
            NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
            NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(BitXor, op1, op2));
            NEXT_OPCODE(op_bitxor);
        }

        case op_rshift: {
            NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
            NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
            NodeIndex result;
            // Optimize out shifts by zero.
            if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
                result = op1;
            else
                result = addToGraph(BitRShift, op1, op2);
            set(currentInstruction[1].u.operand, result);
            NEXT_OPCODE(op_rshift);
        }

        case op_lshift: {
            NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
            NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
            NodeIndex result;
            // Optimize out shifts by zero.
            if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
                result = op1;
            else
                result = addToGraph(BitLShift, op1, op2);
            set(currentInstruction[1].u.operand, result);
            NEXT_OPCODE(op_lshift);
        }

        case op_urshift: {
            NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
            NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
            NodeIndex result;
            // The result of a zero-extending right shift is treated as an unsigned value.
            // This means that if the top bit is set, the result is not in the int32 range,
            // and as such must be stored as a double. If the shift amount is a constant,
            // we may be able to optimize.
            if (isInt32Constant(op2)) {
                // If we know we are shifting by a non-zero amount, then since the operation
                // zero fills we know the top bit of the result must be zero, and as such the
                // result must be within the int32 range. Conversely, if this is a shift by
                // zero, then the result may be changed by the conversion to unsigned, but it
                // is not necessary to perform the shift!
                if (valueOfInt32Constant(op2) & 0x1f)
                    result = addToGraph(BitURShift, op1, op2);
                else
                    result = makeSafe(addToGraph(UInt32ToNumber, OpInfo(NodeUseBottom), op1));
            } else {
                // Cannot optimize at this stage; shift & potentially rebox as a double.
                result = addToGraph(BitURShift, op1, op2);
                result = makeSafe(addToGraph(UInt32ToNumber, OpInfo(NodeUseBottom), result));
            }
            set(currentInstruction[1].u.operand, result);
            NEXT_OPCODE(op_urshift);
        }

        // === Increment/Decrement opcodes ===

        case op_pre_inc: {
            unsigned srcDst = currentInstruction[1].u.operand;
            NodeIndex op = getToNumber(srcDst);
            set(srcDst, makeSafe(addToGraph(ArithAdd, OpInfo(NodeUseBottom), op, one())));
            NEXT_OPCODE(op_pre_inc);
        }

        case op_post_inc: {
            unsigned result = currentInstruction[1].u.operand;
            unsigned srcDst = currentInstruction[2].u.operand;
            NodeIndex op = getToNumber(srcDst);
            set(result, op);
            set(srcDst, makeSafe(addToGraph(ArithAdd, OpInfo(NodeUseBottom), op, one())));
            NEXT_OPCODE(op_post_inc);
        }

        case op_pre_dec: {
            unsigned srcDst = currentInstruction[1].u.operand;
            NodeIndex op = getToNumber(srcDst);
            set(srcDst, makeSafe(addToGraph(ArithSub, OpInfo(NodeUseBottom), op, one())));
            NEXT_OPCODE(op_pre_dec);
        }

        case op_post_dec: {
            unsigned result = currentInstruction[1].u.operand;
            unsigned srcDst = currentInstruction[2].u.operand;
            NodeIndex op = getToNumber(srcDst);
            set(result, op);
            set(srcDst, makeSafe(addToGraph(ArithSub, OpInfo(NodeUseBottom), op, one())));
            NEXT_OPCODE(op_post_dec);
        }

        // === Arithmetic operations ===

        case op_add: {
            NodeIndex op1 = get(currentInstruction[2].u.operand);
            NodeIndex op2 = get(currentInstruction[3].u.operand);
            if (m_graph[op1].hasNumberResult() && m_graph[op2].hasNumberResult())
                set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithAdd, OpInfo(NodeUseBottom), toNumber(op1), toNumber(op2))));
            else
                set(currentInstruction[1].u.operand, makeSafe(addToGraph(ValueAdd, OpInfo(NodeUseBottom), op1, op2)));
            NEXT_OPCODE(op_add);
        }

        case op_sub: {
            NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
            NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithSub, OpInfo(NodeUseBottom), op1, op2)));
            NEXT_OPCODE(op_sub);
        }

        case op_mul: {
            // Multiply requires that the inputs are not truncated, unfortunately.
            NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
            NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMul, OpInfo(NodeUseBottom), op1, op2)));
            NEXT_OPCODE(op_mul);
        }

        case op_mod: {
            NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
            NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMod, OpInfo(NodeUseBottom), op1, op2)));
            NEXT_OPCODE(op_mod);
        }

        case op_div: {
            NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
            NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, makeDivSafe(addToGraph(ArithDiv, OpInfo(NodeUseBottom), op1, op2)));
            NEXT_OPCODE(op_div);
        }

        // === Misc operations ===

#if ENABLE(DEBUG_WITH_BREAKPOINT)
        case op_debug:
            addToGraph(Breakpoint);
            NEXT_OPCODE(op_debug);
#endif

        case op_mov: {
            NodeIndex op = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, op);
            NEXT_OPCODE(op_mov);
        }

        case op_check_has_instance:
            addToGraph(CheckHasInstance, get(currentInstruction[1].u.operand));
            NEXT_OPCODE(op_check_has_instance);

        case op_instanceof: {
            NodeIndex value = get(currentInstruction[2].u.operand);
            NodeIndex baseValue = get(currentInstruction[3].u.operand);
            NodeIndex prototype = get(currentInstruction[4].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(InstanceOf, value, baseValue, prototype));
            NEXT_OPCODE(op_instanceof);
        }

        case op_not: {
            NodeIndex value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(LogicalNot, value));
            NEXT_OPCODE(op_not);
        }

        case op_to_primitive: {
            NodeIndex value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(ToPrimitive, value));
            NEXT_OPCODE(op_to_primitive);
        }

        case op_strcat: {
            int startOperand = currentInstruction[2].u.operand;
            int numOperands = currentInstruction[3].u.operand;
            for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
                addVarArgChild(get(operandIdx));
            set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, StrCat, OpInfo(0), OpInfo(0)));
            NEXT_OPCODE(op_strcat);
        }

        case op_less: {
            NodeIndex op1 = get(currentInstruction[2].u.operand);
            NodeIndex op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(CompareLess, op1, op2));
            NEXT_OPCODE(op_less);
        }

        case op_lesseq: {
            NodeIndex op1 = get(currentInstruction[2].u.operand);
            NodeIndex op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(CompareLessEq, op1, op2));
            NEXT_OPCODE(op_lesseq);
        }

        case op_greater: {
            NodeIndex op1 = get(currentInstruction[2].u.operand);
            NodeIndex op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(CompareGreater, op1, op2));
            NEXT_OPCODE(op_greater);
        }

        case op_greatereq: {
            NodeIndex op1 = get(currentInstruction[2].u.operand);
            NodeIndex op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(CompareGreaterEq, op1, op2));
            NEXT_OPCODE(op_greatereq);
        }

        case op_eq: {
            NodeIndex op1 = get(currentInstruction[2].u.operand);
            NodeIndex op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(CompareEq, op1, op2));
            NEXT_OPCODE(op_eq);
        }

        case op_eq_null: {
            NodeIndex value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(CompareEq, value, constantNull()));
            NEXT_OPCODE(op_eq_null);
        }

        case op_stricteq: {
            NodeIndex op1 = get(currentInstruction[2].u.operand);
            NodeIndex op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(CompareStrictEq, op1, op2));
            NEXT_OPCODE(op_stricteq);
        }

        case op_neq: {
            NodeIndex op1 = get(currentInstruction[2].u.operand);
            NodeIndex op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
            NEXT_OPCODE(op_neq);
        }

        case op_neq_null: {
            NodeIndex value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, value, constantNull())));
            NEXT_OPCODE(op_neq_null);
        }

        case op_nstricteq: {
            NodeIndex op1 = get(currentInstruction[2].u.operand);
            NodeIndex op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareStrictEq, op1, op2)));
            NEXT_OPCODE(op_nstricteq);
        }

        // === Property access operations ===

        case op_get_by_val: {
            PredictedType prediction = getPrediction();

            NodeIndex base = get(currentInstruction[2].u.operand);
            NodeIndex property = get(currentInstruction[3].u.operand);

            NodeIndex getByVal = addToGraph(GetByVal, OpInfo(0), OpInfo(prediction), base, property);
            set(currentInstruction[1].u.operand, getByVal);

            NEXT_OPCODE(op_get_by_val);
        }

        case op_put_by_val: {
            NodeIndex base = get(currentInstruction[1].u.operand);
            NodeIndex property = get(currentInstruction[2].u.operand);
            NodeIndex value = get(currentInstruction[3].u.operand);

            addToGraph(PutByVal, base, property, value);

            NEXT_OPCODE(op_put_by_val);
        }
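
        // op_method_check always precedes an op_get_by_id; the two are parsed together here.
        // When the baseline JIT's MethodCallLinkInfo shows the site linked to a single
        // structure and the generic get_by_id stub never fired, the load is emitted as a
        // CheckMethod node backed by a MethodCheckData entry; otherwise a GetMethod node is
        // used. Both instructions are consumed, so m_currentIndex is advanced manually.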
        case op_method_check: {
            Instruction* getInstruction = currentInstruction + OPCODE_LENGTH(op_method_check);

            PredictedType prediction = getPrediction();

            ASSERT(interpreter->getOpcodeID(getInstruction->u.opcode) == op_get_by_id);

            NodeIndex base = get(getInstruction[2].u.operand);
            unsigned identifier = getInstruction[3].u.operand;

            // Check if the method_check was monomorphic. If so, emit a CheckXYZMethod
            // node, which is a lot more efficient.
            StructureStubInfo& stubInfo = m_profiledBlock->getStubInfo(m_currentIndex);
            MethodCallLinkInfo& methodCall = m_profiledBlock->getMethodCallLinkInfo(m_currentIndex);

            if (methodCall.seen && !!methodCall.cachedStructure && !stubInfo.seen) {
                // It's monomorphic as far as we can tell, since the method_check was linked
                // but the slow path (i.e. the normal get_by_id) never fired.

                NodeIndex checkMethod = addToGraph(CheckMethod, OpInfo(identifier), OpInfo(m_graph.m_methodCheckData.size()), base);
                set(getInstruction[1].u.operand, checkMethod);

                MethodCheckData methodCheckData;
                methodCheckData.structure = methodCall.cachedStructure.get();
                methodCheckData.prototypeStructure = methodCall.cachedPrototypeStructure.get();
                methodCheckData.function = methodCall.cachedFunction.get();
                methodCheckData.prototype = methodCall.cachedPrototype.get();
                m_graph.m_methodCheckData.append(methodCheckData);
            } else {
                NodeIndex getMethod = addToGraph(GetMethod, OpInfo(identifier), OpInfo(prediction), base);
                set(getInstruction[1].u.operand, getMethod);
            }

            m_currentIndex += OPCODE_LENGTH(op_method_check) + OPCODE_LENGTH(op_get_by_id);
            continue;
        }
        case op_get_scoped_var: {
            PredictedType prediction = getPrediction();
            int dst = currentInstruction[1].u.operand;
            int slot = currentInstruction[2].u.operand;
            int depth = currentInstruction[3].u.operand;
            NodeIndex getScopeChain = addToGraph(GetScopeChain, OpInfo(depth));
            NodeIndex getScopedVar = addToGraph(GetScopedVar, OpInfo(slot), OpInfo(prediction), getScopeChain);
            set(dst, getScopedVar);
            NEXT_OPCODE(op_get_scoped_var);
        }
        case op_put_scoped_var: {
            int slot = currentInstruction[1].u.operand;
            int depth = currentInstruction[2].u.operand;
            int source = currentInstruction[3].u.operand;
            NodeIndex getScopeChain = addToGraph(GetScopeChain, OpInfo(depth));
            addToGraph(PutScopedVar, OpInfo(slot), getScopeChain, get(source));
            NEXT_OPCODE(op_put_scoped_var);
        }
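
        // op_get_by_id consults the baseline JIT's StructureStubInfo for this bytecode
        // index. If the access was self-only (one structure, or a self-list all agreeing on
        // one storage offset), it is lowered to CheckStructure + GetPropertyStorage +
        // GetByOffset against a recorded StorageAccessData; otherwise a generic GetById
        // node is emitted.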
        case op_get_by_id: {
            PredictedType prediction = getPrediction();

            NodeIndex base = get(currentInstruction[2].u.operand);
            unsigned identifierNumber = currentInstruction[3].u.operand;

            Identifier identifier = m_codeBlock->identifier(identifierNumber);
            StructureStubInfo& stubInfo = m_profiledBlock->getStubInfo(m_currentIndex);

            size_t offset = notFound;
            StructureSet structureSet;
            if (stubInfo.seen) {
                switch (stubInfo.accessType) {
                case access_get_by_id_self: {
                    Structure* structure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
                    offset = structure->get(*m_globalData, identifier);

                    if (offset != notFound)
                        structureSet.add(structure);

                    if (offset != notFound)
                        ASSERT(structureSet.size());
                    break;
                }

                case access_get_by_id_self_list: {
                    PolymorphicAccessStructureList* list = stubInfo.u.getByIdProtoList.structureList;
                    unsigned size = stubInfo.u.getByIdProtoList.listSize;
                    for (unsigned i = 0; i < size; ++i) {
                        if (!list->list[i].isDirect) {
                            offset = notFound;
                            break;
                        }

                        Structure* structure = list->list[i].base.get();
                        if (structureSet.contains(structure))
                            continue;

                        size_t myOffset = structure->get(*m_globalData, identifier);

                        if (myOffset == notFound) {
                            offset = notFound;
                            break;
                        }

                        if (offset == notFound)
                            offset = myOffset;
                        else if (offset != myOffset) {
                            offset = notFound;
                            break;
                        }

                        structureSet.add(structure);
                    }

                    if (offset != notFound)
                        ASSERT(structureSet.size());
                    break;
                }

                default:
                    ASSERT(offset == notFound);
                    break;
                }
            }

            if (offset != notFound) {
                ASSERT(structureSet.size());
                addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structureSet)), base);
                set(currentInstruction[1].u.operand, addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), addToGraph(GetPropertyStorage, base)));

                StorageAccessData storageAccessData;
                storageAccessData.offset = offset;
                storageAccessData.identifierNumber = identifierNumber;
                m_graph.m_storageAccessData.append(storageAccessData);
            } else
                set(currentInstruction[1].u.operand, addToGraph(GetById, OpInfo(identifierNumber), OpInfo(prediction), base));

            NEXT_OPCODE(op_get_by_id);
        }
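
        // op_put_by_id similarly uses the baseline stub info: a cached replace becomes
        // CheckStructure + PutByOffset, and a cached transition (with matching storage
        // capacity) walks the structure chain, checks each prototype's structure, and emits
        // PutStructure before the store. Anything else falls back to PutById/PutByIdDirect.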
        case op_put_by_id: {
            NodeIndex value = get(currentInstruction[3].u.operand);
            NodeIndex base = get(currentInstruction[1].u.operand);
            unsigned identifierNumber = currentInstruction[2].u.operand;
            bool direct = currentInstruction[8].u.operand;

            StructureStubInfo& stubInfo = m_profiledBlock->getStubInfo(m_currentIndex);
            if (!stubInfo.seen)
                addToGraph(ForceOSRExit);

            bool alreadyGenerated = false;

            if (stubInfo.seen && !m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)) {
                switch (stubInfo.accessType) {
                case access_put_by_id_replace: {
                    Structure* structure = stubInfo.u.putByIdReplace.baseObjectStructure.get();
                    Identifier identifier = m_codeBlock->identifier(identifierNumber);
                    size_t offset = structure->get(*m_globalData, identifier);

                    if (offset != notFound) {
                        addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), base);
                        addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), base, addToGraph(GetPropertyStorage, base), value);

                        StorageAccessData storageAccessData;
                        storageAccessData.offset = offset;
                        storageAccessData.identifierNumber = identifierNumber;
                        m_graph.m_storageAccessData.append(storageAccessData);

                        alreadyGenerated = true;
                    }
                    break;
                }

                case access_put_by_id_transition: {
                    Structure* previousStructure = stubInfo.u.putByIdTransition.previousStructure.get();
                    Structure* newStructure = stubInfo.u.putByIdTransition.structure.get();

                    if (previousStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity())
                        break;

                    StructureChain* structureChain = stubInfo.u.putByIdTransition.chain.get();

                    Identifier identifier = m_codeBlock->identifier(identifierNumber);
                    size_t offset = newStructure->get(*m_globalData, identifier);

                    if (offset != notFound) {
                        addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(previousStructure)), base);
                        if (!direct) {
                            for (WriteBarrier<Structure>* it = structureChain->head(); *it; ++it) {
                                JSValue prototype = (*it)->storedPrototype();
                                if (prototype.isNull())
                                    continue;
                                ASSERT(prototype.isCell());
                                addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(prototype.asCell()->structure())), cellConstant(prototype.asCell()));
                            }
                        }
                        addToGraph(PutStructure, OpInfo(m_graph.addStructureTransitionData(StructureTransitionData(previousStructure, newStructure))), base);

                        addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), base, addToGraph(GetPropertyStorage, base), value);

                        StorageAccessData storageAccessData;
                        storageAccessData.offset = offset;
                        storageAccessData.identifierNumber = identifierNumber;
                        m_graph.m_storageAccessData.append(storageAccessData);

                        alreadyGenerated = true;
                    }
                    break;
                }

                default:
                    break;
                }
            }

            if (!alreadyGenerated) {
                if (direct)
                    addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
                else
                    addToGraph(PutById, OpInfo(identifierNumber), base, value);
            }

            NEXT_OPCODE(op_put_by_id);
        }
        case op_get_global_var: {
            PredictedType prediction = getPrediction();

            NodeIndex getGlobalVar = addToGraph(GetGlobalVar, OpInfo(currentInstruction[2].u.operand));
            set(currentInstruction[1].u.operand, getGlobalVar);
            m_graph.predictGlobalVar(currentInstruction[2].u.operand, prediction);
            NEXT_OPCODE(op_get_global_var);
        }

        case op_put_global_var: {
            NodeIndex value = get(currentInstruction[2].u.operand);
            addToGraph(PutGlobalVar, OpInfo(currentInstruction[1].u.operand), value);
            NEXT_OPCODE(op_put_global_var);
        }

        // === Block terminators. ===

        case op_jmp: {
            unsigned relativeOffset = currentInstruction[1].u.operand;
            addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
            LAST_OPCODE(op_jmp);
        }

        case op_loop: {
            unsigned relativeOffset = currentInstruction[1].u.operand;
            addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
            LAST_OPCODE(op_loop);
        }

        case op_jtrue: {
            unsigned relativeOffset = currentInstruction[2].u.operand;
            NodeIndex condition = get(currentInstruction[1].u.operand);
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jtrue)), condition);
            LAST_OPCODE(op_jtrue);
        }

        case op_jfalse: {
            unsigned relativeOffset = currentInstruction[2].u.operand;
            NodeIndex condition = get(currentInstruction[1].u.operand);
            addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jfalse)), OpInfo(m_currentIndex + relativeOffset), condition);
            LAST_OPCODE(op_jfalse);
        }

        case op_loop_if_true: {
            unsigned relativeOffset = currentInstruction[2].u.operand;
            NodeIndex condition = get(currentInstruction[1].u.operand);
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_true)), condition);
            LAST_OPCODE(op_loop_if_true);
        }

        case op_loop_if_false: {
            unsigned relativeOffset = currentInstruction[2].u.operand;
            NodeIndex condition = get(currentInstruction[1].u.operand);
            addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_false)), OpInfo(m_currentIndex + relativeOffset), condition);
            LAST_OPCODE(op_loop_if_false);
        }

        case op_jeq_null: {
            unsigned relativeOffset = currentInstruction[2].u.operand;
            NodeIndex value = get(currentInstruction[1].u.operand);
            NodeIndex condition = addToGraph(CompareEq, value, constantNull());
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jeq_null)), condition);
            LAST_OPCODE(op_jeq_null);
        }

        case op_jneq_null: {
            unsigned relativeOffset = currentInstruction[2].u.operand;
            NodeIndex value = get(currentInstruction[1].u.operand);
            NodeIndex condition = addToGraph(CompareEq, value, constantNull());
            addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_null)), OpInfo(m_currentIndex + relativeOffset), condition);
            LAST_OPCODE(op_jneq_null);
        }

        case op_jless: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            NodeIndex op1 = get(currentInstruction[1].u.operand);
            NodeIndex op2 = get(currentInstruction[2].u.operand);
            NodeIndex condition = addToGraph(CompareLess, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jless)), condition);
            LAST_OPCODE(op_jless);
        }

        case op_jlesseq: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            NodeIndex op1 = get(currentInstruction[1].u.operand);
            NodeIndex op2 = get(currentInstruction[2].u.operand);
            NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jlesseq)), condition);
            LAST_OPCODE(op_jlesseq);
        }

        case op_jgreater: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            NodeIndex op1 = get(currentInstruction[1].u.operand);
            NodeIndex op2 = get(currentInstruction[2].u.operand);
            NodeIndex condition = addToGraph(CompareGreater, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreater)), condition);
            LAST_OPCODE(op_jgreater);
        }

        case op_jgreatereq: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            NodeIndex op1 = get(currentInstruction[1].u.operand);
            NodeIndex op2 = get(currentInstruction[2].u.operand);
            NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreatereq)), condition);
            LAST_OPCODE(op_jgreatereq);
        }

        case op_jnless: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            NodeIndex op1 = get(currentInstruction[1].u.operand);
            NodeIndex op2 = get(currentInstruction[2].u.operand);
            NodeIndex condition = addToGraph(CompareLess, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnless)), OpInfo(m_currentIndex + relativeOffset), condition);
            LAST_OPCODE(op_jnless);
        }

        case op_jnlesseq: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            NodeIndex op1 = get(currentInstruction[1].u.operand);
            NodeIndex op2 = get(currentInstruction[2].u.operand);
            NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnlesseq)), OpInfo(m_currentIndex + relativeOffset), condition);
            LAST_OPCODE(op_jnlesseq);
        }

        case op_jngreater: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            NodeIndex op1 = get(currentInstruction[1].u.operand);
            NodeIndex op2 = get(currentInstruction[2].u.operand);
            NodeIndex condition = addToGraph(CompareGreater, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreater)), OpInfo(m_currentIndex + relativeOffset), condition);
            LAST_OPCODE(op_jngreater);
        }

        case op_jngreatereq: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            NodeIndex op1 = get(currentInstruction[1].u.operand);
            NodeIndex op2 = get(currentInstruction[2].u.operand);
            NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreatereq)), OpInfo(m_currentIndex + relativeOffset), condition);
            LAST_OPCODE(op_jngreatereq);
        }

        case op_loop_if_less: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            NodeIndex op1 = get(currentInstruction[1].u.operand);
            NodeIndex op2 = get(currentInstruction[2].u.operand);
            NodeIndex condition = addToGraph(CompareLess, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_less)), condition);
            LAST_OPCODE(op_loop_if_less);
        }

        case op_loop_if_lesseq: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            NodeIndex op1 = get(currentInstruction[1].u.operand);
            NodeIndex op2 = get(currentInstruction[2].u.operand);
            NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_lesseq)), condition);
            LAST_OPCODE(op_loop_if_lesseq);
        }

        case op_loop_if_greater: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            NodeIndex op1 = get(currentInstruction[1].u.operand);
            NodeIndex op2 = get(currentInstruction[2].u.operand);
            NodeIndex condition = addToGraph(CompareGreater, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_greater)), condition);
            LAST_OPCODE(op_loop_if_greater);
        }

        case op_loop_if_greatereq: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            NodeIndex op1 = get(currentInstruction[1].u.operand);
            NodeIndex op2 = get(currentInstruction[2].u.operand);
            NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_greatereq)), condition);
            LAST_OPCODE(op_loop_if_greatereq);
        }

        case op_ret:
            addToGraph(Return, get(currentInstruction[1].u.operand));
            LAST_OPCODE(op_ret);

        case op_end:
            addToGraph(Return, get(currentInstruction[1].u.operand));
            LAST_OPCODE(op_end);

        case op_throw:
            addToGraph(Throw, get(currentInstruction[1].u.operand));
            LAST_OPCODE(op_throw);

        case op_throw_reference_error:
            addToGraph(ThrowReferenceError);
            LAST_OPCODE(op_throw_reference_error);

        case op_call: {
            NodeIndex callTarget = get(currentInstruction[1].u.operand);
            if (m_graph.isFunctionConstant(m_codeBlock, callTarget)) {
                int argCount = currentInstruction[2].u.operand;
                int registerOffset = currentInstruction[3].u.operand;
                int firstArg = registerOffset - argCount - RegisterFile::CallFrameHeaderSize;
                int lastArg = firstArg + argCount - 1;

                // Do we have a result?
                bool usesResult = false;
                int resultOperand = 0; // make compiler happy
                Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
                PredictedType prediction = PredictNone;
                if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
                    resultOperand = putInstruction[1].u.operand;
                    usesResult = true;
                    prediction = getPrediction(m_graph.size(), m_currentIndex + OPCODE_LENGTH(op_call));
                }

                DFG::Intrinsic intrinsic = m_graph.valueOfFunctionConstant(m_codeBlock, callTarget)->executable()->intrinsic();

                if (handleIntrinsic(usesResult, resultOperand, intrinsic, firstArg, lastArg, prediction)) {
                    // NEXT_OPCODE() has to be inside braces.
                    NEXT_OPCODE(op_call);
                }
            }

            addCall(interpreter, currentInstruction, Call);
            NEXT_OPCODE(op_call);
        }

        case op_construct: {
            addCall(interpreter, currentInstruction, Construct);
            NEXT_OPCODE(op_construct);
        }

        case op_call_put_result:
            NEXT_OPCODE(op_call_put_result);

        case op_resolve: {
            PredictedType prediction = getPrediction();

            unsigned identifier = currentInstruction[2].u.operand;

            NodeIndex resolve = addToGraph(Resolve, OpInfo(identifier), OpInfo(prediction));
            set(currentInstruction[1].u.operand, resolve);

            NEXT_OPCODE(op_resolve);
        }

        case op_resolve_base: {
            PredictedType prediction = getPrediction();

            unsigned identifier = currentInstruction[2].u.operand;

            NodeIndex resolve = addToGraph(currentInstruction[3].u.operand ? ResolveBaseStrictPut : ResolveBase, OpInfo(identifier), OpInfo(prediction));
            set(currentInstruction[1].u.operand, resolve);

            NEXT_OPCODE(op_resolve_base);
        }

        case op_resolve_global: {
            PredictedType prediction = getPrediction();

            NodeIndex resolve = addToGraph(ResolveGlobal, OpInfo(m_graph.m_resolveGlobalData.size()), OpInfo(prediction));
            m_graph.m_resolveGlobalData.append(ResolveGlobalData());
            ResolveGlobalData& data = m_graph.m_resolveGlobalData.last();
            data.identifierNumber = currentInstruction[2].u.operand;
            data.resolveInfoIndex = m_globalResolveNumber++;
            set(currentInstruction[1].u.operand, resolve);

            NEXT_OPCODE(op_resolve_global);
        }

        case op_loop_hint: {
            // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
            // OSR can only happen at basic block boundaries. Assert that these two statements
            // agree.
            ASSERT_UNUSED(blockBegin, m_currentIndex == blockBegin);

            m_currentBlock->isOSRTarget = true;

            NEXT_OPCODE(op_loop_hint);
        }

        default:
            // Parse failed! This should not happen because the capabilities checker
            // should have caught it.
            ASSERT_NOT_REACHED();
            return false;
        }

        ASSERT(canCompileOpcode(opcodeID));
    }
}
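
// processPhiStack() runs after all blocks are parsed. For each pending Phi it inspects every
// predecessor block's tail value for the same variable, creating a new Phi in a predecessor
// that has no value yet (and pushing it on the stack in turn), unifying VariableAccessData,
// and wiring the value in as a child. A Phi with more than three incoming values is chained
// through an extra Phi node, since a node has only three fixed child slots.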
template<ByteCodeParser::PhiStackType stackType>
void ByteCodeParser::processPhiStack()
{
    Vector<PhiStackEntry, 16>& phiStack = (stackType == ArgumentPhiStack) ? m_argumentPhiStack : m_localPhiStack;

    while (!phiStack.isEmpty()) {
        PhiStackEntry entry = phiStack.last();
        phiStack.removeLast();

        PredecessorList& predecessors = entry.m_block->m_predecessors;
        unsigned varNo = entry.m_varNo;
        VariableAccessData* dataForPhi = m_graph[entry.m_phi].variableAccessData();

        for (size_t i = 0; i < predecessors.size(); ++i) {
            BasicBlock* predecessorBlock = m_graph.m_blocks[predecessors[i]].get();

            VariableRecord& var = (stackType == ArgumentPhiStack) ? predecessorBlock->m_argumentsAtTail[varNo] : predecessorBlock->m_localsAtTail[varNo];

            NodeIndex valueInPredecessor = var.value;
            if (valueInPredecessor == NoNode) {
                valueInPredecessor = addToGraph(Phi, OpInfo(newVariableAccessData(stackType == ArgumentPhiStack ? varNo - m_codeBlock->m_numParameters - RegisterFile::CallFrameHeaderSize : varNo)));
                var.setFirstTime(valueInPredecessor);
                if (stackType == ArgumentPhiStack)
                    predecessorBlock->m_argumentsAtHead[varNo].setFirstTime(valueInPredecessor);
                else
                    predecessorBlock->m_localsAtHead[varNo].setFirstTime(valueInPredecessor);
                phiStack.append(PhiStackEntry(predecessorBlock, valueInPredecessor, varNo));
            } else if (m_graph[valueInPredecessor].op == GetLocal) {
                // We want to ensure that the VariableAccessDatas are identical between the
                // GetLocal and its block-local Phi. Strictly speaking we only need the two
                // to be unified. But for efficiency, we want the code that creates GetLocals
                // and Phis to try to reuse VariableAccessDatas as much as possible.
                ASSERT(m_graph[valueInPredecessor].variableAccessData() == m_graph[m_graph[valueInPredecessor].child1()].variableAccessData());

                valueInPredecessor = m_graph[valueInPredecessor].child1();
            } else
                ASSERT(m_graph[valueInPredecessor].op == SetLocal || m_graph[valueInPredecessor].op == Phi || (m_graph[valueInPredecessor].op == SetArgument && stackType == ArgumentPhiStack));

            VariableAccessData* dataForPredecessor = m_graph[valueInPredecessor].variableAccessData();

            dataForPredecessor->unify(dataForPhi);

            Node* phiNode = &m_graph[entry.m_phi];
            if (phiNode->refCount())
                m_graph.ref(valueInPredecessor);

            if (phiNode->child1() == NoNode) {
                phiNode->children.fixed.child1 = valueInPredecessor;
                continue;
            }
            if (phiNode->child2() == NoNode) {
                phiNode->children.fixed.child2 = valueInPredecessor;
                continue;
            }
            if (phiNode->child3() == NoNode) {
                phiNode->children.fixed.child3 = valueInPredecessor;
                continue;
            }

            NodeIndex newPhi = addToGraph(Phi, OpInfo(dataForPhi));

            phiNode = &m_graph[entry.m_phi]; // reload after vector resize
            Node& newPhiNode = m_graph[newPhi];
            if (phiNode->refCount())
                m_graph.ref(newPhi);

            newPhiNode.children.fixed.child1 = phiNode->child1();
            newPhiNode.children.fixed.child2 = phiNode->child2();
            newPhiNode.children.fixed.child3 = phiNode->child3();
            phiNode->children.fixed.child1 = newPhi;
            phiNode->children.fixed.child2 = valueInPredecessor;
            phiNode->children.fixed.child3 = NoNode;
        }
    }
}
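
// setupPredecessors() walks every block's terminal node and records this block as a
// predecessor of each jump/branch target, giving processPhiStack() the predecessor lists
// it needs.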
void ByteCodeParser::setupPredecessors()
{
    for (BlockIndex index = 0; index < m_graph.m_blocks.size(); ++index) {
        BasicBlock* block = m_graph.m_blocks[index].get();
        ASSERT(block->end != NoNode);
        Node& node = m_graph[block->end - 1];
        ASSERT(node.isTerminal());

        if (node.isJump())
            m_graph.blockForBytecodeOffset(node.takenBytecodeOffset()).m_predecessors.append(index);
        else if (node.isBranch()) {
            m_graph.blockForBytecodeOffset(node.takenBytecodeOffset()).m_predecessors.append(index);
            m_graph.blockForBytecodeOffset(node.notTakenBytecodeOffset()).m_predecessors.append(index);
        }
    }
}
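
// parse() drives the whole translation: bytecode is split into basic blocks at the
// CodeBlock's jump targets, each region is parsed by parseBlock(), and then predecessor
// links and Phi functions are resolved before the per-function totals are copied into the
// Graph.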
bool ByteCodeParser::parse()
{
    // Set during construction.
    ASSERT(!m_currentIndex);

    for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= m_codeBlock->numberOfJumpTargets(); ++jumpTargetIndex) {
        // The maximum bytecode offset to go into the current basic block is either the next jump target, or the end of the instructions.
        unsigned limit = jumpTargetIndex < m_codeBlock->numberOfJumpTargets() ? m_codeBlock->jumpTarget(jumpTargetIndex) : m_codeBlock->instructions().size();
        ASSERT(m_currentIndex < limit);

        // Loop until we reach the current limit (i.e. next jump target).
        do {
            OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(m_currentIndex, m_graph.size(), m_numArguments, m_numLocals));
            m_currentBlock = block.get();
            m_graph.m_blocks.append(block.release());

            if (!parseBlock(limit))
                return false;
            // We should not have gone beyond the limit.
            ASSERT(m_currentIndex <= limit);

            m_currentBlock->end = m_graph.size();
        } while (m_currentIndex < limit);
    }

    // Should have reached the end of the instructions.
    ASSERT(m_currentIndex == m_codeBlock->instructions().size());

    setupPredecessors();
    processPhiStack<LocalPhiStack>();
    processPhiStack<ArgumentPhiStack>();

    m_graph.m_preservedVars = m_preservedVars;
    m_graph.m_localVars = m_numLocals;
    m_graph.m_parameterSlots = m_parameterSlots;

    return true;
}
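
// Entry point: parse codeBlock's bytecode into 'graph', using the baseline ("alternative")
// CodeBlock as the source of value profiles and inline cache information.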
bool parse(Graph& graph, JSGlobalData* globalData, CodeBlock* codeBlock)
{
#if DFG_DEBUG_LOCAL_DISBALE
    UNUSED_PARAM(graph);
    UNUSED_PARAM(globalData);
    UNUSED_PARAM(codeBlock);
    return false;
#else
    return ByteCodeParser(globalData, codeBlock, codeBlock->alternative(), graph).parse();
#endif
}

} } // namespace JSC::DFG